xref: /openbmc/qemu/target/arm/helper.c (revision 40cf6a54)
#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"

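/* Used to scale QEMU_CLOCK_VIRTUAL nanoseconds into PMU cycle counts in
 * the PMCCNTR helpers below (see pmccntr_sync() and pmccntr_read()).
 */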
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

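/* gdbstub register accessors: each returns the number of bytes read or
 * written for register 'reg', or 0 if the register is not handled.
 * With VFP3 and Neon present the index space is D0..D31 (0..31),
 * Q-register aliases (32..47), then FPSID (48), FPSCR (49), FPEXC (50).
 */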
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

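/* System register writes from the debugger are not supported; returning 0
 * tells the gdbstub that no bytes were consumed.
 */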
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
   /* Return true if the regdef would cause an assertion if you called
    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
    * program bug for it not to have the NO_RAW flag).
    * NB that returning false here doesn't necessarily mean that calling
    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
    * read/write access functions which are safe for raw use" from "has
    * read/write access functions which have side effects but has forgotten
    * to provide raw access functions".
    * The tests here line up with the conditions in read/write_raw_cp_reg()
    * and assertions in raw_read()/raw_write().
    */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

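/* Counterpart of write_cpustate_to_list(): copy the (index,value) list
 * back into cpu->env, e.g. when loading incoming migration state.
 */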
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (env->cp15.hcr_el2 & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}

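/* For instance, with HCR_EL2.FB set, a TLBIALL executed at NS EL1 must
 * take effect as if it were TLBIALLIS; the write helpers below therefore
 * forward to their inner-shareable counterparts in that case.
 */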
static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

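/* ARMMMUIdxBit_S12NSE0/S12NSE1 name the NS EL0/EL1 stage 1+2 translation
 * regimes and ARMMMUIdxBit_S2NS the NS stage 2 regime, so the flushes
 * below cover every TLB that can hold NS EL1&0 translations.
 */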
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

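/* Worked example for cpacr_write() below: on a v7 core with VFP but no
 * Neon, mask becomes 0xc0f00000 and both ASEDIS [31] and D32DIS [30] are
 * forced to one, so writing 0x00f00000 (enable cp10/cp11) stores
 * 0xc0f00000.
 */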
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
  return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
  return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

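/* pmu_counter_mask() sets bit 31 (the cycle counter PMCCNTR) plus one bit
 * per event counter; e.g. with PMCR.N == 4 it yields 0x8000000f.
 * The pmreg_access* helpers below gate user-mode access via PMUSERENR,
 * whose v8 bits are EN [0], SW [1], CR [2] and ER [3].
 */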
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
        return false;
    }

    return true;
}

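/* The cycle counter state c15_ccnt uses a dual representation: while the
 * counter is enabled it holds the tick origin (current ticks minus the
 * guest-visible count); while disabled it holds the guest-visible count
 * itself. pmccntr_sync() toggles between the two, which is why writers
 * such as pmcr_write() bracket their update with a pair of sync calls.
 */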
1090 {
1091     uint64_t temp_ticks;
1092 
1093     temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1094                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1095 
1096     if (env->cp15.c9_pmcr & PMCRD) {
1097         /* Increment once every 64 processor clock cycles */
1098         temp_ticks /= 64;
1099     }
1100 
1101     if (arm_ccnt_enabled(env)) {
1102         env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
1103     }
1104 }
1105 
1106 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1107                        uint64_t value)
1108 {
1109     pmccntr_sync(env);
1110 
1111     if (value & PMCRC) {
1112         /* The counter has been reset */
1113         env->cp15.c15_ccnt = 0;
1114     }
1115 
1116     /* only the DP, X, D and E bits are writable */
1117     env->cp15.c9_pmcr &= ~0x39;
1118     env->cp15.c9_pmcr |= (value & 0x39);
1119 
1120     pmccntr_sync(env);
1121 }
1122 
1123 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1124 {
1125     uint64_t total_ticks;
1126 
1127     if (!arm_ccnt_enabled(env)) {
1128         /* Counter is disabled, do not change value */
1129         return env->cp15.c15_ccnt;
1130     }
1131 
1132     total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1133                            ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1134 
1135     if (env->cp15.c9_pmcr & PMCRD) {
1136         /* Increment once every 64 processor clock cycles */
1137         total_ticks /= 64;
1138     }
1139     return total_ticks - env->cp15.c15_ccnt;
1140 }
1141 
1142 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1143                          uint64_t value)
1144 {
1145     /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1146      * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1147      * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1148      * accessed.
1149      */
1150     env->cp15.c9_pmselr = value & 0x1f;
1151 }
1152 
1153 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1154                         uint64_t value)
1155 {
1156     uint64_t total_ticks;
1157 
1158     if (!arm_ccnt_enabled(env)) {
1159         /* Counter is disabled, set the absolute value */
1160         env->cp15.c15_ccnt = value;
1161         return;
1162     }
1163 
1164     total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1165                            ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1166 
1167     if (env->cp15.c9_pmcr & PMCRD) {
1168         /* Increment once every 64 processor clock cycles */
1169         total_ticks /= 64;
1170     }
1171     env->cp15.c15_ccnt = total_ticks - value;
1172 }
1173 
1174 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1175                             uint64_t value)
1176 {
1177     uint64_t cur_val = pmccntr_read(env, NULL);
1178 
1179     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1180 }
1181 
1182 #else /* CONFIG_USER_ONLY */
1183 
1184 void pmccntr_sync(CPUARMState *env)
1185 {
1186 }
1187 
1188 #endif
1189 
1190 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1191                             uint64_t value)
1192 {
1193     pmccntr_sync(env);
1194     env->cp15.pmccfiltr_el0 = value & 0xfc000000;
1195     pmccntr_sync(env);
1196 }
1197 
1198 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1199                             uint64_t value)
1200 {
1201     value &= pmu_counter_mask(env);
1202     env->cp15.c9_pmcnten |= value;
1203 }
1204 
1205 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1206                              uint64_t value)
1207 {
1208     value &= pmu_counter_mask(env);
1209     env->cp15.c9_pmcnten &= ~value;
1210 }
1211 
1212 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1213                          uint64_t value)
1214 {
1215     value &= pmu_counter_mask(env);
1216     env->cp15.c9_pmovsr &= ~value;
1217 }
1218 
1219 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1220                              uint64_t value)
1221 {
1222     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1223      * PMSELR value is equal to or greater than the number of implemented
1224      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1225      */
1226     if (env->cp15.c9_pmselr == 0x1f) {
1227         pmccfiltr_write(env, ri, value);
1228     }
1229 }
1230 
1231 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1232 {
1233     /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1234      * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1235      */
1236     if (env->cp15.c9_pmselr == 0x1f) {
1237         return env->cp15.pmccfiltr_el0;
1238     } else {
1239         return 0;
1240     }
1241 }
1242 
1243 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1244                             uint64_t value)
1245 {
1246     if (arm_feature(env, ARM_FEATURE_V8)) {
1247         env->cp15.c9_pmuserenr = value & 0xf;
1248     } else {
1249         env->cp15.c9_pmuserenr = value & 1;
1250     }
1251 }
1252 
1253 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1254                              uint64_t value)
1255 {
1256     /* We have no event counters so only the C bit can be changed */
1257     value &= pmu_counter_mask(env);
1258     env->cp15.c9_pminten |= value;
1259 }
1260 
1261 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1262                              uint64_t value)
1263 {
1264     value &= pmu_counter_mask(env);
1265     env->cp15.c9_pminten &= ~value;
1266 }
1267 
1268 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1269                        uint64_t value)
1270 {
1271     /* Note that even though the AArch64 view of this register has bits
1272      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1273      * architectural requirements for bits which are RES0 only in some
1274      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1275      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1276      */
1277     raw_write(env, ri, value & ~0x1FULL);
1278 }
1279 
1280 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1281 {
1282     /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1283      * For bits that vary between AArch32/64, code needs to check the
1284      * current execution mode before directly using the feature bit.
1285      */
1286     uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
1287 
1288     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1289         valid_mask &= ~SCR_HCE;
1290 
1291         /* On ARMv7, SMD (or SCD as it is called in v7) is only
1292          * supported if EL2 exists. The bit is UNK/SBZP when
1293          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1294          * when EL2 is unavailable.
1295          * On ARMv8, this bit is always available.
1296          */
1297         if (arm_feature(env, ARM_FEATURE_V7) &&
1298             !arm_feature(env, ARM_FEATURE_V8)) {
1299             valid_mask &= ~SCR_SMD;
1300         }
1301     }
1302 
1303     /* Clear all-context RES0 bits.  */
1304     value &= valid_mask;
1305     raw_write(env, ri, value);
1306 }
1307 
1308 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1309 {
1310     ARMCPU *cpu = arm_env_get_cpu(env);
1311 
1312     /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1313      * bank
1314      */
1315     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1316                                         ri->secure & ARM_CP_SECSTATE_S);
1317 
1318     return cpu->ccsidr[index];
1319 }
1320 
1321 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1322                          uint64_t value)
1323 {
1324     raw_write(env, ri, value & 0xf);
1325 }
1326 
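/* ISR_EL1/ISR is a read-only summary of pending interrupts. When
 * HCR_EL2.IMO/FMO route IRQs/FIQs to EL2, the virtual interrupt lines
 * are reported instead of the physical ones.
 */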
1327 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1328 {
1329     CPUState *cs = ENV_GET_CPU(env);
1330     uint64_t ret = 0;
1331 
1332     if (arm_hcr_el2_imo(env)) {
1333         if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1334             ret |= CPSR_I;
1335         }
1336     } else {
1337         if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1338             ret |= CPSR_I;
1339         }
1340     }
1341 
1342     if (arm_hcr_el2_fmo(env)) {
1343         if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1344             ret |= CPSR_F;
1345         }
1346     } else {
1347         if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1348             ret |= CPSR_F;
1349         }
1350     }
1351 
1352     /* External aborts are not possible in QEMU so A bit is always clear */
1353     return ret;
1354 }
1355 
1356 static const ARMCPRegInfo v7_cp_reginfo[] = {
1357     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1358     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1359       .access = PL1_W, .type = ARM_CP_NOP },
1360     /* Performance monitors are implementation defined in v7,
1361      * but with an ARM recommended set of registers, which we
1362      * follow (although we don't actually implement any counters)
1363      *
1364      * Performance registers fall into three categories:
1365      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1366      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1367      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1368      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1369      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1370      */
1371     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1372       .access = PL0_RW, .type = ARM_CP_ALIAS,
1373       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1374       .writefn = pmcntenset_write,
1375       .accessfn = pmreg_access,
1376       .raw_writefn = raw_write },
1377     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1378       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1379       .access = PL0_RW, .accessfn = pmreg_access,
1380       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1381       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1382     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1383       .access = PL0_RW,
1384       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1385       .accessfn = pmreg_access,
1386       .writefn = pmcntenclr_write,
1387       .type = ARM_CP_ALIAS },
1388     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1389       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1390       .access = PL0_RW, .accessfn = pmreg_access,
1391       .type = ARM_CP_ALIAS,
1392       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1393       .writefn = pmcntenclr_write },
1394     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1395       .access = PL0_RW,
1396       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
1397       .accessfn = pmreg_access,
1398       .writefn = pmovsr_write,
1399       .raw_writefn = raw_write },
1400     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1401       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1402       .access = PL0_RW, .accessfn = pmreg_access,
1403       .type = ARM_CP_ALIAS,
1404       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1405       .writefn = pmovsr_write,
1406       .raw_writefn = raw_write },
1407     /* Unimplemented so WI. */
1408     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1409       .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
1410 #ifndef CONFIG_USER_ONLY
1411     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1412       .access = PL0_RW, .type = ARM_CP_ALIAS,
1413       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
1414       .accessfn = pmreg_access_selr, .writefn = pmselr_write,
1415       .raw_writefn = raw_write},
1416     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1417       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
1418       .access = PL0_RW, .accessfn = pmreg_access_selr,
1419       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
1420       .writefn = pmselr_write, .raw_writefn = raw_write, },
1421     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1422       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
1423       .readfn = pmccntr_read, .writefn = pmccntr_write32,
1424       .accessfn = pmreg_access_ccntr },
1425     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1426       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1427       .access = PL0_RW, .accessfn = pmreg_access_ccntr,
1428       .type = ARM_CP_IO,
1429       .readfn = pmccntr_read, .writefn = pmccntr_write, },
1430 #endif
1431     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1432       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
1433       .writefn = pmccfiltr_write,
1434       .access = PL0_RW, .accessfn = pmreg_access,
1435       .type = ARM_CP_IO,
1436       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1437       .resetvalue = 0, },
1438     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1439       .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
1440       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1441     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1442       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
1443       .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
1444       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1445     /* Unimplemented, RAZ/WI. */
1446     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1447       .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
1448       .accessfn = pmreg_access_xevcntr },
1449     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
1450       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
1451       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
1452       .resetvalue = 0,
1453       .writefn = pmuserenr_write, .raw_writefn = raw_write },
1454     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
1455       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
1456       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1457       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1458       .resetvalue = 0,
1459       .writefn = pmuserenr_write, .raw_writefn = raw_write },
1460     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
1461       .access = PL1_RW, .accessfn = access_tpm,
1462       .type = ARM_CP_ALIAS | ARM_CP_IO,
1463       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
1464       .resetvalue = 0,
1465       .writefn = pmintenset_write, .raw_writefn = raw_write },
1466     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
1467       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
1468       .access = PL1_RW, .accessfn = access_tpm,
1469       .type = ARM_CP_IO,
1470       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1471       .writefn = pmintenset_write, .raw_writefn = raw_write,
1472       .resetvalue = 0x0 },
1473     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
1474       .access = PL1_RW, .accessfn = access_tpm,
1475       .type = ARM_CP_ALIAS | ARM_CP_IO,
1476       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1477       .writefn = pmintenclr_write, },
1478     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
1479       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
1480       .access = PL1_RW, .accessfn = access_tpm,
1481       .type = ARM_CP_ALIAS | ARM_CP_IO,
1482       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1483       .writefn = pmintenclr_write },
1484     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
1485       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
1486       .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
1487     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
1488       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
1489       .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
1490       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
1491                              offsetof(CPUARMState, cp15.csselr_ns) } },
1492     /* Auxiliary ID register: this actually has an IMPDEF value but for now
1493      * we just RAZ it for all cores.
1494      */
1495     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
1496       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
1497       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1498     /* Auxiliary fault status registers: these also are IMPDEF, and we
1499      * choose to RAZ/WI for all cores.
1500      */
1501     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
1502       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
1503       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1504     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
1505       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
1506       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1507     /* MAIR can just read-as-written because we don't implement caches
1508      * and so don't need to care about memory attributes.
1509      */
1510     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
1511       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1512       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
1513       .resetvalue = 0 },
1514     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
1515       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
1516       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
1517       .resetvalue = 0 },
1518     /* For non-long-descriptor page tables these are PRRR and NMRR;
1519      * regardless, they still act as reads-as-written for QEMU.
1520      */
1521     /* MAIR0/1 are defined separately from their 64-bit counterpart so
1522      * that each can be given the correct banked fieldoffset; endianness
1523      * is handled by the field definitions themselves.
1524      */
1525     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1526       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
1527       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1528                              offsetof(CPUARMState, cp15.mair0_ns) },
1529       .resetfn = arm_cp_reset_ignore },
1530     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1531       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
1532       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1533                              offsetof(CPUARMState, cp15.mair1_ns) },
1534       .resetfn = arm_cp_reset_ignore },
1535     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1536       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1537       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1538     /* 32 bit ITLB invalidates */
1539     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
1540       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1541     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
1542       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1543     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
1544       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1545     /* 32 bit DTLB invalidates */
1546     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
1547       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1548     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
1549       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1550     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
1551       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1552     /* 32 bit TLB invalidates */
1553     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1554       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1555     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1556       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1557     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1558       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1559     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1560       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
1561     REGINFO_SENTINEL
1562 };
1563 
1564 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
1565     /* 32 bit TLB invalidates, Inner Shareable */
1566     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1567       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
1568     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
1569       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
1570     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
1571       .type = ARM_CP_NO_RAW, .access = PL1_W,
1572       .writefn = tlbiasid_is_write },
1573     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
1574       .type = ARM_CP_NO_RAW, .access = PL1_W,
1575       .writefn = tlbimvaa_is_write },
1576     REGINFO_SENTINEL
1577 };
1578 
1579 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1580                         uint64_t value)
1581 {
1582     value &= 1;
1583     env->teecr = value;
1584 }
1585 
1586 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1587                                     bool isread)
1588 {
1589     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1590         return CP_ACCESS_TRAP;
1591     }
1592     return CP_ACCESS_OK;
1593 }
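/* NOTE (editorial): the gate above means an EL0 access to TEEHBR traps
 * whenever TEECR bit 0 is set; accesses from EL1 and above are never
 * blocked here, and TEECR itself is PL1-only in the reginfo below.
 */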
1594 
1595 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1596     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1597       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1598       .resetvalue = 0,
1599       .writefn = teecr_write },
1600     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1601       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1602       .accessfn = teehbr_access, .resetvalue = 0 },
1603     REGINFO_SENTINEL
1604 };
1605 
1606 static const ARMCPRegInfo v6k_cp_reginfo[] = {
1607     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1608       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1609       .access = PL0_RW,
1610       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1611     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1612       .access = PL0_RW,
1613       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1614                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1615       .resetfn = arm_cp_reset_ignore },
1616     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1617       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1618       .access = PL0_R|PL1_W,
1619       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1620       .resetvalue = 0},
1621     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1622       .access = PL0_R|PL1_W,
1623       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1624                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1625       .resetfn = arm_cp_reset_ignore },
1626     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1627       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1628       .access = PL1_RW,
1629       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1630     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1631       .access = PL1_RW,
1632       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1633                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1634       .resetvalue = 0 },
1635     REGINFO_SENTINEL
1636 };
1637 
1638 #ifndef CONFIG_USER_ONLY
1639 
1640 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1641                                        bool isread)
1642 {
1643     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1644      * Writable only at the highest implemented exception level.
1645      */
1646     int el = arm_current_el(env);
1647 
1648     switch (el) {
1649     case 0:
1650         if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1651             return CP_ACCESS_TRAP;
1652         }
1653         break;
1654     case 1:
1655         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1656             arm_is_secure_below_el3(env)) {
1657             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1658             return CP_ACCESS_TRAP_UNCATEGORIZED;
1659         }
1660         break;
1661     case 2:
1662     case 3:
1663         break;
1664     }
1665 
1666     if (!isread && el < arm_highest_el(env)) {
1667         return CP_ACCESS_TRAP_UNCATEGORIZED;
1668     }
1669 
1670     return CP_ACCESS_OK;
1671 }
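/* NOTE (editorial): the EL0 case above tests CNTKCTL bits [1:0]
 * (PL0PCTEN/PL0VCTEN together): if both are clear, any EL0 access to
 * CNTFRQ traps. Independently, the final check limits writes to the
 * highest implemented EL, so e.g. on a CPU with EL3 a CNTFRQ write from
 * EL1 UNDEFs even though reads are permitted.
 */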
1672 
1673 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1674                                         bool isread)
1675 {
1676     unsigned int cur_el = arm_current_el(env);
1677     bool secure = arm_is_secure(env);
1678 
1679     /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1680     if (cur_el == 0 &&
1681         !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1682         return CP_ACCESS_TRAP;
1683     }
1684 
1685     if (arm_feature(env, ARM_FEATURE_EL2) &&
1686         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1687         !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1688         return CP_ACCESS_TRAP_EL2;
1689     }
1690     return CP_ACCESS_OK;
1691 }
1692 
1693 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1694                                       bool isread)
1695 {
1696     unsigned int cur_el = arm_current_el(env);
1697     bool secure = arm_is_secure(env);
1698 
1699     /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1700      * EL0[PV]TEN is zero.
1701      */
1702     if (cur_el == 0 &&
1703         !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1704         return CP_ACCESS_TRAP;
1705     }
1706 
1707     if (arm_feature(env, ARM_FEATURE_EL2) &&
1708         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1709         !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1710         return CP_ACCESS_TRAP_EL2;
1711     }
1712     return CP_ACCESS_OK;
1713 }
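/* NOTE (editorial): in the two helpers above, timeridx indexes CNTKCTL
 * directly for the counters (bit 0 PL0PCTEN, bit 1 PL0VCTEN), while
 * "9 - timeridx" maps GTIMER_PHYS (0) to PL0PTEN (bit 9) and GTIMER_VIRT
 * (1) to PL0VTEN (bit 8) for the timer registers. The CNTHCTL_EL2 tests
 * implement EL2 trapping of non-secure EL0/EL1 physical accesses
 * (EL1PCTEN at bit 0 for the counter, EL1PCEN at bit 1 for the timer).
 */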
1714 
1715 static CPAccessResult gt_pct_access(CPUARMState *env,
1716                                     const ARMCPRegInfo *ri,
1717                                     bool isread)
1718 {
1719     return gt_counter_access(env, GTIMER_PHYS, isread);
1720 }
1721 
1722 static CPAccessResult gt_vct_access(CPUARMState *env,
1723                                     const ARMCPRegInfo *ri,
1724                                     bool isread)
1725 {
1726     return gt_counter_access(env, GTIMER_VIRT, isread);
1727 }
1728 
1729 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1730                                        bool isread)
1731 {
1732     return gt_timer_access(env, GTIMER_PHYS, isread);
1733 }
1734 
1735 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1736                                        bool isread)
1737 {
1738     return gt_timer_access(env, GTIMER_VIRT, isread);
1739 }
1740 
1741 static CPAccessResult gt_stimer_access(CPUARMState *env,
1742                                        const ARMCPRegInfo *ri,
1743                                        bool isread)
1744 {
1745     /* The AArch64 register view of the secure physical timer is
1746      * always accessible from EL3, and configurably accessible from
1747      * Secure EL1.
1748      */
1749     switch (arm_current_el(env)) {
1750     case 1:
1751         if (!arm_is_secure(env)) {
1752             return CP_ACCESS_TRAP;
1753         }
1754         if (!(env->cp15.scr_el3 & SCR_ST)) {
1755             return CP_ACCESS_TRAP_EL3;
1756         }
1757         return CP_ACCESS_OK;
1758     case 0:
1759     case 2:
1760         return CP_ACCESS_TRAP;
1761     case 3:
1762         return CP_ACCESS_OK;
1763     default:
1764         g_assert_not_reached();
1765     }
1766 }
1767 
1768 static uint64_t gt_get_countervalue(CPUARMState *env)
1769 {
1770     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1771 }
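/* NOTE (editorial): QEMU_CLOCK_VIRTUAL advances in nanoseconds, so the
 * division above yields an emulated counter frequency of
 * (10^9 / GTIMER_SCALE) Hz -- the same value CNTFRQ resets to below.
 * With GTIMER_SCALE at its usual value of 16 that is a 62.5 MHz counter:
 * after 1 ms of virtual time (1000000 ns) the counter has advanced by
 * 1000000 / 16 = 62500 ticks.
 */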
1772 
1773 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1774 {
1775     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1776 
1777     if (gt->ctl & 1) {
1778         /* Timer enabled: calculate and set the current ISTATUS and IRQ
1779          * line, and re-arm the timer for when ISTATUS next has to change
1780          */
1781         uint64_t offset = timeridx == GTIMER_VIRT ?
1782                                       cpu->env.cp15.cntvoff_el2 : 0;
1783         uint64_t count = gt_get_countervalue(&cpu->env);
1784         /* Note that this must be unsigned 64-bit arithmetic: */
1785         int istatus = count - offset >= gt->cval;
1786         uint64_t nexttick;
1787         int irqstate;
1788 
1789         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1790 
1791         irqstate = (istatus && !(gt->ctl & 2));
1792         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1793 
1794         if (istatus) {
1795             /* Next transition is when count rolls back over to zero */
1796             nexttick = UINT64_MAX;
1797         } else {
1798             /* Next transition is when we hit cval */
1799             nexttick = gt->cval + offset;
1800         }
1801         /* Note that the desired next expiry time might be beyond the
1802          * signed-64-bit range of a QEMUTimer -- in this case we just
1803          * set the timer for as far in the future as possible. When the
1804          * timer expires we will reset the timer for any remaining period.
1805          */
1806         if (nexttick > INT64_MAX / GTIMER_SCALE) {
1807             nexttick = INT64_MAX / GTIMER_SCALE;
1808         }
1809         timer_mod(cpu->gt_timer[timeridx], nexttick);
1810         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
1811     } else {
1812         /* Timer disabled: ISTATUS and timer output always clear */
1813         gt->ctl &= ~4;
1814         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1815         timer_del(cpu->gt_timer[timeridx]);
1816         trace_arm_gt_recalc_disabled(timeridx);
1817     }
1818 }
1819 
1820 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1821                            int timeridx)
1822 {
1823     ARMCPU *cpu = arm_env_get_cpu(env);
1824 
1825     timer_del(cpu->gt_timer[timeridx]);
1826 }
1827 
1828 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1829 {
1830     return gt_get_countervalue(env);
1831 }
1832 
1833 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1834 {
1835     return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1836 }
1837 
1838 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1839                           int timeridx,
1840                           uint64_t value)
1841 {
1842     trace_arm_gt_cval_write(timeridx, value);
1843     env->cp15.c14_timer[timeridx].cval = value;
1844     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1845 }
1846 
1847 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1848                              int timeridx)
1849 {
1850     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1851 
1852     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1853                       (gt_get_countervalue(env) - offset));
1854 }
1855 
1856 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1857                           int timeridx,
1858                           uint64_t value)
1859 {
1860     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1861 
1862     trace_arm_gt_tval_write(timeridx, value);
1863     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1864                                          sextract64(value, 0, 32);
1865     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1866 }
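/* NOTE (editorial): TVAL is a signed 32-bit downcounting view of the same
 * state as CVAL: reads return (uint32_t)(cval - (count - offset)), writes
 * set cval = (count - offset) + sext32(value). A minimal sketch of the
 * round trip, assuming the counter currently reads 5000 ticks:
 */
#if 0 /* illustrative only, not part of the original file */
static void gt_tval_example(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_tval_write(env, ri, GTIMER_PHYS, 100);  /* cval becomes 5100 */
    uint64_t tval = gt_tval_read(env, ri, GTIMER_PHYS);  /* -> 100 */
    /* Once the counter passes 5100, the value read back is negative
     * when interpreted as a signed 32-bit quantity.
     */
    (void)tval;
}
#endif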
1867 
1868 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1869                          int timeridx,
1870                          uint64_t value)
1871 {
1872     ARMCPU *cpu = arm_env_get_cpu(env);
1873     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1874 
1875     trace_arm_gt_ctl_write(timeridx, value);
1876     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1877     if ((oldval ^ value) & 1) {
1878         /* Enable toggled */
1879         gt_recalc_timer(cpu, timeridx);
1880     } else if ((oldval ^ value) & 2) {
1881         /* IMASK toggled: don't need to recalculate,
1882          * just set the interrupt line based on ISTATUS
1883          */
1884         int irqstate = (oldval & 4) && !(value & 2);
1885 
1886         trace_arm_gt_imask_toggle(timeridx, irqstate);
1887         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1888     }
1889 }
1890 
1891 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1892 {
1893     gt_timer_reset(env, ri, GTIMER_PHYS);
1894 }
1895 
1896 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1897                                uint64_t value)
1898 {
1899     gt_cval_write(env, ri, GTIMER_PHYS, value);
1900 }
1901 
1902 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1903 {
1904     return gt_tval_read(env, ri, GTIMER_PHYS);
1905 }
1906 
1907 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1908                                uint64_t value)
1909 {
1910     gt_tval_write(env, ri, GTIMER_PHYS, value);
1911 }
1912 
1913 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1914                               uint64_t value)
1915 {
1916     gt_ctl_write(env, ri, GTIMER_PHYS, value);
1917 }
1918 
1919 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1920 {
1921     gt_timer_reset(env, ri, GTIMER_VIRT);
1922 }
1923 
1924 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1925                                uint64_t value)
1926 {
1927     gt_cval_write(env, ri, GTIMER_VIRT, value);
1928 }
1929 
1930 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1931 {
1932     return gt_tval_read(env, ri, GTIMER_VIRT);
1933 }
1934 
1935 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1936                                uint64_t value)
1937 {
1938     gt_tval_write(env, ri, GTIMER_VIRT, value);
1939 }
1940 
1941 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1942                               uint64_t value)
1943 {
1944     gt_ctl_write(env, ri, GTIMER_VIRT, value);
1945 }
1946 
1947 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1948                               uint64_t value)
1949 {
1950     ARMCPU *cpu = arm_env_get_cpu(env);
1951 
1952     trace_arm_gt_cntvoff_write(value);
1953     raw_write(env, ri, value);
1954     gt_recalc_timer(cpu, GTIMER_VIRT);
1955 }
1956 
1957 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1958 {
1959     gt_timer_reset(env, ri, GTIMER_HYP);
1960 }
1961 
1962 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1963                               uint64_t value)
1964 {
1965     gt_cval_write(env, ri, GTIMER_HYP, value);
1966 }
1967 
1968 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1969 {
1970     return gt_tval_read(env, ri, GTIMER_HYP);
1971 }
1972 
1973 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1974                               uint64_t value)
1975 {
1976     gt_tval_write(env, ri, GTIMER_HYP, value);
1977 }
1978 
1979 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1980                               uint64_t value)
1981 {
1982     gt_ctl_write(env, ri, GTIMER_HYP, value);
1983 }
1984 
1985 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1986 {
1987     gt_timer_reset(env, ri, GTIMER_SEC);
1988 }
1989 
1990 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1991                               uint64_t value)
1992 {
1993     gt_cval_write(env, ri, GTIMER_SEC, value);
1994 }
1995 
1996 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1997 {
1998     return gt_tval_read(env, ri, GTIMER_SEC);
1999 }
2000 
2001 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2002                               uint64_t value)
2003 {
2004     gt_tval_write(env, ri, GTIMER_SEC, value);
2005 }
2006 
2007 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2008                               uint64_t value)
2009 {
2010     gt_ctl_write(env, ri, GTIMER_SEC, value);
2011 }
2012 
2013 void arm_gt_ptimer_cb(void *opaque)
2014 {
2015     ARMCPU *cpu = opaque;
2016 
2017     gt_recalc_timer(cpu, GTIMER_PHYS);
2018 }
2019 
2020 void arm_gt_vtimer_cb(void *opaque)
2021 {
2022     ARMCPU *cpu = opaque;
2023 
2024     gt_recalc_timer(cpu, GTIMER_VIRT);
2025 }
2026 
2027 void arm_gt_htimer_cb(void *opaque)
2028 {
2029     ARMCPU *cpu = opaque;
2030 
2031     gt_recalc_timer(cpu, GTIMER_HYP);
2032 }
2033 
2034 void arm_gt_stimer_cb(void *opaque)
2035 {
2036     ARMCPU *cpu = opaque;
2037 
2038     gt_recalc_timer(cpu, GTIMER_SEC);
2039 }
2040 
2041 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2042     /* Note that CNTFRQ is purely reads-as-written for the benefit
2043      * of software; writing it doesn't actually change the timer frequency.
2044      * Our reset value matches the fixed frequency we implement the timer at.
2045      */
2046     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2047       .type = ARM_CP_ALIAS,
2048       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2049       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2050     },
2051     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2052       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2053       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2054       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2055       .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
2056     },
2057     /* overall control: mostly access permissions */
2058     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2059       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2060       .access = PL1_RW,
2061       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2062       .resetvalue = 0,
2063     },
2064     /* per-timer control */
2065     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2066       .secure = ARM_CP_SECSTATE_NS,
2067       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
2068       .accessfn = gt_ptimer_access,
2069       .fieldoffset = offsetoflow32(CPUARMState,
2070                                    cp15.c14_timer[GTIMER_PHYS].ctl),
2071       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2072     },
2073     { .name = "CNTP_CTL_S",
2074       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2075       .secure = ARM_CP_SECSTATE_S,
2076       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
2077       .accessfn = gt_ptimer_access,
2078       .fieldoffset = offsetoflow32(CPUARMState,
2079                                    cp15.c14_timer[GTIMER_SEC].ctl),
2080       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2081     },
2082     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2083       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2084       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
2085       .accessfn = gt_ptimer_access,
2086       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2087       .resetvalue = 0,
2088       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2089     },
2090     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2091       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
2092       .accessfn = gt_vtimer_access,
2093       .fieldoffset = offsetoflow32(CPUARMState,
2094                                    cp15.c14_timer[GTIMER_VIRT].ctl),
2095       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2096     },
2097     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2098       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2099       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
2100       .accessfn = gt_vtimer_access,
2101       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2102       .resetvalue = 0,
2103       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2104     },
2105     /* TimerValue views: 32-bit downcounting views of the underlying state */
2106     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2107       .secure = ARM_CP_SECSTATE_NS,
2108       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2109       .accessfn = gt_ptimer_access,
2110       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2111     },
2112     { .name = "CNTP_TVAL_S",
2113       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2114       .secure = ARM_CP_SECSTATE_S,
2115       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2116       .accessfn = gt_ptimer_access,
2117       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2118     },
2119     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2120       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2121       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2122       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2123       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2124     },
2125     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2126       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2127       .accessfn = gt_vtimer_access,
2128       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2129     },
2130     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2131       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2132       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2133       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2134       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2135     },
2136     /* The counter itself */
2137     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2138       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2139       .accessfn = gt_pct_access,
2140       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2141     },
2142     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2143       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2144       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2145       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2146     },
2147     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2148       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2149       .accessfn = gt_vct_access,
2150       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2151     },
2152     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2153       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2154       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2155       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2156     },
2157     /* Comparison value, indicating when the timer goes off */
2158     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2159       .secure = ARM_CP_SECSTATE_NS,
2160       .access = PL1_RW | PL0_R,
2161       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2162       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2163       .accessfn = gt_ptimer_access,
2164       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2165     },
2166     { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2167       .secure = ARM_CP_SECSTATE_S,
2168       .access = PL1_RW | PL0_R,
2169       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2170       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2171       .accessfn = gt_ptimer_access,
2172       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2173     },
2174     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2175       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2176       .access = PL1_RW | PL0_R,
2177       .type = ARM_CP_IO,
2178       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2179       .resetvalue = 0, .accessfn = gt_ptimer_access,
2180       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2181     },
2182     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2183       .access = PL1_RW | PL0_R,
2184       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2185       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2186       .accessfn = gt_vtimer_access,
2187       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2188     },
2189     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2190       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2191       .access = PL1_RW | PL0_R,
2192       .type = ARM_CP_IO,
2193       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2194       .resetvalue = 0, .accessfn = gt_vtimer_access,
2195       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2196     },
2197     /* Secure timer -- this is actually restricted to EL3 only, and
2198      * configurably to Secure EL1, via the accessfn.
2199      */
2200     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2201       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2202       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2203       .accessfn = gt_stimer_access,
2204       .readfn = gt_sec_tval_read,
2205       .writefn = gt_sec_tval_write,
2206       .resetfn = gt_sec_timer_reset,
2207     },
2208     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2209       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2210       .type = ARM_CP_IO, .access = PL1_RW,
2211       .accessfn = gt_stimer_access,
2212       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2213       .resetvalue = 0,
2214       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2215     },
2216     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2217       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2218       .type = ARM_CP_IO, .access = PL1_RW,
2219       .accessfn = gt_stimer_access,
2220       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2221       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2222     },
2223     REGINFO_SENTINEL
2224 };
2225 
2226 #else
2227 
2228 /* In user mode most of the generic timer registers are inaccessible;
2229  * however, modern kernels (4.12+) allow access to cntvct_el0.
2230  */
2231 
2232 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2233 {
2234     /* Currently we have no support for QEMUTimer in linux-user, so we
2235      * can't call gt_get_countervalue(env); instead we call the
2236      * lower-level clock functions directly.
2237      */
2238     return cpu_get_clock() / GTIMER_SCALE;
2239 }
2240 
2241 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2242     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2243       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2244       .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
2245       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2246       .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
2247     },
2248     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2249       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2250       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2251       .readfn = gt_virt_cnt_read,
2252     },
2253     REGINFO_SENTINEL
2254 };
2255 
2256 #endif
2257 
2258 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2259 {
2260     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2261         raw_write(env, ri, value);
2262     } else if (arm_feature(env, ARM_FEATURE_V7)) {
2263         raw_write(env, ri, value & 0xfffff6ff);
2264     } else {
2265         raw_write(env, ri, value & 0xfffff1ff);
2266     }
2267 }
2268 
2269 #ifndef CONFIG_USER_ONLY
2270 /* get_phys_addr() isn't present for user-mode-only targets */
2271 
2272 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2273                                  bool isread)
2274 {
2275     if (ri->opc2 & 4) {
2276         /* The ATS12NSO* operations must trap to EL3 if executed in
2277          * Secure EL1 (which can only happen if EL3 is AArch64).
2278          * They are simply UNDEF if executed from NS EL1.
2279          * They function normally from EL2 or EL3.
2280          */
2281         if (arm_current_el(env) == 1) {
2282             if (arm_is_secure_below_el3(env)) {
2283                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2284             }
2285             return CP_ACCESS_TRAP_UNCATEGORIZED;
2286         }
2287     }
2288     return CP_ACCESS_OK;
2289 }
2290 
2291 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2292                              MMUAccessType access_type, ARMMMUIdx mmu_idx)
2293 {
2294     hwaddr phys_addr;
2295     target_ulong page_size;
2296     int prot;
2297     bool ret;
2298     uint64_t par64;
2299     bool format64 = false;
2300     MemTxAttrs attrs = {};
2301     ARMMMUFaultInfo fi = {};
2302     ARMCacheAttrs cacheattrs = {};
2303 
2304     ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
2305                         &prot, &page_size, &fi, &cacheattrs);
2306 
2307     if (is_a64(env)) {
2308         format64 = true;
2309     } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
2310         /*
2311          * ATS1Cxx:
2312          * * TTBCR.EAE determines whether the result is returned using the
2313          *   32-bit or the 64-bit PAR format
2314          * * Instructions executed in Hyp mode always use the 64-bit format
2315          *
2316          * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
2317          * * The Non-secure TTBCR.EAE bit is set to 1
2318          * * The implementation includes EL2, and the value of HCR.VM is 1
2319          *
2320          * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
2321          *
2322          * ATS1Hx always uses the 64-bit format.
2323          */
2324         format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
2325 
2326         if (arm_feature(env, ARM_FEATURE_EL2)) {
2327             if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
2328                 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
2329             } else {
2330                 format64 |= arm_current_el(env) == 2;
2331             }
2332         }
2333     }
2334 
2335     if (format64) {
2336         /* Create a 64-bit PAR */
2337         par64 = (1 << 11); /* LPAE bit always set */
2338         if (!ret) {
2339             par64 |= phys_addr & ~0xfffULL;
2340             if (!attrs.secure) {
2341                 par64 |= (1 << 9); /* NS */
2342             }
2343             par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
2344             par64 |= cacheattrs.shareability << 7; /* SH */
2345         } else {
2346             uint32_t fsr = arm_fi_to_lfsc(&fi);
2347 
2348             par64 |= 1; /* F */
2349             par64 |= (fsr & 0x3f) << 1; /* FS */
2350             if (fi.stage2) {
2351                 par64 |= (1 << 9); /* S */
2352             }
2353             if (fi.s1ptw) {
2354                 par64 |= (1 << 8); /* PTW */
2355             }
2356         }
2357     } else {
2358         /* fsr is a DFSR/IFSR value for the short descriptor
2359          * translation table format (with WnR always clear).
2360          * Convert it to a 32-bit PAR.
2361          */
2362         if (!ret) {
2363             /* We do not set any attribute bits in the PAR */
2364             if (page_size == (1 << 24)
2365                 && arm_feature(env, ARM_FEATURE_V7)) {
2366                 par64 = (phys_addr & 0xff000000) | (1 << 1);
2367             } else {
2368                 par64 = phys_addr & 0xfffff000;
2369             }
2370             if (!attrs.secure) {
2371                 par64 |= (1 << 9); /* NS */
2372             }
2373         } else {
2374             uint32_t fsr = arm_fi_to_sfsc(&fi);
2375 
2376             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2377                     ((fsr & 0xf) << 1) | 1;
2378         }
2379     }
2380     return par64;
2381 }
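/* NOTE (editorial): summary of the 64-bit PAR value assembled above. On
 * success: F (bit 0) = 0, SH at bits [8:7], NS at bit 9, LPAE at bit 11,
 * the physical address above bit 12, and the MAIR-format memory
 * attributes at bits [63:56]. On a fault: F = 1, the long-descriptor
 * fault status code at bits [6:1], PTW at bit 8 and S (stage 2) at bit 9.
 */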
2382 
2383 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2384 {
2385     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2386     uint64_t par64;
2387     ARMMMUIdx mmu_idx;
2388     int el = arm_current_el(env);
2389     bool secure = arm_is_secure_below_el3(env);
2390 
2391     switch (ri->opc2 & 6) {
2392     case 0:
2393         /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2394         switch (el) {
2395         case 3:
2396             mmu_idx = ARMMMUIdx_S1E3;
2397             break;
2398         case 2:
2399             mmu_idx = ARMMMUIdx_S1NSE1;
2400             break;
2401         case 1:
2402             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2403             break;
2404         default:
2405             g_assert_not_reached();
2406         }
2407         break;
2408     case 2:
2409         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2410         switch (el) {
2411         case 3:
2412             mmu_idx = ARMMMUIdx_S1SE0;
2413             break;
2414         case 2:
2415             mmu_idx = ARMMMUIdx_S1NSE0;
2416             break;
2417         case 1:
2418             mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2419             break;
2420         default:
2421             g_assert_not_reached();
2422         }
2423         break;
2424     case 4:
2425         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2426         mmu_idx = ARMMMUIdx_S12NSE1;
2427         break;
2428     case 6:
2429         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2430         mmu_idx = ARMMMUIdx_S12NSE0;
2431         break;
2432     default:
2433         g_assert_not_reached();
2434     }
2435 
2436     par64 = do_ats_write(env, value, access_type, mmu_idx);
2437 
2438     A32_BANKED_CURRENT_REG_SET(env, par, par64);
2439 }
2440 
2441 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2442                         uint64_t value)
2443 {
2444     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2445     uint64_t par64;
2446 
2447     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
2448 
2449     A32_BANKED_CURRENT_REG_SET(env, par, par64);
2450 }
2451 
2452 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2453                                      bool isread)
2454 {
2455     if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2456         return CP_ACCESS_TRAP;
2457     }
2458     return CP_ACCESS_OK;
2459 }
2460 
2461 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2462                         uint64_t value)
2463 {
2464     MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2465     ARMMMUIdx mmu_idx;
2466     int secure = arm_is_secure_below_el3(env);
2467 
2468     switch (ri->opc2 & 6) {
2469     case 0:
2470         switch (ri->opc1) {
2471         case 0: /* AT S1E1R, AT S1E1W */
2472             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2473             break;
2474         case 4: /* AT S1E2R, AT S1E2W */
2475             mmu_idx = ARMMMUIdx_S1E2;
2476             break;
2477         case 6: /* AT S1E3R, AT S1E3W */
2478             mmu_idx = ARMMMUIdx_S1E3;
2479             break;
2480         default:
2481             g_assert_not_reached();
2482         }
2483         break;
2484     case 2: /* AT S1E0R, AT S1E0W */
2485         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2486         break;
2487     case 4: /* AT S12E1R, AT S12E1W */
2488         mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2489         break;
2490     case 6: /* AT S12E0R, AT S12E0W */
2491         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2492         break;
2493     default:
2494         g_assert_not_reached();
2495     }
2496 
2497     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2498 }
2499 #endif
2500 
2501 static const ARMCPRegInfo vapa_cp_reginfo[] = {
2502     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2503       .access = PL1_RW, .resetvalue = 0,
2504       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2505                              offsetoflow32(CPUARMState, cp15.par_ns) },
2506       .writefn = par_write },
2507 #ifndef CONFIG_USER_ONLY
2508     /* This underdecoding is safe because the reginfo is NO_RAW. */
2509     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2510       .access = PL1_W, .accessfn = ats_access,
2511       .writefn = ats_write, .type = ARM_CP_NO_RAW },
2512 #endif
2513     REGINFO_SENTINEL
2514 };
2515 
2516 /* Return basic MPU access permission bits.  */
2517 static uint32_t simple_mpu_ap_bits(uint32_t val)
2518 {
2519     uint32_t ret;
2520     uint32_t mask;
2521     int i;
2522     ret = 0;
2523     mask = 3;
2524     for (i = 0; i < 16; i += 2) {
2525         ret |= (val >> i) & mask;
2526         mask <<= 2;
2527     }
2528     return ret;
2529 }
2530 
2531 /* Pad basic MPU access permission bits to extended format.  */
2532 static uint32_t extended_mpu_ap_bits(uint32_t val)
2533 {
2534     uint32_t ret;
2535     uint32_t mask;
2536     int i;
2537     ret = 0;
2538     mask = 3;
2539     for (i = 0; i < 16; i += 2) {
2540         ret |= (val & mask) << i;
2541         mask <<= 2;
2542     }
2543     return ret;
2544 }
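/* NOTE (editorial): in the simple format, region n's 2-bit AP field sits
 * at bits [2n+1:2n]; the extended format gives each region a nibble, with
 * the AP bits at [4n+1:4n]. A minimal round-trip sketch:
 */
#if 0 /* illustrative only, not part of the original file */
static void mpu_ap_bits_example(void)
{
    uint32_t ext = 0x21;  /* region 0 AP = 1, region 1 AP = 2 */
    uint32_t simple = simple_mpu_ap_bits(ext);      /* -> 0x9 (0b1001) */
    g_assert(extended_mpu_ap_bits(simple) == ext);  /* -> 0x21 again */
}
#endif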
2545 
2546 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2547                                  uint64_t value)
2548 {
2549     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2550 }
2551 
2552 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2553 {
2554     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2555 }
2556 
2557 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2558                                  uint64_t value)
2559 {
2560     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2561 }
2562 
2563 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2564 {
2565     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2566 }
2567 
2568 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2569 {
2570     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2571 
2572     if (!u32p) {
2573         return 0;
2574     }
2575 
2576     u32p += env->pmsav7.rnr[M_REG_NS];
2577     return *u32p;
2578 }
2579 
2580 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2581                          uint64_t value)
2582 {
2583     ARMCPU *cpu = arm_env_get_cpu(env);
2584     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2585 
2586     if (!u32p) {
2587         return;
2588     }
2589 
2590     u32p += env->pmsav7.rnr[M_REG_NS];
2591     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2592     *u32p = value;
2593 }
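/* NOTE (editorial): DRBAR/DRSR/DRACR are windows into per-region arrays;
 * the two helpers above index the backing array by the currently selected
 * region (pmsav7.rnr, written via RGNR below). A guest therefore programs
 * region n by writing n to RGNR and then writing the three region
 * registers.
 */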
2594 
2595 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2596                               uint64_t value)
2597 {
2598     ARMCPU *cpu = arm_env_get_cpu(env);
2599     uint32_t nrgs = cpu->pmsav7_dregion;
2600 
2601     if (value >= nrgs) {
2602         qemu_log_mask(LOG_GUEST_ERROR,
2603                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2604                       " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2605         return;
2606     }
2607 
2608     raw_write(env, ri, value);
2609 }
2610 
2611 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2612     /* Reset for all these registers is handled in arm_cpu_reset(),
2613      * because the PMSAv7 is also used by M-profile CPUs, which do
2614      * not register cpregs but still need the state to be reset.
2615      */
2616     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2617       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2618       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2619       .readfn = pmsav7_read, .writefn = pmsav7_write,
2620       .resetfn = arm_cp_reset_ignore },
2621     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2622       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2623       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2624       .readfn = pmsav7_read, .writefn = pmsav7_write,
2625       .resetfn = arm_cp_reset_ignore },
2626     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2627       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2628       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2629       .readfn = pmsav7_read, .writefn = pmsav7_write,
2630       .resetfn = arm_cp_reset_ignore },
2631     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2632       .access = PL1_RW,
2633       .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
2634       .writefn = pmsav7_rgnr_write,
2635       .resetfn = arm_cp_reset_ignore },
2636     REGINFO_SENTINEL
2637 };
2638 
2639 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2640     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2641       .access = PL1_RW, .type = ARM_CP_ALIAS,
2642       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2643       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2644     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2645       .access = PL1_RW, .type = ARM_CP_ALIAS,
2646       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2647       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2648     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2649       .access = PL1_RW,
2650       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2651       .resetvalue = 0, },
2652     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2653       .access = PL1_RW,
2654       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2655       .resetvalue = 0, },
2656     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2657       .access = PL1_RW,
2658       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2659     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2660       .access = PL1_RW,
2661       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2662     /* Protection region base and size registers */
2663     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2664       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2665       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2666     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2667       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2668       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2669     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2670       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2671       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2672     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2673       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2674       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2675     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2676       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2677       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2678     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2679       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2680       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2681     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2682       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2683       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2684     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2685       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2686       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2687     REGINFO_SENTINEL
2688 };
2689 
2690 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2691                                  uint64_t value)
2692 {
2693     TCR *tcr = raw_ptr(env, ri);
2694     int maskshift = extract32(value, 0, 3);
2695 
2696     if (!arm_feature(env, ARM_FEATURE_V8)) {
2697         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2698             /* Pre-ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2699              * using the Long-descriptor translation table format */
2700             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2701         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2702             /* In an implementation that includes the Security Extensions
2703              * TTBCR has additional fields PD0 [4] and PD1 [5] for
2704              * Short-descriptor translation table format.
2705              */
2706             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2707         } else {
2708             value &= TTBCR_N;
2709         }
2710     }
2711 
2712     /* Update the masks corresponding to the TCR bank being written.
2713      * Note that we always calculate mask and base_mask, but
2714      * they are only used for short-descriptor tables (i.e. if EAE is 0);
2715      * for long-descriptor tables the TCR fields are used differently
2716      * and the mask and base_mask values are meaningless.
2717      */
2718     tcr->raw_tcr = value;
2719     tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2720     tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2721 }
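/* NOTE (editorial): a worked example of the short-descriptor masks above,
 * where maskshift is TTBCR.N (a VA whose top N bits are not all zero is
 * translated via TTBR1):
 */
#if 0 /* illustrative only, not part of the original file */
static void ttbcr_mask_example(void)
{
    int n = 2;                             /* TTBCR.N = 2 */
    uint32_t mask = ~(0xffffffffu >> n);   /* 0xc0000000: TTBR1 select */
    uint32_t base_mask = ~(0x3fffu >> n);  /* 0xfffff000: TTBR0 base is
                                            * aligned to 16KB >> N = 4KB */
    /* N = 0 degenerates to mask = 0 and base_mask = 0xffffc000, matching
     * vmsa_ttbcr_reset() below.
     */
    (void)mask; (void)base_mask;
}
#endif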
2722 
2723 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2724                              uint64_t value)
2725 {
2726     ARMCPU *cpu = arm_env_get_cpu(env);
2727 
2728     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2729         /* With LPAE the TTBCR could result in a change of ASID
2730          * via the TTBCR.A1 bit, so do a TLB flush.
2731          */
2732         tlb_flush(CPU(cpu));
2733     }
2734     vmsa_ttbcr_raw_write(env, ri, value);
2735 }
2736 
2737 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2738 {
2739     TCR *tcr = raw_ptr(env, ri);
2740 
2741     /* Reset both the TCR as well as the masks corresponding to the bank of
2742      * the TCR being reset.
2743      */
2744     tcr->raw_tcr = 0;
2745     tcr->mask = 0;
2746     tcr->base_mask = 0xffffc000u;
2747 }
2748 
2749 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2750                                uint64_t value)
2751 {
2752     ARMCPU *cpu = arm_env_get_cpu(env);
2753     TCR *tcr = raw_ptr(env, ri);
2754 
2755     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2756     tlb_flush(CPU(cpu));
2757     tcr->raw_tcr = value;
2758 }
2759 
2760 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2761                             uint64_t value)
2762 {
2763     /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
2764     if (cpreg_field_is_64bit(ri) &&
2765         extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2766         ARMCPU *cpu = arm_env_get_cpu(env);
2767         tlb_flush(CPU(cpu));
2768     }
2769     raw_write(env, ri, value);
2770 }
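/* NOTE (editorial): for the 64-bit TTBR registers the ASID occupies bits
 * [63:48], so the extract64(old ^ new, 48, 16) test above flushes the TLB
 * only when a write actually changes the ASID, not on every base-address
 * update.
 */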
2771 
2772 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2773                         uint64_t value)
2774 {
2775     ARMCPU *cpu = arm_env_get_cpu(env);
2776     CPUState *cs = CPU(cpu);
2777 
2778     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2779     if (raw_read(env, ri) != value) {
2780         tlb_flush_by_mmuidx(cs,
2781                             ARMMMUIdxBit_S12NSE1 |
2782                             ARMMMUIdxBit_S12NSE0 |
2783                             ARMMMUIdxBit_S2NS);
2784         raw_write(env, ri, value);
2785     }
2786 }
2787 
2788 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2789     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2790       .access = PL1_RW, .type = ARM_CP_ALIAS,
2791       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2792                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2793     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2794       .access = PL1_RW, .resetvalue = 0,
2795       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2796                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2797     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2798       .access = PL1_RW, .resetvalue = 0,
2799       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2800                              offsetof(CPUARMState, cp15.dfar_ns) } },
2801     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2802       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2803       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2804       .resetvalue = 0, },
2805     REGINFO_SENTINEL
2806 };
2807 
2808 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2809     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2810       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2811       .access = PL1_RW,
2812       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2813     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2814       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2815       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2816       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2817                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
2818     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2819       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2820       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2821       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2822                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
2823     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2824       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2825       .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2826       .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2827       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2828     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2829       .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2830       .raw_writefn = vmsa_ttbcr_raw_write,
2831       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2832                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2833     REGINFO_SENTINEL
2834 };
2835 
2836 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2837                                 uint64_t value)
2838 {
2839     env->cp15.c15_ticonfig = value & 0xe7;
2840     /* The OS_TYPE bit in this register changes the reported CPUID! */
2841     env->cp15.c0_cpuid = (value & (1 << 5)) ?
2842         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2843 }
2844 
2845 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2846                                 uint64_t value)
2847 {
2848     env->cp15.c15_threadid = value & 0xffff;
2849 }
2850 
2851 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2852                            uint64_t value)
2853 {
2854     /* Wait-for-interrupt (deprecated) */
2855     cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2856 }
2857 
2858 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2859                                   uint64_t value)
2860 {
2861     /* On OMAP there are registers indicating the max/min index of dcache lines
2862      * containing a dirty line; cache flush operations have to reset these.
2863      */
2864     env->cp15.c15_i_max = 0x000;
2865     env->cp15.c15_i_min = 0xff0;
2866 }
2867 
2868 static const ARMCPRegInfo omap_cp_reginfo[] = {
2869     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2870       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2871       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2872       .resetvalue = 0, },
2873     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2874       .access = PL1_RW, .type = ARM_CP_NOP },
2875     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2876       .access = PL1_RW,
2877       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2878       .writefn = omap_ticonfig_write },
2879     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2880       .access = PL1_RW,
2881       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2882     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2883       .access = PL1_RW, .resetvalue = 0xff0,
2884       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2885     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2886       .access = PL1_RW,
2887       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2888       .writefn = omap_threadid_write },
2889     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2890       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2891       .type = ARM_CP_NO_RAW,
2892       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2893     /* TODO: Peripheral port remap register:
2894      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2895      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2896      * when MMU is off.
2897      */
2898     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2899       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2900       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2901       .writefn = omap_cachemaint_write },
2902     { .name = "C9", .cp = 15, .crn = 9,
2903       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2904       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2905     REGINFO_SENTINEL
2906 };
2907 
2908 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2909                               uint64_t value)
2910 {
2911     env->cp15.c15_cpar = value & 0x3fff;
2912 }
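/* The 0x3fff mask keeps one enable bit per coprocessor, cp0..cp13
 * (bits [13:0]); there are no CPAR bits for cp14/cp15, access to which
 * the core always controls directly.
 */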
2913 
2914 static const ARMCPRegInfo xscale_cp_reginfo[] = {
2915     { .name = "XSCALE_CPAR",
2916       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2917       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2918       .writefn = xscale_cpar_write, },
2919     { .name = "XSCALE_AUXCR",
2920       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2921       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2922       .resetvalue = 0, },
2923     /* XScale specific cache-lockdown: since we have no cache we NOP these
2924      * and hope the guest does not really rely on cache behaviour.
2925      */
2926     { .name = "XSCALE_LOCK_ICACHE_LINE",
2927       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2928       .access = PL1_W, .type = ARM_CP_NOP },
2929     { .name = "XSCALE_UNLOCK_ICACHE",
2930       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2931       .access = PL1_W, .type = ARM_CP_NOP },
2932     { .name = "XSCALE_DCACHE_LOCK",
2933       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2934       .access = PL1_RW, .type = ARM_CP_NOP },
2935     { .name = "XSCALE_UNLOCK_DCACHE",
2936       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2937       .access = PL1_W, .type = ARM_CP_NOP },
2938     REGINFO_SENTINEL
2939 };
2940 
2941 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2942     /* RAZ/WI the whole crn=15 space when we don't have a more specific
2943      * implementation of this implementation-defined space.
2944      * Ideally this should eventually disappear in favour of actually
2945      * implementing the correct behaviour for all cores.
2946      */
2947     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2948       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2949       .access = PL1_RW,
2950       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2951       .resetvalue = 0 },
2952     REGINFO_SENTINEL
2953 };
2954 
2955 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2956     /* Cache status: RAZ because we have no cache so it's always clean */
2957     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2958       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2959       .resetvalue = 0 },
2960     REGINFO_SENTINEL
2961 };
2962 
2963 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2964     /* We never have a block transfer operation in progress */
2965     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2966       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2967       .resetvalue = 0 },
2968     /* The cache ops themselves: these all NOP for QEMU */
2969     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2970       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2971     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2972       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2973     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2974       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2975     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2976       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2977     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2978       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2979     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2980       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2981     REGINFO_SENTINEL
2982 };
2983 
2984 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2985     /* The cache test-and-clean instructions always return (1 << 30)
2986      * to indicate that there are no dirty cache lines.
2987      */
2988     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2989       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2990       .resetvalue = (1 << 30) },
2991     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2992       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2993       .resetvalue = (1 << 30) },
2994     REGINFO_SENTINEL
2995 };
2996 
2997 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2998     /* Ignore ReadBuffer accesses */
2999     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3000       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3001       .access = PL1_RW, .resetvalue = 0,
3002       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
3003     REGINFO_SENTINEL
3004 };
3005 
3006 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3007 {
3008     ARMCPU *cpu = arm_env_get_cpu(env);
3009     unsigned int cur_el = arm_current_el(env);
3010     bool secure = arm_is_secure(env);
3011 
3012     if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3013         return env->cp15.vpidr_el2;
3014     }
3015     return raw_read(env, ri);
3016 }
3017 
3018 static uint64_t mpidr_read_val(CPUARMState *env)
3019 {
3020     ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
3021     uint64_t mpidr = cpu->mp_affinity;
3022 
3023     if (arm_feature(env, ARM_FEATURE_V7MP)) {
3024         mpidr |= (1U << 31);
3025         /* Cores which are uniprocessor (non-coherent)
3026          * but still implement the MP extensions set
3027          * bit 30. (For instance, Cortex-R5).
3028          */
3029         if (cpu->mp_is_up) {
3030             mpidr |= (1u << 30);
3031         }
3032     }
3033     return mpidr;
3034 }
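/* Worked example: a core with mp_affinity 0x103 (Aff1 = 1, Aff0 = 3) and
 * the v7MP extensions reads MPIDR as (1U << 31) | 0x103 = 0x80000103; a
 * uniprocessor implementation like the Cortex-R5 additionally sets bit 30,
 * giving 0xc0000103.
 */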
3035 
3036 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3037 {
3038     unsigned int cur_el = arm_current_el(env);
3039     bool secure = arm_is_secure(env);
3040 
3041     if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3042         return env->cp15.vmpidr_el2;
3043     }
3044     return mpidr_read_val(env);
3045 }
3046 
3047 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
3048     { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
3049       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
3050       .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
3051     REGINFO_SENTINEL
3052 };
3053 
3054 static const ARMCPRegInfo lpae_cp_reginfo[] = {
3055     /* NOP AMAIR0/1 */
3056     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
3057       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
3058       .access = PL1_RW, .type = ARM_CP_CONST,
3059       .resetvalue = 0 },
3060     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
3061     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
3062       .access = PL1_RW, .type = ARM_CP_CONST,
3063       .resetvalue = 0 },
3064     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
3065       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3066       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3067                              offsetof(CPUARMState, cp15.par_ns)} },
3068     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
3069       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3070       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3071                              offsetof(CPUARMState, cp15.ttbr0_ns) },
3072       .writefn = vmsa_ttbr_write, },
3073     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
3074       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3075       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3076                              offsetof(CPUARMState, cp15.ttbr1_ns) },
3077       .writefn = vmsa_ttbr_write, },
3078     REGINFO_SENTINEL
3079 };
3080 
3081 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3082 {
3083     return vfp_get_fpcr(env);
3084 }
3085 
3086 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3087                             uint64_t value)
3088 {
3089     vfp_set_fpcr(env, value);
3090 }
3091 
3092 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3093 {
3094     return vfp_get_fpsr(env);
3095 }
3096 
3097 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3098                             uint64_t value)
3099 {
3100     vfp_set_fpsr(env, value);
3101 }
3102 
3103 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3104                                        bool isread)
3105 {
3106     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3107         return CP_ACCESS_TRAP;
3108     }
3109     return CP_ACCESS_OK;
3110 }
3111 
3112 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3113                             uint64_t value)
3114 {
3115     env->daif = value & PSTATE_DAIF;
3116 }
3117 
3118 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3119                                           const ARMCPRegInfo *ri,
3120                                           bool isread)
3121 {
3122     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3123      * SCTLR_EL1.UCI is set.
3124      */
3125     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3126         return CP_ACCESS_TRAP;
3127     }
3128     return CP_ACCESS_OK;
3129 }
3130 
3131 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3132  * Page D4-1736 (DDI0487A.b)
3133  */
3134 
3135 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3136                                       uint64_t value)
3137 {
3138     CPUState *cs = ENV_GET_CPU(env);
3139     bool sec = arm_is_secure_below_el3(env);
3140 
3141     if (sec) {
3142         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3143                                             ARMMMUIdxBit_S1SE1 |
3144                                             ARMMMUIdxBit_S1SE0);
3145     } else {
3146         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3147                                             ARMMMUIdxBit_S12NSE1 |
3148                                             ARMMMUIdxBit_S12NSE0);
3149     }
3150 }
3151 
3152 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3153                                     uint64_t value)
3154 {
3155     CPUState *cs = ENV_GET_CPU(env);
3156 
3157     if (tlb_force_broadcast(env)) {
3158         tlbi_aa64_vmalle1is_write(env, NULL, value);
3159         return;
3160     }
3161 
3162     if (arm_is_secure_below_el3(env)) {
3163         tlb_flush_by_mmuidx(cs,
3164                             ARMMMUIdxBit_S1SE1 |
3165                             ARMMMUIdxBit_S1SE0);
3166     } else {
3167         tlb_flush_by_mmuidx(cs,
3168                             ARMMMUIdxBit_S12NSE1 |
3169                             ARMMMUIdxBit_S12NSE0);
3170     }
3171 }
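/* tlb_force_broadcast() implements HCR_EL2.FB ("force broadcast"): when
 * that bit is set, TLB maintenance requests from Non-secure EL1 must
 * behave as their Inner Shareable (broadcast) equivalents, which is why
 * the non-IS write functions delegate to their *is_write counterparts.
 */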
3172 
3173 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3174                                   uint64_t value)
3175 {
3176     /* Note that the 'ALL' scope must invalidate both stage 1 and
3177      * stage 2 translations, whereas most other scopes only invalidate
3178      * stage 1 translations.
3179      */
3180     ARMCPU *cpu = arm_env_get_cpu(env);
3181     CPUState *cs = CPU(cpu);
3182 
3183     if (arm_is_secure_below_el3(env)) {
3184         tlb_flush_by_mmuidx(cs,
3185                             ARMMMUIdxBit_S1SE1 |
3186                             ARMMMUIdxBit_S1SE0);
3187     } else {
3188         if (arm_feature(env, ARM_FEATURE_EL2)) {
3189             tlb_flush_by_mmuidx(cs,
3190                                 ARMMMUIdxBit_S12NSE1 |
3191                                 ARMMMUIdxBit_S12NSE0 |
3192                                 ARMMMUIdxBit_S2NS);
3193         } else {
3194             tlb_flush_by_mmuidx(cs,
3195                                 ARMMMUIdxBit_S12NSE1 |
3196                                 ARMMMUIdxBit_S12NSE0);
3197         }
3198     }
3199 }
3200 
3201 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3202                                   uint64_t value)
3203 {
3204     ARMCPU *cpu = arm_env_get_cpu(env);
3205     CPUState *cs = CPU(cpu);
3206 
3207     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3208 }
3209 
3210 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3211                                   uint64_t value)
3212 {
3213     ARMCPU *cpu = arm_env_get_cpu(env);
3214     CPUState *cs = CPU(cpu);
3215 
3216     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3217 }
3218 
3219 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3220                                     uint64_t value)
3221 {
3222     /* Note that the 'ALL' scope must invalidate both stage 1 and
3223      * stage 2 translations, whereas most other scopes only invalidate
3224      * stage 1 translations.
3225      */
3226     CPUState *cs = ENV_GET_CPU(env);
3227     bool sec = arm_is_secure_below_el3(env);
3228     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3229 
3230     if (sec) {
3231         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3232                                             ARMMMUIdxBit_S1SE1 |
3233                                             ARMMMUIdxBit_S1SE0);
3234     } else if (has_el2) {
3235         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3236                                             ARMMMUIdxBit_S12NSE1 |
3237                                             ARMMMUIdxBit_S12NSE0 |
3238                                             ARMMMUIdxBit_S2NS);
3239     } else {
3240         tlb_flush_by_mmuidx_all_cpus_synced(cs,
3241                                             ARMMMUIdxBit_S12NSE1 |
3242                                             ARMMMUIdxBit_S12NSE0);
3243     }
3244 }
3245 
3246 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3247                                     uint64_t value)
3248 {
3249     CPUState *cs = ENV_GET_CPU(env);
3250 
3251     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3252 }
3253 
3254 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3255                                     uint64_t value)
3256 {
3257     CPUState *cs = ENV_GET_CPU(env);
3258 
3259     tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3260 }
3261 
3262 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3263                                  uint64_t value)
3264 {
3265     /* Invalidate by VA, EL2
3266      * Currently handles both VAE2 and VALE2, since we don't support
3267      * flush-last-level-only.
3268      */
3269     ARMCPU *cpu = arm_env_get_cpu(env);
3270     CPUState *cs = CPU(cpu);
3271     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3272 
3273     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3274 }
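/* The argument to a TLBI-by-VA operation carries VA[55:12] in its low
 * 44 bits, so "sextract64(value << 12, 0, 56)" rebuilds the page address:
 * the shift moves the field up to bits [55:12] and the 56-bit signed
 * extract replicates bit 55 into the top byte. For example:
 *
 *     value    = 0x00000fffffffffffULL;            // VA[55:12] all ones
 *     pageaddr = sextract64(value << 12, 0, 56);   // 0xfffffffffffff000
 *
 * The IPA-based ops further down use a 48-bit extract instead, as an IPA
 * here is at most 48 bits wide.
 */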
3275 
3276 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3277                                  uint64_t value)
3278 {
3279     /* Invalidate by VA, EL3
3280      * Currently handles both VAE3 and VALE3, since we don't support
3281      * flush-last-level-only.
3282      */
3283     ARMCPU *cpu = arm_env_get_cpu(env);
3284     CPUState *cs = CPU(cpu);
3285     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3286 
3287     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3288 }
3289 
3290 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3291                                    uint64_t value)
3292 {
3293     ARMCPU *cpu = arm_env_get_cpu(env);
3294     CPUState *cs = CPU(cpu);
3295     bool sec = arm_is_secure_below_el3(env);
3296     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3297 
3298     if (sec) {
3299         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3300                                                  ARMMMUIdxBit_S1SE1 |
3301                                                  ARMMMUIdxBit_S1SE0);
3302     } else {
3303         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3304                                                  ARMMMUIdxBit_S12NSE1 |
3305                                                  ARMMMUIdxBit_S12NSE0);
3306     }
3307 }
3308 
3309 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3310                                  uint64_t value)
3311 {
3312     /* Invalidate by VA, EL1&0 (AArch64 version).
3313      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3314      * since we don't support flush-for-specific-ASID-only or
3315      * flush-last-level-only.
3316      */
3317     ARMCPU *cpu = arm_env_get_cpu(env);
3318     CPUState *cs = CPU(cpu);
3319     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3320 
3321     if (tlb_force_broadcast(env)) {
3322         tlbi_aa64_vae1is_write(env, NULL, value);
3323         return;
3324     }
3325 
3326     if (arm_is_secure_below_el3(env)) {
3327         tlb_flush_page_by_mmuidx(cs, pageaddr,
3328                                  ARMMMUIdxBit_S1SE1 |
3329                                  ARMMMUIdxBit_S1SE0);
3330     } else {
3331         tlb_flush_page_by_mmuidx(cs, pageaddr,
3332                                  ARMMMUIdxBit_S12NSE1 |
3333                                  ARMMMUIdxBit_S12NSE0);
3334     }
3335 }
3336 
3337 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3338                                    uint64_t value)
3339 {
3340     CPUState *cs = ENV_GET_CPU(env);
3341     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3342 
3343     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3344                                              ARMMMUIdxBit_S1E2);
3345 }
3346 
3347 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3348                                    uint64_t value)
3349 {
3350     CPUState *cs = ENV_GET_CPU(env);
3351     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3352 
3353     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3354                                              ARMMMUIdxBit_S1E3);
3355 }
3356 
3357 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3358                                     uint64_t value)
3359 {
3360     /* Invalidate by IPA. This has to invalidate any structures that
3361      * contain only stage 2 translation information, but does not need
3362      * to apply to structures that contain combined stage 1 and stage 2
3363      * translation information.
3364      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3365      */
3366     ARMCPU *cpu = arm_env_get_cpu(env);
3367     CPUState *cs = CPU(cpu);
3368     uint64_t pageaddr;
3369 
3370     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3371         return;
3372     }
3373 
3374     pageaddr = sextract64(value << 12, 0, 48);
3375 
3376     tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3377 }
3378 
3379 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3380                                       uint64_t value)
3381 {
3382     CPUState *cs = ENV_GET_CPU(env);
3383     uint64_t pageaddr;
3384 
3385     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3386         return;
3387     }
3388 
3389     pageaddr = sextract64(value << 12, 0, 48);
3390 
3391     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3392                                              ARMMMUIdxBit_S2NS);
3393 }
3394 
3395 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3396                                       bool isread)
3397 {
3398     /* We don't implement EL2, so the only control on DC ZVA is the
3399      * bit in the SCTLR which can prohibit access for EL0.
3400      */
3401     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3402         return CP_ACCESS_TRAP;
3403     }
3404     return CP_ACCESS_OK;
3405 }
3406 
3407 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3408 {
3409     ARMCPU *cpu = arm_env_get_cpu(env);
3410     int dzp_bit = 1 << 4;
3411 
3412     /* DZP indicates whether DC ZVA access is allowed */
3413     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3414         dzp_bit = 0;
3415     }
3416     return cpu->dcz_blocksize | dzp_bit;
3417 }
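/* DCZID_EL0 layout: BS, bits [3:0], is log2 of the block size in words;
 * DZP, bit 4, reads as 1 when DC ZVA is prohibited. With the typical
 * dcz_blocksize of 4 this register therefore reads as 0x4 when ZVA is
 * allowed (a 16-word, i.e. 64-byte, block) and 0x14 when it is not.
 */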
3418 
3419 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3420                                     bool isread)
3421 {
3422     if (!(env->pstate & PSTATE_SP)) {
3423         /* Access to SP_EL0 is undefined if it's being used as
3424          * the stack pointer.
3425          */
3426         return CP_ACCESS_TRAP_UNCATEGORIZED;
3427     }
3428     return CP_ACCESS_OK;
3429 }
3430 
3431 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3432 {
3433     return env->pstate & PSTATE_SP;
3434 }
3435 
3436 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3437 {
3438     update_spsel(env, val);
3439 }
3440 
3441 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3442                         uint64_t value)
3443 {
3444     ARMCPU *cpu = arm_env_get_cpu(env);
3445 
3446     if (raw_read(env, ri) == value) {
3447         /* Skip the TLB flush if nothing actually changed; Linux likes
3448          * to do a lot of pointless SCTLR writes.
3449          */
3450         return;
3451     }
3452 
3453     if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3454         /* M bit is RAZ/WI for PMSA with no MPU implemented */
3455         value &= ~SCTLR_M;
3456     }
3457 
3458     raw_write(env, ri, value);
3459     /* ??? Lots of these bits are not implemented.  */
3460     /* This may enable/disable the MMU, so do a TLB flush.  */
3461     tlb_flush(CPU(cpu));
3462 }
3463 
3464 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3465                                      bool isread)
3466 {
3467     if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3468         return CP_ACCESS_TRAP_FP_EL2;
3469     }
3470     if (env->cp15.cptr_el[3] & CPTR_TFP) {
3471         return CP_ACCESS_TRAP_FP_EL3;
3472     }
3473     return CP_ACCESS_OK;
3474 }
3475 
3476 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3477                        uint64_t value)
3478 {
3479     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3480 }
3481 
3482 static const ARMCPRegInfo v8_cp_reginfo[] = {
3483     /* Minimal set of EL0-visible registers. This will need to be expanded
3484      * significantly for system emulation of AArch64 CPUs.
3485      */
3486     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3487       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3488       .access = PL0_RW, .type = ARM_CP_NZCV },
3489     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3490       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3491       .type = ARM_CP_NO_RAW,
3492       .access = PL0_RW, .accessfn = aa64_daif_access,
3493       .fieldoffset = offsetof(CPUARMState, daif),
3494       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3495     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3496       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3497       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
3498       .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3499     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3500       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3501       .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
3502       .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3503     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3504       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3505       .access = PL0_R, .type = ARM_CP_NO_RAW,
3506       .readfn = aa64_dczid_read },
3507     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3508       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3509       .access = PL0_W, .type = ARM_CP_DC_ZVA,
3510 #ifndef CONFIG_USER_ONLY
3511       /* Avoid overhead of an access check that always passes in user-mode */
3512       .accessfn = aa64_zva_access,
3513 #endif
3514     },
3515     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3516       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3517       .access = PL1_R, .type = ARM_CP_CURRENTEL },
3518     /* Cache ops: all NOPs since we don't emulate caches */
3519     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3520       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3521       .access = PL1_W, .type = ARM_CP_NOP },
3522     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3523       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3524       .access = PL1_W, .type = ARM_CP_NOP },
3525     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3526       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3527       .access = PL0_W, .type = ARM_CP_NOP,
3528       .accessfn = aa64_cacheop_access },
3529     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3530       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3531       .access = PL1_W, .type = ARM_CP_NOP },
3532     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3533       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3534       .access = PL1_W, .type = ARM_CP_NOP },
3535     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3536       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3537       .access = PL0_W, .type = ARM_CP_NOP,
3538       .accessfn = aa64_cacheop_access },
3539     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3540       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3541       .access = PL1_W, .type = ARM_CP_NOP },
3542     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3543       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3544       .access = PL0_W, .type = ARM_CP_NOP,
3545       .accessfn = aa64_cacheop_access },
3546     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3547       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3548       .access = PL0_W, .type = ARM_CP_NOP,
3549       .accessfn = aa64_cacheop_access },
3550     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3551       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3552       .access = PL1_W, .type = ARM_CP_NOP },
3553     /* TLBI operations */
3554     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3555       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3556       .access = PL1_W, .type = ARM_CP_NO_RAW,
3557       .writefn = tlbi_aa64_vmalle1is_write },
3558     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3559       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3560       .access = PL1_W, .type = ARM_CP_NO_RAW,
3561       .writefn = tlbi_aa64_vae1is_write },
3562     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3563       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3564       .access = PL1_W, .type = ARM_CP_NO_RAW,
3565       .writefn = tlbi_aa64_vmalle1is_write },
3566     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3567       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3568       .access = PL1_W, .type = ARM_CP_NO_RAW,
3569       .writefn = tlbi_aa64_vae1is_write },
3570     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3571       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3572       .access = PL1_W, .type = ARM_CP_NO_RAW,
3573       .writefn = tlbi_aa64_vae1is_write },
3574     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3575       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3576       .access = PL1_W, .type = ARM_CP_NO_RAW,
3577       .writefn = tlbi_aa64_vae1is_write },
3578     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3579       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3580       .access = PL1_W, .type = ARM_CP_NO_RAW,
3581       .writefn = tlbi_aa64_vmalle1_write },
3582     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3583       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3584       .access = PL1_W, .type = ARM_CP_NO_RAW,
3585       .writefn = tlbi_aa64_vae1_write },
3586     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3587       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3588       .access = PL1_W, .type = ARM_CP_NO_RAW,
3589       .writefn = tlbi_aa64_vmalle1_write },
3590     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3591       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3592       .access = PL1_W, .type = ARM_CP_NO_RAW,
3593       .writefn = tlbi_aa64_vae1_write },
3594     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3595       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3596       .access = PL1_W, .type = ARM_CP_NO_RAW,
3597       .writefn = tlbi_aa64_vae1_write },
3598     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3599       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3600       .access = PL1_W, .type = ARM_CP_NO_RAW,
3601       .writefn = tlbi_aa64_vae1_write },
3602     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3603       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3604       .access = PL2_W, .type = ARM_CP_NO_RAW,
3605       .writefn = tlbi_aa64_ipas2e1is_write },
3606     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3607       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3608       .access = PL2_W, .type = ARM_CP_NO_RAW,
3609       .writefn = tlbi_aa64_ipas2e1is_write },
3610     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3611       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3612       .access = PL2_W, .type = ARM_CP_NO_RAW,
3613       .writefn = tlbi_aa64_alle1is_write },
3614     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3615       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3616       .access = PL2_W, .type = ARM_CP_NO_RAW,
3617       .writefn = tlbi_aa64_alle1is_write },
3618     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3619       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3620       .access = PL2_W, .type = ARM_CP_NO_RAW,
3621       .writefn = tlbi_aa64_ipas2e1_write },
3622     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3623       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3624       .access = PL2_W, .type = ARM_CP_NO_RAW,
3625       .writefn = tlbi_aa64_ipas2e1_write },
3626     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3627       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3628       .access = PL2_W, .type = ARM_CP_NO_RAW,
3629       .writefn = tlbi_aa64_alle1_write },
3630     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3631       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3632       .access = PL2_W, .type = ARM_CP_NO_RAW,
3633       .writefn = tlbi_aa64_alle1is_write },
3634 #ifndef CONFIG_USER_ONLY
3635     /* 64 bit address translation operations */
3636     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3637       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3638       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3639     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3640       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3641       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3642     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3643       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3644       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3645     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3646       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3647       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3648     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3649       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3650       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3651     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3652       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3653       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3654     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3655       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3656       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3657     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3658       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3659       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3660     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3661     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3662       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3663       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3664     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3665       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3666       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3667     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3668       .type = ARM_CP_ALIAS,
3669       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3670       .access = PL1_RW, .resetvalue = 0,
3671       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3672       .writefn = par_write },
3673 #endif
3674     /* TLB invalidate last level of translation table walk */
3675     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3676       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3677     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3678       .type = ARM_CP_NO_RAW, .access = PL1_W,
3679       .writefn = tlbimvaa_is_write },
3680     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3681       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3682     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3683       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3684     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3685       .type = ARM_CP_NO_RAW, .access = PL2_W,
3686       .writefn = tlbimva_hyp_write },
3687     { .name = "TLBIMVALHIS",
3688       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3689       .type = ARM_CP_NO_RAW, .access = PL2_W,
3690       .writefn = tlbimva_hyp_is_write },
3691     { .name = "TLBIIPAS2",
3692       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3693       .type = ARM_CP_NO_RAW, .access = PL2_W,
3694       .writefn = tlbiipas2_write },
3695     { .name = "TLBIIPAS2IS",
3696       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3697       .type = ARM_CP_NO_RAW, .access = PL2_W,
3698       .writefn = tlbiipas2_is_write },
3699     { .name = "TLBIIPAS2L",
3700       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3701       .type = ARM_CP_NO_RAW, .access = PL2_W,
3702       .writefn = tlbiipas2_write },
3703     { .name = "TLBIIPAS2LIS",
3704       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3705       .type = ARM_CP_NO_RAW, .access = PL2_W,
3706       .writefn = tlbiipas2_is_write },
3707     /* 32 bit cache operations */
3708     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3709       .type = ARM_CP_NOP, .access = PL1_W },
3710     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3711       .type = ARM_CP_NOP, .access = PL1_W },
3712     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3713       .type = ARM_CP_NOP, .access = PL1_W },
3714     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3715       .type = ARM_CP_NOP, .access = PL1_W },
3716     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3717       .type = ARM_CP_NOP, .access = PL1_W },
3718     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3719       .type = ARM_CP_NOP, .access = PL1_W },
3720     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3721       .type = ARM_CP_NOP, .access = PL1_W },
3722     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3723       .type = ARM_CP_NOP, .access = PL1_W },
3724     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3725       .type = ARM_CP_NOP, .access = PL1_W },
3726     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3727       .type = ARM_CP_NOP, .access = PL1_W },
3728     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3729       .type = ARM_CP_NOP, .access = PL1_W },
3730     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3731       .type = ARM_CP_NOP, .access = PL1_W },
3732     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3733       .type = ARM_CP_NOP, .access = PL1_W },
3734     /* MMU Domain access control / MPU write buffer control */
3735     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3736       .access = PL1_RW, .resetvalue = 0,
3737       .writefn = dacr_write, .raw_writefn = raw_write,
3738       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3739                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3740     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3741       .type = ARM_CP_ALIAS,
3742       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3743       .access = PL1_RW,
3744       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3745     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3746       .type = ARM_CP_ALIAS,
3747       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3748       .access = PL1_RW,
3749       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3750     /* We rely on the access checks not allowing the guest to write to the
3751      * state field when SPSel indicates that it's being used as the stack
3752      * pointer.
3753      */
3754     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3755       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3756       .access = PL1_RW, .accessfn = sp_el0_access,
3757       .type = ARM_CP_ALIAS,
3758       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3759     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3760       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3761       .access = PL2_RW, .type = ARM_CP_ALIAS,
3762       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3763     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3764       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3765       .type = ARM_CP_NO_RAW,
3766       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3767     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3768       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3769       .type = ARM_CP_ALIAS,
3770       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3771       .access = PL2_RW, .accessfn = fpexc32_access },
3772     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3773       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3774       .access = PL2_RW, .resetvalue = 0,
3775       .writefn = dacr_write, .raw_writefn = raw_write,
3776       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3777     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3778       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3779       .access = PL2_RW, .resetvalue = 0,
3780       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3781     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3782       .type = ARM_CP_ALIAS,
3783       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3784       .access = PL2_RW,
3785       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3786     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3787       .type = ARM_CP_ALIAS,
3788       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3789       .access = PL2_RW,
3790       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3791     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3792       .type = ARM_CP_ALIAS,
3793       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3794       .access = PL2_RW,
3795       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3796     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3797       .type = ARM_CP_ALIAS,
3798       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3799       .access = PL2_RW,
3800       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3801     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3802       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3803       .resetvalue = 0,
3804       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3805     { .name = "SDCR", .type = ARM_CP_ALIAS,
3806       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3807       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3808       .writefn = sdcr_write,
3809       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3810     REGINFO_SENTINEL
3811 };
3812 
3813 /* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3814 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3815     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
3816       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3817       .access = PL2_RW,
3818       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3819     { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
3821       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3822       .access = PL2_RW,
3823       .type = ARM_CP_CONST, .resetvalue = 0 },
3824     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
3825       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3826       .access = PL2_RW,
3827       .type = ARM_CP_CONST, .resetvalue = 0 },
3828     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3829       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3830       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3831     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3832       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3833       .access = PL2_RW, .type = ARM_CP_CONST,
3834       .resetvalue = 0 },
3835     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3836       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3837       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3838     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3839       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3840       .access = PL2_RW, .type = ARM_CP_CONST,
3841       .resetvalue = 0 },
3842     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3843       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3844       .access = PL2_RW, .type = ARM_CP_CONST,
3845       .resetvalue = 0 },
3846     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3847       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3848       .access = PL2_RW, .type = ARM_CP_CONST,
3849       .resetvalue = 0 },
3850     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3851       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3852       .access = PL2_RW, .type = ARM_CP_CONST,
3853       .resetvalue = 0 },
3854     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3855       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3856       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3857     { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3858       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3859       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3860       .type = ARM_CP_CONST, .resetvalue = 0 },
3861     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3862       .cp = 15, .opc1 = 6, .crm = 2,
3863       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3864       .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3865     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3866       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3867       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3868     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3869       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3870       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3871     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3872       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3873       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3874     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3875       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3876       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3877     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3878       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3879       .resetvalue = 0 },
3880     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3881       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3882       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3883     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3884       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3885       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3886     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3887       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3888       .resetvalue = 0 },
3889     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3890       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3891       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3892     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3893       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3894       .resetvalue = 0 },
3895     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3896       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3897       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3898     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3899       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3900       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3901     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3902       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3903       .access = PL2_RW, .accessfn = access_tda,
3904       .type = ARM_CP_CONST, .resetvalue = 0 },
3905     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3906       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3907       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3908       .type = ARM_CP_CONST, .resetvalue = 0 },
3909     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3910       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3911       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3912     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
3913       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3914       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3915     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
3916       .type = ARM_CP_CONST,
3917       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
3918       .access = PL2_RW, .resetvalue = 0 },
3919     REGINFO_SENTINEL
3920 };
3921 
3922 /* Ditto, but for registers which exist in ARMv8 but not v7 */
3923 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
3924     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
3925       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
3926       .access = PL2_RW,
3927       .type = ARM_CP_CONST, .resetvalue = 0 },
3928     REGINFO_SENTINEL
3929 };
3930 
3931 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3932 {
3933     ARMCPU *cpu = arm_env_get_cpu(env);
3934     uint64_t valid_mask = HCR_MASK;
3935 
3936     if (arm_feature(env, ARM_FEATURE_EL3)) {
3937         valid_mask &= ~HCR_HCD;
3938     } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
3939         /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
3940          * However, if we're using the SMC PSCI conduit then QEMU is
3941          * effectively acting like EL3 firmware and so the guest at
3942          * EL2 should retain the ability to prevent EL1 from being
3943          * able to make SMC calls into the ersatz firmware, so in
3944          * that case HCR.TSC should be read/write.
3945          */
3946         valid_mask &= ~HCR_TSC;
3947     }
3948 
3949     /* Clear RES0 bits.  */
3950     value &= valid_mask;
3951 
3952     /* These bits change the MMU setup:
3953      * HCR_VM enables stage 2 translation
3954      * HCR_PTW forbids certain page-table setups
3955      * HCR_DC disables stage 1 and enables stage 2 translation
3956      */
3957     if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3958         tlb_flush(CPU(cpu));
3959     }
3960     env->cp15.hcr_el2 = value;
3961 
3962     /*
3963      * Updates to VI and VF require us to update the status of
3964      * virtual interrupts, which are the logical OR of these bits
3965      * and the state of the input lines from the GIC. (This requires
3966      * that we have the iothread lock, which is done by marking the
3967      * reginfo structs as ARM_CP_IO.)
3968      * Note that if a write to HCR pends a VIRQ or VFIQ it is never
3969      * possible for it to be taken immediately, because VIRQ and
3970      * VFIQ are masked unless running at EL0 or EL1, and HCR
3971      * can only be written at EL2.
3972      */
3973     g_assert(qemu_mutex_iothread_locked());
3974     arm_cpu_update_virq(cpu);
3975     arm_cpu_update_vfiq(cpu);
3976 }
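/* The "logical OR" above means, in effect (see arm_cpu_update_virq() for
 * the real code):
 *
 *     new_virq = (env->cp15.hcr_el2 & HCR_VI) || <VIRQ line from the GIC>;
 *
 * and similarly for HCR_VF and VFIQ, so a write here can both pend and
 * unpend a virtual interrupt.
 */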
3977 
3978 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
3979                           uint64_t value)
3980 {
3981     /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
3982     value = deposit64(env->cp15.hcr_el2, 32, 32, value);
3983     hcr_write(env, NULL, value);
3984 }
3985 
3986 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
3987                          uint64_t value)
3988 {
3989     /* Handle HCR write, i.e. write to low half of HCR_EL2 */
3990     value = deposit64(env->cp15.hcr_el2, 0, 32, value);
3991     hcr_write(env, NULL, value);
3992 }
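/* deposit64(old, pos, len, val) returns old with bits [pos+len-1:pos]
 * replaced by the low len bits of val; for example
 *
 *     deposit64(0xffffffff00000000ULL, 0, 32, 0x1234)
 *         == 0xffffffff00001234ULL
 *
 * which is how the two 32-bit halves of HCR_EL2 are merged above.
 */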
3993 
3994 static const ARMCPRegInfo el2_cp_reginfo[] = {
3995     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3996       .type = ARM_CP_IO,
3997       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3998       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3999       .writefn = hcr_write },
4000     { .name = "HCR", .state = ARM_CP_STATE_AA32,
4001       .type = ARM_CP_ALIAS | ARM_CP_IO,
4002       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4003       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4004       .writefn = hcr_writelow },
4005     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4006       .type = ARM_CP_ALIAS,
4007       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4008       .access = PL2_RW,
4009       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4010     { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4011       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4012       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4013     { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4014       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4015       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4016     { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4017       .type = ARM_CP_ALIAS,
4018       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4019       .access = PL2_RW,
4020       .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4021     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4022       .type = ARM_CP_ALIAS,
4023       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4024       .access = PL2_RW,
4025       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4026     { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4027       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4028       .access = PL2_RW, .writefn = vbar_write,
4029       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4030       .resetvalue = 0 },
4031     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4032       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4033       .access = PL3_RW, .type = ARM_CP_ALIAS,
4034       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4035     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4036       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4037       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4038       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
4039     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4040       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4041       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4042       .resetvalue = 0 },
4043     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4044       .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4045       .access = PL2_RW, .type = ARM_CP_ALIAS,
4046       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4047     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4048       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4049       .access = PL2_RW, .type = ARM_CP_CONST,
4050       .resetvalue = 0 },
4051     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4052     { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4053       .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4054       .access = PL2_RW, .type = ARM_CP_CONST,
4055       .resetvalue = 0 },
4056     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4057       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4058       .access = PL2_RW, .type = ARM_CP_CONST,
4059       .resetvalue = 0 },
4060     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4061       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4062       .access = PL2_RW, .type = ARM_CP_CONST,
4063       .resetvalue = 0 },
4064     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4065       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4066       .access = PL2_RW,
4067       /* no .writefn needed as this can't cause an ASID change;
4068        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4069        */
4070       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4071     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4072       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4073       .type = ARM_CP_ALIAS,
4074       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4075       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4076     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4077       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4078       .access = PL2_RW,
4079       /* no .writefn needed as this can't cause an ASID change;
4080        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4081        */
4082       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4083     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4084       .cp = 15, .opc1 = 6, .crm = 2,
4085       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4086       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4087       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4088       .writefn = vttbr_write },
4089     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4090       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4091       .access = PL2_RW, .writefn = vttbr_write,
4092       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4093     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4094       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4095       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4096       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4097     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4098       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4099       .access = PL2_RW, .resetvalue = 0,
4100       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4101     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4102       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4103       .access = PL2_RW, .resetvalue = 0,
4104       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4105     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4106       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4107       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4108     { .name = "TLBIALLNSNH",
4109       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4110       .type = ARM_CP_NO_RAW, .access = PL2_W,
4111       .writefn = tlbiall_nsnh_write },
4112     { .name = "TLBIALLNSNHIS",
4113       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4114       .type = ARM_CP_NO_RAW, .access = PL2_W,
4115       .writefn = tlbiall_nsnh_is_write },
4116     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4117       .type = ARM_CP_NO_RAW, .access = PL2_W,
4118       .writefn = tlbiall_hyp_write },
4119     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4120       .type = ARM_CP_NO_RAW, .access = PL2_W,
4121       .writefn = tlbiall_hyp_is_write },
4122     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4123       .type = ARM_CP_NO_RAW, .access = PL2_W,
4124       .writefn = tlbimva_hyp_write },
4125     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4126       .type = ARM_CP_NO_RAW, .access = PL2_W,
4127       .writefn = tlbimva_hyp_is_write },
4128     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
4129       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4130       .type = ARM_CP_NO_RAW, .access = PL2_W,
4131       .writefn = tlbi_aa64_alle2_write },
4132     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4133       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4134       .type = ARM_CP_NO_RAW, .access = PL2_W,
4135       .writefn = tlbi_aa64_vae2_write },
4136     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4137       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4138       .access = PL2_W, .type = ARM_CP_NO_RAW,
4139       .writefn = tlbi_aa64_vae2_write },
4140     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4141       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4142       .access = PL2_W, .type = ARM_CP_NO_RAW,
4143       .writefn = tlbi_aa64_alle2is_write },
4144     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4145       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4146       .type = ARM_CP_NO_RAW, .access = PL2_W,
4147       .writefn = tlbi_aa64_vae2is_write },
4148     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
4149       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4150       .access = PL2_W, .type = ARM_CP_NO_RAW,
4151       .writefn = tlbi_aa64_vae2is_write },
4152 #ifndef CONFIG_USER_ONLY
4153     /* Unlike the other EL2-related AT operations, these must
4154      * UNDEF from EL3 if EL2 is not implemented, which is why we
4155      * define them here rather than with the rest of the AT ops.
4156      */
4157     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
4158       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4159       .access = PL2_W, .accessfn = at_s1e2_access,
4160       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4161     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
4162       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4163       .access = PL2_W, .accessfn = at_s1e2_access,
4164       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4165     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
4166      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
4167      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
4168      * to behave as if SCR.NS was 1.
4169      */
4170     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4171       .access = PL2_W,
4172       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4173     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4174       .access = PL2_W,
4175       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4176     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4177       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4178       /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
4179        * reset values as IMPDEF. We choose to reset to 3 to comply with
4180        * both ARMv7 and ARMv8.
4181        */
4182       .access = PL2_RW, .resetvalue = 3,
4183       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4184     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4185       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4186       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4187       .writefn = gt_cntvoff_write,
4188       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4189     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4190       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4191       .writefn = gt_cntvoff_write,
4192       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4193     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4194       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4195       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4196       .type = ARM_CP_IO, .access = PL2_RW,
4197       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4198     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4199       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4200       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4201       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4202     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4203       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4204       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4205       .resetfn = gt_hyp_timer_reset,
4206       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4207     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4208       .type = ARM_CP_IO,
4209       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4210       .access = PL2_RW,
4211       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4212       .resetvalue = 0,
4213       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4214 #endif
4215     /* The only field of MDCR_EL2 that has a defined architectural reset value
4216      * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
4217      * don't implement any PMU event counters, so using zero as a reset
4218      * value for MDCR_EL2 is okay.
4219      */
4220     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4221       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4222       .access = PL2_RW, .resetvalue = 0,
4223       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4224     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4225       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4226       .access = PL2_RW, .accessfn = access_el3_aa32ns,
4227       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4228     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4229       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4230       .access = PL2_RW,
4231       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4232     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4233       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4234       .access = PL2_RW,
4235       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4236     REGINFO_SENTINEL
4237 };
4238 
4239 static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
4240     { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4241       .type = ARM_CP_ALIAS | ARM_CP_IO,
4242       .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4243       .access = PL2_RW,
4244       .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
4245       .writefn = hcr_writehigh },
4246     REGINFO_SENTINEL
4247 };
4248 
4249 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4250                                    bool isread)
4251 {
4252     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4253      * At Secure EL1 it traps to EL3.
4254      */
4255     if (arm_current_el(env) == 3) {
4256         return CP_ACCESS_OK;
4257     }
4258     if (arm_is_secure_below_el3(env)) {
4259         return CP_ACCESS_TRAP_EL3;
4260     }
4261     /* Writes from NS EL1 and NS EL2 are UNDEF, but reads are allowed. */
4262     if (isread) {
4263         return CP_ACCESS_OK;
4264     }
4265     return CP_ACCESS_TRAP_UNCATEGORIZED;
4266 }
4267 
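/* Summarising nsacr_access as a decision table (derived directly from the
 * checks above):
 *
 *   current state          read            write
 *   ---------------------  --------------  ---------------------------
 *   EL3                    OK              OK
 *   Secure below EL3       trap to EL3     trap to EL3
 *   NS EL1 / NS EL2        OK              trap (UNDEF, uncategorized)
 */
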
4268 static const ARMCPRegInfo el3_cp_reginfo[] = {
4269     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
4270       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
4271       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
4272       .resetvalue = 0, .writefn = scr_write },
4273     { .name = "SCR",  .type = ARM_CP_ALIAS,
4274       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
4275       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4276       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4277       .writefn = scr_write },
4278     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4279       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4280       .access = PL3_RW, .resetvalue = 0,
4281       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4282     { .name = "SDER",
4283       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4284       .access = PL3_RW, .resetvalue = 0,
4285       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4286     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4287       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4288       .writefn = vbar_write, .resetvalue = 0,
4289       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4290     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4291       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4292       .access = PL3_RW, .resetvalue = 0,
4293       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4294     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4295       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4296       .access = PL3_RW,
4297       /* no .writefn needed as this can't cause an ASID change;
4298        * we must provide a .raw_writefn and .resetfn because we handle
4299        * reset and migration for the AArch32 TTBCR(S), which might be
4300        * using mask and base_mask.
4301        */
4302       .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
4303       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4304     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4305       .type = ARM_CP_ALIAS,
4306       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
4307       .access = PL3_RW,
4308       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
4309     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
4310       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
4311       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
4312     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
4313       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
4314       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
4315     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
4316       .type = ARM_CP_ALIAS,
4317       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
4318       .access = PL3_RW,
4319       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
4320     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
4321       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
4322       .access = PL3_RW, .writefn = vbar_write,
4323       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
4324       .resetvalue = 0 },
4325     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
4326       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
4327       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
4328       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
4329     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
4330       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
4331       .access = PL3_RW, .resetvalue = 0,
4332       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
4333     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
4334       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
4335       .access = PL3_RW, .type = ARM_CP_CONST,
4336       .resetvalue = 0 },
4337     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
4338       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
4339       .access = PL3_RW, .type = ARM_CP_CONST,
4340       .resetvalue = 0 },
4341     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
4342       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
4343       .access = PL3_RW, .type = ARM_CP_CONST,
4344       .resetvalue = 0 },
4345     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
4346       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
4347       .access = PL3_W, .type = ARM_CP_NO_RAW,
4348       .writefn = tlbi_aa64_alle3is_write },
4349     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
4350       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
4351       .access = PL3_W, .type = ARM_CP_NO_RAW,
4352       .writefn = tlbi_aa64_vae3is_write },
4353     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
4354       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
4355       .access = PL3_W, .type = ARM_CP_NO_RAW,
4356       .writefn = tlbi_aa64_vae3is_write },
4357     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
4358       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
4359       .access = PL3_W, .type = ARM_CP_NO_RAW,
4360       .writefn = tlbi_aa64_alle3_write },
4361     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
4362       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
4363       .access = PL3_W, .type = ARM_CP_NO_RAW,
4364       .writefn = tlbi_aa64_vae3_write },
4365     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
4366       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
4367       .access = PL3_W, .type = ARM_CP_NO_RAW,
4368       .writefn = tlbi_aa64_vae3_write },
4369     REGINFO_SENTINEL
4370 };
4371 
4372 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4373                                      bool isread)
4374 {
4375     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
4376      * but the AArch32 CTR has its own reginfo struct)
4377      */
4378     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4379         return CP_ACCESS_TRAP;
4380     }
4381     return CP_ACCESS_OK;
4382 }
4383 
4384 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4385                         uint64_t value)
4386 {
4387     /* Writes to OSLAR_EL1 may update the OS lock status, which can be
4388      * read via a bit in OSLSR_EL1.
4389      */
4390     int oslock;
4391 
4392     if (ri->state == ARM_CP_STATE_AA32) {
4393         oslock = (value == 0xC5ACCE55);
4394     } else {
4395         oslock = value & 1;
4396     }
4397 
4398     env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4399 }
4400 
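/* Worked example (illustrative): an AArch32 OSLAR write of the key value
 * 0xC5ACCE55 gives oslock == 1, so deposit32(oslsr_el1, 1, 1, 1) sets
 * OSLSR_EL1 bit [1] (the lock status); any other AArch32 value clears it.
 * In the AArch64 case only bit [0] of the written value is consulted.
 */
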
4401 static const ARMCPRegInfo debug_cp_reginfo[] = {
4402     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
4403      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
4404      * unlike DBGDRAR it is never accessible from EL0.
4405      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4406      * accessor.
4407      */
4408     { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4409       .access = PL0_R, .accessfn = access_tdra,
4410       .type = ARM_CP_CONST, .resetvalue = 0 },
4411     { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4412       .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4413       .access = PL1_R, .accessfn = access_tdra,
4414       .type = ARM_CP_CONST, .resetvalue = 0 },
4415     { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4416       .access = PL0_R, .accessfn = access_tdra,
4417       .type = ARM_CP_CONST, .resetvalue = 0 },
4418     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4419     { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4420       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4421       .access = PL1_RW, .accessfn = access_tda,
4422       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4423       .resetvalue = 0 },
4424     /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4425      * We don't implement the configurable EL0 access.
4426      */
4427     { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4428       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4429       .type = ARM_CP_ALIAS,
4430       .access = PL1_R, .accessfn = access_tda,
4431       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4432     { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4433       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4434       .access = PL1_W, .type = ARM_CP_NO_RAW,
4435       .accessfn = access_tdosa,
4436       .writefn = oslar_write },
4437     { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4438       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4439       .access = PL1_R, .resetvalue = 10,
4440       .accessfn = access_tdosa,
4441       .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4442     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4443     { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4444       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4445       .access = PL1_RW, .accessfn = access_tdosa,
4446       .type = ARM_CP_NOP },
4447     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4448      * implement vector catch debug events yet.
4449      */
4450     { .name = "DBGVCR",
4451       .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4452       .access = PL1_RW, .accessfn = access_tda,
4453       .type = ARM_CP_NOP },
4454     /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
4455      * to save and restore a 32-bit guest's DBGVCR)
4456      */
4457     { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
4458       .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
4459       .access = PL2_RW, .accessfn = access_tda,
4460       .type = ARM_CP_NOP },
4461     /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
4462      * Channel, but Linux may try to access this register. The 32-bit
4463      * alias is DBGDCCINT.
4464      */
4465     { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
4466       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4467       .access = PL1_RW, .accessfn = access_tda,
4468       .type = ARM_CP_NOP },
4469     REGINFO_SENTINEL
4470 };
4471 
4472 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4473     /* 64 bit access versions of the (dummy) debug registers */
4474     { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4475       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4476     { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4477       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4478     REGINFO_SENTINEL
4479 };
4480 
4481 /* Return the exception level to which exceptions should be taken
4482  * via SVEAccessTrap.  If an exception should be routed through
4483  * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
4484  * take care of raising that exception.
4485  * C.f. the ARM pseudocode function CheckSVEEnabled.
4486  */
4487 int sve_exception_el(CPUARMState *env, int el)
4488 {
4489 #ifndef CONFIG_USER_ONLY
4490     if (el <= 1) {
4491         bool disabled = false;
4492 
4493         /* The CPACR.ZEN controls traps to EL1:
4494          * 0, 2 : trap EL0 and EL1 accesses
4495          * 1    : trap only EL0 accesses
4496          * 3    : trap no accesses
4497          */
4498         if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
4499             disabled = true;
4500         } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
4501             disabled = el == 0;
4502         }
4503         if (disabled) {
4504             /* route_to_el2 */
4505             return (arm_feature(env, ARM_FEATURE_EL2)
4506                     && !arm_is_secure(env)
4507                     && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
4508         }
4509 
4510         /* Check CPACR.FPEN.  */
4511         if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
4512             disabled = true;
4513         } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
4514             disabled = el == 0;
4515         }
4516         if (disabled) {
4517             return 0;
4518         }
4519     }
4520 
4521     /* CPTR_EL2.  Since TZ and TFP are positive (they trap when set,
4522      * not when clear), they will be zero when EL2 is not present.
4523      */
4524     if (el <= 2 && !arm_is_secure_below_el3(env)) {
4525         if (env->cp15.cptr_el[2] & CPTR_TZ) {
4526             return 2;
4527         }
4528         if (env->cp15.cptr_el[2] & CPTR_TFP) {
4529             return 0;
4530         }
4531     }
4532 
4533     /* CPTR_EL3.  Since EZ is negative, we must check for EL3.  */
4534     if (arm_feature(env, ARM_FEATURE_EL3)
4535         && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
4536         return 3;
4537     }
4538 #endif
4539     return 0;
4540 }
4541 
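/* Worked example (illustrative): CPACR_EL1.ZEN occupies bits [17:16], so
 * the two extract32() calls above read ZEN bit 0 and ZEN bit 1. With
 * ZEN == 0b01, an EL0 access is disabled while an EL1 access is not; the
 * disabled EL0 access then traps to EL2 only if EL2 exists, the access is
 * non-secure and HCR_EL2.TGE is set, and otherwise traps to EL1.
 * FPEN (bits [21:20]) is decoded the same way.
 */
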
4542 /*
4543  * Given that SVE is enabled, return the vector length for EL.
4544  */
4545 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
4546 {
4547     ARMCPU *cpu = arm_env_get_cpu(env);
4548     uint32_t zcr_len = cpu->sve_max_vq - 1;
4549 
4550     if (el <= 1) {
4551         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
4552     }
4553     if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
4554         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
4555     }
4556     if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
4557         zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
4558     }
4559     return zcr_len;
4560 }
4561 
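/* Worked example (illustrative, hypothetical CPU configuration): with
 * sve_max_vq == 4, ZCR_EL1.LEN == 2 and, when EL2 exists, ZCR_EL2.LEN == 1,
 * an EL1 access gets zcr_len == MIN(3, 2, 1) == 1, i.e. vq == 2 and an
 * effective vector length of (1 + 1) * 128 == 256 bits.
 */
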
4562 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4563                       uint64_t value)
4564 {
4565     int cur_el = arm_current_el(env);
4566     int old_len = sve_zcr_len_for_el(env, cur_el);
4567     int new_len;
4568 
4569     /* Bits other than [3:0] are RAZ/WI.  */
4570     raw_write(env, ri, value & 0xf);
4571 
4572     /*
4573      * Because we arrived here, we know both FP and SVE are enabled;
4574      * otherwise we would have trapped access to the ZCR_ELn register.
4575      */
4576     new_len = sve_zcr_len_for_el(env, cur_el);
4577     if (new_len < old_len) {
4578         aarch64_sve_narrow_vq(env, new_len + 1);
4579     }
4580 }
4581 
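/* Worked example (illustrative): writing 0x1f5 to ZCR_EL1 stores only
 * 0x1f5 & 0xf == 0x5. If that lowers the effective length (say from a
 * hypothetical zcr_len of 7 down to 5), the Z/P register state beyond the
 * new length is squashed via aarch64_sve_narrow_vq(env, 6), i.e.
 * new_len + 1 vector quadwords.
 */
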
4582 static const ARMCPRegInfo zcr_el1_reginfo = {
4583     .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
4584     .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
4585     .access = PL1_RW, .type = ARM_CP_SVE,
4586     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
4587     .writefn = zcr_write, .raw_writefn = raw_write
4588 };
4589 
4590 static const ARMCPRegInfo zcr_el2_reginfo = {
4591     .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
4592     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
4593     .access = PL2_RW, .type = ARM_CP_SVE,
4594     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
4595     .writefn = zcr_write, .raw_writefn = raw_write
4596 };
4597 
4598 static const ARMCPRegInfo zcr_no_el2_reginfo = {
4599     .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
4600     .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
4601     .access = PL2_RW, .type = ARM_CP_SVE,
4602     .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
4603 };
4604 
4605 static const ARMCPRegInfo zcr_el3_reginfo = {
4606     .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
4607     .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
4608     .access = PL3_RW, .type = ARM_CP_SVE,
4609     .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
4610     .writefn = zcr_write, .raw_writefn = raw_write
4611 };
4612 
4613 void hw_watchpoint_update(ARMCPU *cpu, int n)
4614 {
4615     CPUARMState *env = &cpu->env;
4616     vaddr len = 0;
4617     vaddr wvr = env->cp15.dbgwvr[n];
4618     uint64_t wcr = env->cp15.dbgwcr[n];
4619     int mask;
4620     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4621 
4622     if (env->cpu_watchpoint[n]) {
4623         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4624         env->cpu_watchpoint[n] = NULL;
4625     }
4626 
4627     if (!extract64(wcr, 0, 1)) {
4628         /* E bit clear : watchpoint disabled */
4629         return;
4630     }
4631 
4632     switch (extract64(wcr, 3, 2)) {
4633     case 0:
4634         /* LSC 00 is reserved and must behave as if the watchpoint is disabled */
4635         return;
4636     case 1:
4637         flags |= BP_MEM_READ;
4638         break;
4639     case 2:
4640         flags |= BP_MEM_WRITE;
4641         break;
4642     case 3:
4643         flags |= BP_MEM_ACCESS;
4644         break;
4645     }
4646 
4647     /* Attempts to use both MASK and BAS fields simultaneously are
4648      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4649      * thus generating a watchpoint for every byte in the masked region.
4650      */
4651     mask = extract64(wcr, 24, 4);
4652     if (mask == 1 || mask == 2) {
4653         /* Reserved values of MASK; we must act as if the mask value was
4654          * some non-reserved value, or as if the watchpoint were disabled.
4655          * We choose the latter.
4656          */
4657         return;
4658     } else if (mask) {
4659         /* Watchpoint covers an aligned area up to 2GB in size */
4660         len = 1ULL << mask;
4661         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4662          * whether the watchpoint fires when the unmasked bits match; we opt
4663          * to generate the exceptions.
4664          */
4665         wvr &= ~(len - 1);
4666     } else {
4667         /* Watchpoint covers bytes defined by the byte address select bits */
4668         int bas = extract64(wcr, 5, 8);
4669         int basstart;
4670 
4671         if (bas == 0) {
4672             /* This must act as if the watchpoint is disabled */
4673             return;
4674         }
4675 
4676         if (extract64(wvr, 2, 1)) {
4677             /* Deprecated case of an address aligned only to 4 bytes. BAS[7:4] are
4678              * ignored, and BAS[3:0] define which bytes to watch.
4679              */
4680             bas &= 0xf;
4681         }
4682         /* The BAS bits are supposed to be programmed to indicate a contiguous
4683          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4684          * we fire for each byte in the word/doubleword addressed by the WVR.
4685          * We choose to ignore any non-zero bits after the first range of 1s.
4686          */
4687         basstart = ctz32(bas);
4688         len = cto32(bas >> basstart);
4689         wvr += basstart;
4690     }
4691 
4692     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4693                           &env->cpu_watchpoint[n]);
4694 }
4695 
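/* Worked example (illustrative values): with BAS (WCR bits [12:5]) ==
 * 0b00111100 and WVR == 0x1000, basstart == ctz32(0x3c) == 2 and
 * len == cto32(0x3c >> 2) == 4, so the watchpoint covers bytes
 * 0x1002..0x1005. For a masked watchpoint with MASK == 3, len ==
 * 1ULL << 3 == 8 and WVR is aligned down to an 8-byte boundary.
 */
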
4696 void hw_watchpoint_update_all(ARMCPU *cpu)
4697 {
4698     int i;
4699     CPUARMState *env = &cpu->env;
4700 
4701     /* Completely clear out existing QEMU watchpoints and our array, to
4702      * avoid possible stale entries following migration load.
4703      */
4704     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4705     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4706 
4707     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4708         hw_watchpoint_update(cpu, i);
4709     }
4710 }
4711 
4712 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4713                          uint64_t value)
4714 {
4715     ARMCPU *cpu = arm_env_get_cpu(env);
4716     int i = ri->crm;
4717 
4718     /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4719      * register reads and behaves as if values written are sign extended.
4720      * Bits [1:0] are RES0.
4721      */
4722     value = sextract64(value, 0, 49) & ~3ULL;
4723 
4724     raw_write(env, ri, value);
4725     hw_watchpoint_update(cpu, i);
4726 }
4727 
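/* Worked example (illustrative): a write of 0x0001000000000007 has bit [48]
 * set, so sextract64(value, 0, 49) gives 0xffff000000000007, and clearing
 * the RES0 bits [1:0] stores 0xffff000000000004.
 */
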
4728 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4729                          uint64_t value)
4730 {
4731     ARMCPU *cpu = arm_env_get_cpu(env);
4732     int i = ri->crm;
4733 
4734     raw_write(env, ri, value);
4735     hw_watchpoint_update(cpu, i);
4736 }
4737 
4738 void hw_breakpoint_update(ARMCPU *cpu, int n)
4739 {
4740     CPUARMState *env = &cpu->env;
4741     uint64_t bvr = env->cp15.dbgbvr[n];
4742     uint64_t bcr = env->cp15.dbgbcr[n];
4743     vaddr addr;
4744     int bt;
4745     int flags = BP_CPU;
4746 
4747     if (env->cpu_breakpoint[n]) {
4748         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4749         env->cpu_breakpoint[n] = NULL;
4750     }
4751 
4752     if (!extract64(bcr, 0, 1)) {
4753         /* E bit clear : breakpoint disabled */
4754         return;
4755     }
4756 
4757     bt = extract64(bcr, 20, 4);
4758 
4759     switch (bt) {
4760     case 4: /* unlinked address mismatch (reserved if AArch64) */
4761     case 5: /* linked address mismatch (reserved if AArch64) */
4762         qemu_log_mask(LOG_UNIMP,
4763                       "arm: address mismatch breakpoint types not implemented\n");
4764         return;
4765     case 0: /* unlinked address match */
4766     case 1: /* linked address match */
4767     {
4768         /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4769          * we behave as if the register was sign extended. Bits [1:0] are
4770          * RES0. The BAS field is used to allow setting breakpoints on 16
4771          * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4772          * a bp will fire if the addresses covered by the bp and the addresses
4773          * covered by the insn overlap but the insn doesn't start at the
4774          * start of the bp address range. We choose to require the insn and
4775          * the bp to have the same address. The constraints on writing to
4776          * BAS enforced in dbgbcr_write mean we have only four cases:
4777          *  0b0000  => no breakpoint
4778          *  0b0011  => breakpoint on addr
4779          *  0b1100  => breakpoint on addr + 2
4780          *  0b1111  => breakpoint on addr
4781          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4782          */
4783         int bas = extract64(bcr, 5, 4);
4784         addr = sextract64(bvr, 0, 49) & ~3ULL;
4785         if (bas == 0) {
4786             return;
4787         }
4788         if (bas == 0xc) {
4789             addr += 2;
4790         }
4791         break;
4792     }
4793     case 2: /* unlinked context ID match */
4794     case 8: /* unlinked VMID match (reserved if no EL2) */
4795     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4796         qemu_log_mask(LOG_UNIMP,
4797                       "arm: unlinked context breakpoint types not implemented\n");
4798         return;
4799     case 9: /* linked VMID match (reserved if no EL2) */
4800     case 11: /* linked context ID and VMID match (reserved if no EL2) */
4801     case 3: /* linked context ID match */
4802     default:
4803         /* We must generate no events for Linked context matches (unless
4804          * they are linked to by some other bp/wp, which is handled in
4805          * updates for the linking bp/wp). We choose to also generate no events
4806          * for reserved values.
4807          */
4808         return;
4809     }
4810 
4811     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4812 }
4813 
4814 void hw_breakpoint_update_all(ARMCPU *cpu)
4815 {
4816     int i;
4817     CPUARMState *env = &cpu->env;
4818 
4819     /* Completely clear out existing QEMU breakpoints and our array, to
4820      * avoid possible stale entries following migration load.
4821      */
4822     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4823     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4824 
4825     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4826         hw_breakpoint_update(cpu, i);
4827     }
4828 }
4829 
4830 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4831                          uint64_t value)
4832 {
4833     ARMCPU *cpu = arm_env_get_cpu(env);
4834     int i = ri->crm;
4835 
4836     raw_write(env, ri, value);
4837     hw_breakpoint_update(cpu, i);
4838 }
4839 
4840 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4841                          uint64_t value)
4842 {
4843     ARMCPU *cpu = arm_env_get_cpu(env);
4844     int i = ri->crm;
4845 
4846     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4847      * copy of BAS[0].
4848      */
4849     value = deposit64(value, 6, 1, extract64(value, 5, 1));
4850     value = deposit64(value, 8, 1, extract64(value, 7, 1));
4851 
4852     raw_write(env, ri, value);
4853     hw_breakpoint_update(cpu, i);
4854 }
4855 
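/* Worked example (illustrative): a write with BAS (bits [8:5]) == 0b0001 is
 * stored as 0b0011, and 0b0100 is stored as 0b1100, so only the four BAS
 * patterns handled in hw_breakpoint_update (0b0000, 0b0011, 0b1100 and
 * 0b1111) can ever be observed.
 */
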
4856 static void define_debug_regs(ARMCPU *cpu)
4857 {
4858     /* Define v7 and v8 architectural debug registers.
4859      * These are just dummy implementations for now.
4860      */
4861     int i;
4862     int wrps, brps, ctx_cmps;
4863     ARMCPRegInfo dbgdidr = {
4864         .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4865         .access = PL0_R, .accessfn = access_tda,
4866         .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4867     };
4868 
4869     /* Note that all these register fields hold "number of Xs minus 1". */
4870     brps = extract32(cpu->dbgdidr, 24, 4);
4871     wrps = extract32(cpu->dbgdidr, 28, 4);
4872     ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
4873 
4874     assert(ctx_cmps <= brps);
4875 
4876     /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4877      * of the debug registers such as number of breakpoints;
4878      * check that if they both exist then they agree.
4879      */
4880     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4881         assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4882         assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4883         assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4884     }
4885 
4886     define_one_arm_cp_reg(cpu, &dbgdidr);
4887     define_arm_cp_regs(cpu, debug_cp_reginfo);
4888 
4889     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4890         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4891     }
4892 
4893     for (i = 0; i < brps + 1; i++) {
4894         ARMCPRegInfo dbgregs[] = {
4895             { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4896               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4897               .access = PL1_RW, .accessfn = access_tda,
4898               .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4899               .writefn = dbgbvr_write, .raw_writefn = raw_write
4900             },
4901             { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4902               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4903               .access = PL1_RW, .accessfn = access_tda,
4904               .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4905               .writefn = dbgbcr_write, .raw_writefn = raw_write
4906             },
4907             REGINFO_SENTINEL
4908         };
4909         define_arm_cp_regs(cpu, dbgregs);
4910     }
4911 
4912     for (i = 0; i < wrps + 1; i++) {
4913         ARMCPRegInfo dbgregs[] = {
4914             { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4915               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4916               .access = PL1_RW, .accessfn = access_tda,
4917               .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4918               .writefn = dbgwvr_write, .raw_writefn = raw_write
4919             },
4920             { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4921               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4922               .access = PL1_RW, .accessfn = access_tda,
4923               .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4924               .writefn = dbgwcr_write, .raw_writefn = raw_write
4925             },
4926             REGINFO_SENTINEL
4927         };
4928         define_arm_cp_regs(cpu, dbgregs);
4929     }
4930 }
4931 
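/* Worked example (illustrative): a CPU whose DBGDIDR reports BRPs == 5
 * (i.e. six breakpoints, since the field holds "number minus 1") gets
 * DBGBVR0..DBGBVR5 and DBGBCR0..DBGBCR5 defined by the breakpoint loop in
 * define_debug_regs() above, distinguished only by their crm values 0..5.
 */
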
4932 /* We don't know until after realize whether there's a GICv3
4933  * attached, and that is what registers the gicv3 sysregs.
4934  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1
4935  * at runtime.
4936  */
4937 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
4938 {
4939     ARMCPU *cpu = arm_env_get_cpu(env);
4940     uint64_t pfr1 = cpu->id_pfr1;
4941 
4942     if (env->gicv3state) {
4943         pfr1 |= 1 << 28;
4944     }
4945     return pfr1;
4946 }
4947 
4948 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
4949 {
4950     ARMCPU *cpu = arm_env_get_cpu(env);
4951     uint64_t pfr0 = cpu->isar.id_aa64pfr0;
4952 
4953     if (env->gicv3state) {
4954         pfr0 |= 1 << 24;
4955     }
4956     return pfr0;
4957 }
4958 
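/* Worked example: ID_PFR1.GIC is bits [31:28] and ID_AA64PFR0_EL1.GIC is
 * bits [27:24], so the two read functions above OR in 1 << 28 and 1 << 24
 * respectively to report a GIC field of 1 (GICv3 system register interface
 * supported) once a gicv3state has been attached.
 */
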
4959 void register_cp_regs_for_features(ARMCPU *cpu)
4960 {
4961     /* Register all the coprocessor registers based on feature bits */
4962     CPUARMState *env = &cpu->env;
4963     if (arm_feature(env, ARM_FEATURE_M)) {
4964         /* M profile has no coprocessor registers */
4965         return;
4966     }
4967 
4968     define_arm_cp_regs(cpu, cp_reginfo);
4969     if (!arm_feature(env, ARM_FEATURE_V8)) {
4970         /* Must go early as it is full of wildcards that may be
4971          * overridden by later definitions.
4972          */
4973         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4974     }
4975 
4976     if (arm_feature(env, ARM_FEATURE_V6)) {
4977         /* The ID registers all have impdef reset values */
4978         ARMCPRegInfo v6_idregs[] = {
4979             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4980               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4981               .access = PL1_R, .type = ARM_CP_CONST,
4982               .resetvalue = cpu->id_pfr0 },
4983             /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
4984              * the value of the GIC field until after we define these regs.
4985              */
4986             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4987               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4988               .access = PL1_R, .type = ARM_CP_NO_RAW,
4989               .readfn = id_pfr1_read,
4990               .writefn = arm_cp_write_ignore },
4991             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4992               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4993               .access = PL1_R, .type = ARM_CP_CONST,
4994               .resetvalue = cpu->id_dfr0 },
4995             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4996               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4997               .access = PL1_R, .type = ARM_CP_CONST,
4998               .resetvalue = cpu->id_afr0 },
4999             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
5000               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
5001               .access = PL1_R, .type = ARM_CP_CONST,
5002               .resetvalue = cpu->id_mmfr0 },
5003             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
5004               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
5005               .access = PL1_R, .type = ARM_CP_CONST,
5006               .resetvalue = cpu->id_mmfr1 },
5007             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
5008               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
5009               .access = PL1_R, .type = ARM_CP_CONST,
5010               .resetvalue = cpu->id_mmfr2 },
5011             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
5012               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
5013               .access = PL1_R, .type = ARM_CP_CONST,
5014               .resetvalue = cpu->id_mmfr3 },
5015             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
5016               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5017               .access = PL1_R, .type = ARM_CP_CONST,
5018               .resetvalue = cpu->isar.id_isar0 },
5019             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
5020               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
5021               .access = PL1_R, .type = ARM_CP_CONST,
5022               .resetvalue = cpu->isar.id_isar1 },
5023             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
5024               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5025               .access = PL1_R, .type = ARM_CP_CONST,
5026               .resetvalue = cpu->isar.id_isar2 },
5027             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
5028               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
5029               .access = PL1_R, .type = ARM_CP_CONST,
5030               .resetvalue = cpu->isar.id_isar3 },
5031             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
5032               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
5033               .access = PL1_R, .type = ARM_CP_CONST,
5034               .resetvalue = cpu->isar.id_isar4 },
5035             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
5036               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
5037               .access = PL1_R, .type = ARM_CP_CONST,
5038               .resetvalue = cpu->isar.id_isar5 },
5039             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
5040               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
5041               .access = PL1_R, .type = ARM_CP_CONST,
5042               .resetvalue = cpu->id_mmfr4 },
5043             { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
5044               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
5045               .access = PL1_R, .type = ARM_CP_CONST,
5046               .resetvalue = cpu->isar.id_isar6 },
5047             REGINFO_SENTINEL
5048         };
5049         define_arm_cp_regs(cpu, v6_idregs);
5050         define_arm_cp_regs(cpu, v6_cp_reginfo);
5051     } else {
5052         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
5053     }
5054     if (arm_feature(env, ARM_FEATURE_V6K)) {
5055         define_arm_cp_regs(cpu, v6k_cp_reginfo);
5056     }
5057     if (arm_feature(env, ARM_FEATURE_V7MP) &&
5058         !arm_feature(env, ARM_FEATURE_PMSA)) {
5059         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
5060     }
5061     if (arm_feature(env, ARM_FEATURE_V7)) {
5062         /* v7 performance monitor control register: same implementer
5063          * field as main ID register, and we implement only the cycle
5064          * count register.
5065          */
5066 #ifndef CONFIG_USER_ONLY
5067         ARMCPRegInfo pmcr = {
5068             .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
5069             .access = PL0_RW,
5070             .type = ARM_CP_IO | ARM_CP_ALIAS,
5071             .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
5072             .accessfn = pmreg_access, .writefn = pmcr_write,
5073             .raw_writefn = raw_write,
5074         };
5075         ARMCPRegInfo pmcr64 = {
5076             .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
5077             .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
5078             .access = PL0_RW, .accessfn = pmreg_access,
5079             .type = ARM_CP_IO,
5080             .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
5081             .resetvalue = cpu->midr & 0xff000000,
5082             .writefn = pmcr_write, .raw_writefn = raw_write,
5083         };
5084         define_one_arm_cp_reg(cpu, &pmcr);
5085         define_one_arm_cp_reg(cpu, &pmcr64);
5086 #endif
5087         ARMCPRegInfo clidr = {
5088             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
5089             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
5090             .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
5091         };
5092         define_one_arm_cp_reg(cpu, &clidr);
5093         define_arm_cp_regs(cpu, v7_cp_reginfo);
5094         define_debug_regs(cpu);
5095     } else {
5096         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
5097     }
5098     if (arm_feature(env, ARM_FEATURE_V8)) {
5099         /* AArch64 ID registers, which all have impdef reset values.
5100          * Note that within the ID register ranges the unused slots
5101          * must all RAZ, not UNDEF; future architecture versions may
5102          * define new registers here.
5103          */
5104         ARMCPRegInfo v8_idregs[] = {
5105             /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
5106              * know the right value for the GIC field until after we
5107              * define these regs.
5108              */
5109             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
5110               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
5111               .access = PL1_R, .type = ARM_CP_NO_RAW,
5112               .readfn = id_aa64pfr0_read,
5113               .writefn = arm_cp_write_ignore },
5114             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
5115               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
5116               .access = PL1_R, .type = ARM_CP_CONST,
5117               .resetvalue = cpu->isar.id_aa64pfr1},
5118             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5119               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
5120               .access = PL1_R, .type = ARM_CP_CONST,
5121               .resetvalue = 0 },
5122             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5123               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
5124               .access = PL1_R, .type = ARM_CP_CONST,
5125               .resetvalue = 0 },
5126             { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
5127               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
5128               .access = PL1_R, .type = ARM_CP_CONST,
5129               /* At present, only SVEver == 0 is defined anyway.  */
5130               .resetvalue = 0 },
5131             { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5132               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
5133               .access = PL1_R, .type = ARM_CP_CONST,
5134               .resetvalue = 0 },
5135             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5136               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
5137               .access = PL1_R, .type = ARM_CP_CONST,
5138               .resetvalue = 0 },
5139             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5140               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
5141               .access = PL1_R, .type = ARM_CP_CONST,
5142               .resetvalue = 0 },
5143             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
5144               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
5145               .access = PL1_R, .type = ARM_CP_CONST,
5146               .resetvalue = cpu->id_aa64dfr0 },
5147             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
5148               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
5149               .access = PL1_R, .type = ARM_CP_CONST,
5150               .resetvalue = cpu->id_aa64dfr1 },
5151             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5152               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
5153               .access = PL1_R, .type = ARM_CP_CONST,
5154               .resetvalue = 0 },
5155             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5156               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
5157               .access = PL1_R, .type = ARM_CP_CONST,
5158               .resetvalue = 0 },
5159             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
5160               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
5161               .access = PL1_R, .type = ARM_CP_CONST,
5162               .resetvalue = cpu->id_aa64afr0 },
5163             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
5164               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
5165               .access = PL1_R, .type = ARM_CP_CONST,
5166               .resetvalue = cpu->id_aa64afr1 },
5167             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5168               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
5169               .access = PL1_R, .type = ARM_CP_CONST,
5170               .resetvalue = 0 },
5171             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5172               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
5173               .access = PL1_R, .type = ARM_CP_CONST,
5174               .resetvalue = 0 },
5175             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
5176               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
5177               .access = PL1_R, .type = ARM_CP_CONST,
5178               .resetvalue = cpu->isar.id_aa64isar0 },
5179             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
5180               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
5181               .access = PL1_R, .type = ARM_CP_CONST,
5182               .resetvalue = cpu->isar.id_aa64isar1 },
5183             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5184               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
5185               .access = PL1_R, .type = ARM_CP_CONST,
5186               .resetvalue = 0 },
5187             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5188               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
5189               .access = PL1_R, .type = ARM_CP_CONST,
5190               .resetvalue = 0 },
5191             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5192               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
5193               .access = PL1_R, .type = ARM_CP_CONST,
5194               .resetvalue = 0 },
5195             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5196               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
5197               .access = PL1_R, .type = ARM_CP_CONST,
5198               .resetvalue = 0 },
5199             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5200               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
5201               .access = PL1_R, .type = ARM_CP_CONST,
5202               .resetvalue = 0 },
5203             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5204               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
5205               .access = PL1_R, .type = ARM_CP_CONST,
5206               .resetvalue = 0 },
5207             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
5208               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5209               .access = PL1_R, .type = ARM_CP_CONST,
5210               .resetvalue = cpu->id_aa64mmfr0 },
5211             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
5212               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
5213               .access = PL1_R, .type = ARM_CP_CONST,
5214               .resetvalue = cpu->id_aa64mmfr1 },
5215             { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5216               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
5217               .access = PL1_R, .type = ARM_CP_CONST,
5218               .resetvalue = 0 },
5219             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5220               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
5221               .access = PL1_R, .type = ARM_CP_CONST,
5222               .resetvalue = 0 },
5223             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5224               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
5225               .access = PL1_R, .type = ARM_CP_CONST,
5226               .resetvalue = 0 },
5227             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5228               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
5229               .access = PL1_R, .type = ARM_CP_CONST,
5230               .resetvalue = 0 },
5231             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5232               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
5233               .access = PL1_R, .type = ARM_CP_CONST,
5234               .resetvalue = 0 },
5235             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5236               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
5237               .access = PL1_R, .type = ARM_CP_CONST,
5238               .resetvalue = 0 },
5239             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
5240               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
5241               .access = PL1_R, .type = ARM_CP_CONST,
5242               .resetvalue = cpu->isar.mvfr0 },
5243             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
5244               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
5245               .access = PL1_R, .type = ARM_CP_CONST,
5246               .resetvalue = cpu->isar.mvfr1 },
5247             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
5248               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
5249               .access = PL1_R, .type = ARM_CP_CONST,
5250               .resetvalue = cpu->isar.mvfr2 },
5251             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5252               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
5253               .access = PL1_R, .type = ARM_CP_CONST,
5254               .resetvalue = 0 },
5255             { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5256               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
5257               .access = PL1_R, .type = ARM_CP_CONST,
5258               .resetvalue = 0 },
5259             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5260               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
5261               .access = PL1_R, .type = ARM_CP_CONST,
5262               .resetvalue = 0 },
5263             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5264               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
5265               .access = PL1_R, .type = ARM_CP_CONST,
5266               .resetvalue = 0 },
5267             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
5268               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
5269               .access = PL1_R, .type = ARM_CP_CONST,
5270               .resetvalue = 0 },
5271             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
5272               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
5273               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5274               .resetvalue = cpu->pmceid0 },
5275             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
5276               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
5277               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5278               .resetvalue = cpu->pmceid0 },
5279             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
5280               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
5281               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5282               .resetvalue = cpu->pmceid1 },
5283             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
5284               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
5285               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
5286               .resetvalue = cpu->pmceid1 },
5287             REGINFO_SENTINEL
5288         };
5289         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
5290         if (!arm_feature(env, ARM_FEATURE_EL3) &&
5291             !arm_feature(env, ARM_FEATURE_EL2)) {
5292             ARMCPRegInfo rvbar = {
5293                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
5294                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5295                 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
5296             };
5297             define_one_arm_cp_reg(cpu, &rvbar);
5298         }
5299         define_arm_cp_regs(cpu, v8_idregs);
5300         define_arm_cp_regs(cpu, v8_cp_reginfo);
5301     }
5302     if (arm_feature(env, ARM_FEATURE_EL2)) {
5303         uint64_t vmpidr_def = mpidr_read_val(env);
5304         ARMCPRegInfo vpidr_regs[] = {
5305             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
5306               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5307               .access = PL2_RW, .accessfn = access_el3_aa32ns,
5308               .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
5309               .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
5310             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
5311               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5312               .access = PL2_RW, .resetvalue = cpu->midr,
5313               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5314             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
5315               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5316               .access = PL2_RW, .accessfn = access_el3_aa32ns,
5317               .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
5318               .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
5319             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
5320               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5321               .access = PL2_RW,
5322               .resetvalue = vmpidr_def,
5323               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
5324             REGINFO_SENTINEL
5325         };
5326         define_arm_cp_regs(cpu, vpidr_regs);
5327         define_arm_cp_regs(cpu, el2_cp_reginfo);
5328         if (arm_feature(env, ARM_FEATURE_V8)) {
5329             define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
5330         }
5331         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
5332         if (!arm_feature(env, ARM_FEATURE_EL3)) {
5333             ARMCPRegInfo rvbar = {
5334                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
5335                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
5336                 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
5337             };
5338             define_one_arm_cp_reg(cpu, &rvbar);
5339         }
5340     } else {
5341         /* If EL2 is missing but higher ELs are enabled, we need to
5342          * register the no_el2 reginfos.
5343          */
5344         if (arm_feature(env, ARM_FEATURE_EL3)) {
5345             /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
5346              * of MIDR_EL1 and MPIDR_EL1.
5347              */
5348             ARMCPRegInfo vpidr_regs[] = {
5349                 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5350                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
5351                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5352                   .type = ARM_CP_CONST, .resetvalue = cpu->midr,
5353                   .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
5354                 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5355                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
5356                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
5357                   .type = ARM_CP_NO_RAW,
5358                   .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
5359                 REGINFO_SENTINEL
5360             };
5361             define_arm_cp_regs(cpu, vpidr_regs);
5362             define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
5363             if (arm_feature(env, ARM_FEATURE_V8)) {
5364                 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
5365             }
5366         }
5367     }
5368     if (arm_feature(env, ARM_FEATURE_EL3)) {
5369         define_arm_cp_regs(cpu, el3_cp_reginfo);
5370         ARMCPRegInfo el3_regs[] = {
5371             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
5372               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
5373               .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
5374             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
5375               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
5376               .access = PL3_RW,
5377               .raw_writefn = raw_write, .writefn = sctlr_write,
5378               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
5379               .resetvalue = cpu->reset_sctlr },
5380             REGINFO_SENTINEL
5381         };
5382 
5383         define_arm_cp_regs(cpu, el3_regs);
5384     }
5385     /* The behaviour of NSACR is sufficiently various that we don't
5386      * try to describe it in a single reginfo:
5387      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
5388      *     reads as constant 0xc00 from NS EL1 and NS EL2
5389      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
5390      *  if v7 without EL3, register doesn't exist
5391      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
5392      */
5393     if (arm_feature(env, ARM_FEATURE_EL3)) {
5394         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5395             ARMCPRegInfo nsacr = {
5396                 .name = "NSACR", .type = ARM_CP_CONST,
5397                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5398                 .access = PL1_RW, .accessfn = nsacr_access,
5399                 .resetvalue = 0xc00
5400             };
5401             define_one_arm_cp_reg(cpu, &nsacr);
5402         } else {
5403             ARMCPRegInfo nsacr = {
5404                 .name = "NSACR",
5405                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5406                 .access = PL3_RW | PL1_R,
5407                 .resetvalue = 0,
5408                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
5409             };
5410             define_one_arm_cp_reg(cpu, &nsacr);
5411         }
5412     } else {
5413         if (arm_feature(env, ARM_FEATURE_V8)) {
5414             ARMCPRegInfo nsacr = {
5415                 .name = "NSACR", .type = ARM_CP_CONST,
5416                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
5417                 .access = PL1_R,
5418                 .resetvalue = 0xc00
5419             };
5420             define_one_arm_cp_reg(cpu, &nsacr);
5421         }
5422     }
5423 
5424     if (arm_feature(env, ARM_FEATURE_PMSA)) {
5425         if (arm_feature(env, ARM_FEATURE_V6)) {
5426             /* PMSAv6 not implemented */
5427             assert(arm_feature(env, ARM_FEATURE_V7));
5428             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5429             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
5430         } else {
5431             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
5432         }
5433     } else {
5434         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
5435         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
5436     }
5437     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5438         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
5439     }
5440     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
5441         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
5442     }
5443     if (arm_feature(env, ARM_FEATURE_VAPA)) {
5444         define_arm_cp_regs(cpu, vapa_cp_reginfo);
5445     }
5446     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
5447         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
5448     }
5449     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
5450         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
5451     }
5452     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
5453         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
5454     }
5455     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
5456         define_arm_cp_regs(cpu, omap_cp_reginfo);
5457     }
5458     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
5459         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
5460     }
5461     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5462         define_arm_cp_regs(cpu, xscale_cp_reginfo);
5463     }
5464     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
5465         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
5466     }
5467     if (arm_feature(env, ARM_FEATURE_LPAE)) {
5468         define_arm_cp_regs(cpu, lpae_cp_reginfo);
5469     }
5470     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
5471      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
5472      * be read-only (ie write causes UNDEF exception).
5473      */
5474     {
5475         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
5476             /* Pre-v8 MIDR space.
5477              * Note that the MIDR isn't a simple constant register because
5478              * of the TI925 behaviour where writes to another register can
5479              * cause the MIDR value to change.
5480              *
5481              * Unimplemented registers in the c15 0 0 0 space default to
5482              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
5483              * and friends override accordingly.
5484              */
5485             { .name = "MIDR",
5486               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
5487               .access = PL1_R, .resetvalue = cpu->midr,
5488               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
5489               .readfn = midr_read,
5490               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5491               .type = ARM_CP_OVERRIDE },
5492             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
5493             { .name = "DUMMY",
5494               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
5495               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5496             { .name = "DUMMY",
5497               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
5498               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5499             { .name = "DUMMY",
5500               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
5501               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5502             { .name = "DUMMY",
5503               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
5504               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5505             { .name = "DUMMY",
5506               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
5507               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5508             REGINFO_SENTINEL
5509         };
5510         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
5511             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
5512               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
5513               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
5514               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5515               .readfn = midr_read },
5516             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
5517             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5518               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5519               .access = PL1_R, .resetvalue = cpu->midr },
5520             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5521               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
5522               .access = PL1_R, .resetvalue = cpu->midr },
5523             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
5524               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
5525               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
5526             REGINFO_SENTINEL
5527         };
5528         ARMCPRegInfo id_cp_reginfo[] = {
5529             /* These are common to v8 and pre-v8 */
5530             { .name = "CTR",
5531               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
5532               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5533             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
5534               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
5535               .access = PL0_R, .accessfn = ctr_el0_access,
5536               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5537             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
5538             { .name = "TCMTR",
5539               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
5540               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5541             REGINFO_SENTINEL
5542         };
5543         /* TLBTR is specific to VMSA */
5544         ARMCPRegInfo id_tlbtr_reginfo = {
5545               .name = "TLBTR",
5546               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
5547               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
5548         };
5549         /* MPUIR is specific to PMSA V6+ */
5550         ARMCPRegInfo id_mpuir_reginfo = {
5551               .name = "MPUIR",
5552               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5553               .access = PL1_R, .type = ARM_CP_CONST,
5554               .resetvalue = cpu->pmsav7_dregion << 8
5555         };
5556         ARMCPRegInfo crn0_wi_reginfo = {
5557             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5558             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5559             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5560         };
5561         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5562             arm_feature(env, ARM_FEATURE_STRONGARM)) {
5563             ARMCPRegInfo *r;
5564             /* Register the blanket "writes ignored" value first to cover the
5565              * whole space. Then update the specific ID registers to allow write
5566              * access, so that they ignore writes rather than causing them to
5567              * UNDEF.
5568              */
5569             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5570             for (r = id_pre_v8_midr_cp_reginfo;
5571                  r->type != ARM_CP_SENTINEL; r++) {
5572                 r->access = PL1_RW;
5573             }
5574             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5575                 r->access = PL1_RW;
5576             }
5577             id_mpuir_reginfo.access = PL1_RW;
5578             id_tlbtr_reginfo.access = PL1_RW;
5579         }
5580         if (arm_feature(env, ARM_FEATURE_V8)) {
5581             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5582         } else {
5583             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5584         }
5585         define_arm_cp_regs(cpu, id_cp_reginfo);
5586         if (!arm_feature(env, ARM_FEATURE_PMSA)) {
5587             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5588         } else if (arm_feature(env, ARM_FEATURE_V7)) {
5589             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5590         }
5591     }
5592 
5593     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5594         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5595     }
5596 
5597     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5598         ARMCPRegInfo auxcr_reginfo[] = {
5599             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5600               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5601               .access = PL1_RW, .type = ARM_CP_CONST,
5602               .resetvalue = cpu->reset_auxcr },
5603             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5604               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5605               .access = PL2_RW, .type = ARM_CP_CONST,
5606               .resetvalue = 0 },
5607             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5608               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5609               .access = PL3_RW, .type = ARM_CP_CONST,
5610               .resetvalue = 0 },
5611             REGINFO_SENTINEL
5612         };
5613         define_arm_cp_regs(cpu, auxcr_reginfo);
5614         if (arm_feature(env, ARM_FEATURE_V8)) {
5615             /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
5616             ARMCPRegInfo hactlr2_reginfo = {
5617                 .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
5618                 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
5619                 .access = PL2_RW, .type = ARM_CP_CONST,
5620                 .resetvalue = 0
5621             };
5622             define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
5623         }
5624     }
5625 
5626     if (arm_feature(env, ARM_FEATURE_CBAR)) {
5627         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5628             /* 32 bit view is [31:18] 0...0 [43:32]. */
5629             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5630                 | extract64(cpu->reset_cbar, 32, 12);
5631             ARMCPRegInfo cbar_reginfo[] = {
5632                 { .name = "CBAR",
5633                   .type = ARM_CP_CONST,
5634                   .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5635                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
5636                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5637                   .type = ARM_CP_CONST,
5638                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5639                   .access = PL1_R, .resetvalue = cbar32 },
5640                 REGINFO_SENTINEL
5641             };
5642             /* We don't implement an r/w 64 bit CBAR currently */
5643             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5644             define_arm_cp_regs(cpu, cbar_reginfo);
5645         } else {
5646             ARMCPRegInfo cbar = {
5647                 .name = "CBAR",
5648                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5649                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5650                 .fieldoffset = offsetof(CPUARMState,
5651                                         cp15.c15_config_base_address)
5652             };
5653             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5654                 cbar.access = PL1_R;
5655                 cbar.fieldoffset = 0;
5656                 cbar.type = ARM_CP_CONST;
5657             }
5658             define_one_arm_cp_reg(cpu, &cbar);
5659         }
5660     }
5661 
5662     if (arm_feature(env, ARM_FEATURE_VBAR)) {
5663         ARMCPRegInfo vbar_cp_reginfo[] = {
5664             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5665               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5666               .access = PL1_RW, .writefn = vbar_write,
5667               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5668                                      offsetof(CPUARMState, cp15.vbar_ns) },
5669               .resetvalue = 0 },
5670             REGINFO_SENTINEL
5671         };
5672         define_arm_cp_regs(cpu, vbar_cp_reginfo);
5673     }
5674 
5675     /* Generic registers whose values depend on the implementation */
5676     {
5677         ARMCPRegInfo sctlr = {
5678             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5679             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5680             .access = PL1_RW,
5681             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5682                                    offsetof(CPUARMState, cp15.sctlr_ns) },
5683             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5684             .raw_writefn = raw_write,
5685         };
5686         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5687             /* Normally we would always end the TB on an SCTLR write, but Linux
5688              * arch/arm/mach-pxa/sleep.S expects two instructions following
5689              * an MMU enable to execute from cache.  Imitate this behaviour.
5690              */
5691             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5692         }
5693         define_one_arm_cp_reg(cpu, &sctlr);
5694     }
5695 
5696     if (cpu_isar_feature(aa64_sve, cpu)) {
5697         define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
5698         if (arm_feature(env, ARM_FEATURE_EL2)) {
5699             define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
5700         } else {
5701             define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
5702         }
5703         if (arm_feature(env, ARM_FEATURE_EL3)) {
5704             define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
5705         }
5706     }
5707 }
5708 
5709 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5710 {
5711     CPUState *cs = CPU(cpu);
5712     CPUARMState *env = &cpu->env;
5713 
5714     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5715         gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5716                                  aarch64_fpu_gdb_set_reg,
5717                                  34, "aarch64-fpu.xml", 0);
5718     } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5719         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5720                                  51, "arm-neon.xml", 0);
5721     } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5722         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5723                                  35, "arm-vfp3.xml", 0);
5724     } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5725         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5726                                  19, "arm-vfp.xml", 0);
5727     }
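    /* The register counts above mirror what the callbacks expose; as an
     * illustration, the 51 entries for arm-neon.xml are the 32 D registers,
     * their 16 Q-register views and the 3 FP status registers handled by
     * vfp_gdb_get_reg/vfp_gdb_set_reg.
     */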
5728     gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
5729                              arm_gen_dynamic_xml(cs),
5730                              "system-registers.xml", 0);
5731 }
5732 
5733 /* Sort alphabetically by type name, except for "any". */
5734 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5735 {
5736     ObjectClass *class_a = (ObjectClass *)a;
5737     ObjectClass *class_b = (ObjectClass *)b;
5738     const char *name_a, *name_b;
5739 
5740     name_a = object_class_get_name(class_a);
5741     name_b = object_class_get_name(class_b);
5742     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5743         return 1;
5744     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5745         return -1;
5746     } else {
5747         return strcmp(name_a, name_b);
5748     }
5749 }
5750 
5751 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5752 {
5753     ObjectClass *oc = data;
5754     CPUListState *s = user_data;
5755     const char *typename;
5756     char *name;
5757 
5758     typename = object_class_get_name(oc);
5759     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5760     (*s->cpu_fprintf)(s->file, "  %s\n",
5761                       name);
5762     g_free(name);
5763 }
5764 
5765 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5766 {
5767     CPUListState s = {
5768         .file = f,
5769         .cpu_fprintf = cpu_fprintf,
5770     };
5771     GSList *list;
5772 
5773     list = object_class_get_list(TYPE_ARM_CPU, false);
5774     list = g_slist_sort(list, arm_cpu_list_compare);
5775     (*cpu_fprintf)(f, "Available CPUs:\n");
5776     g_slist_foreach(list, arm_cpu_list_entry, &s);
5777     g_slist_free(list);
5778 }
5779 
5780 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5781 {
5782     ObjectClass *oc = data;
5783     CpuDefinitionInfoList **cpu_list = user_data;
5784     CpuDefinitionInfoList *entry;
5785     CpuDefinitionInfo *info;
5786     const char *typename;
5787 
5788     typename = object_class_get_name(oc);
5789     info = g_malloc0(sizeof(*info));
5790     info->name = g_strndup(typename,
5791                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
5792     info->q_typename = g_strdup(typename);
5793 
5794     entry = g_malloc0(sizeof(*entry));
5795     entry->value = info;
5796     entry->next = *cpu_list;
5797     *cpu_list = entry;
5798 }
5799 
5800 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5801 {
5802     CpuDefinitionInfoList *cpu_list = NULL;
5803     GSList *list;
5804 
5805     list = object_class_get_list(TYPE_ARM_CPU, false);
5806     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5807     g_slist_free(list);
5808 
5809     return cpu_list;
5810 }
5811 
5812 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
5813                                    void *opaque, int state, int secstate,
5814                                    int crm, int opc1, int opc2,
5815                                    const char *name)
5816 {
5817     /* Private utility function for define_one_arm_cp_reg_with_opaque():
5818      * add a single reginfo struct to the hash table.
5819      */
5820     uint32_t *key = g_new(uint32_t, 1);
5821     ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
5822     int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
5823     int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
5824 
5825     r2->name = g_strdup(name);
5826     /* Reset the secure state to the specific incoming state.  This is
5827      * necessary as the register may have been defined with both states.
5828      */
5829     r2->secure = secstate;
5830 
5831     if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5832         /* Register is banked (using both entries in the array).
5833          * Overwrite fieldoffset with this security state's entry, since
5834          * only fieldoffset (not the array) is used after definition.
5835          */
5836         r2->fieldoffset = r->bank_fieldoffsets[ns];
5837     }
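    /* For example, VBAR (defined earlier in this file with
     * .bank_fieldoffsets = { offsetof(..., cp15.vbar_s),
     *                        offsetof(..., cp15.vbar_ns) })
     * resolves here to cp15.vbar_s for the secure instance and to
     * cp15.vbar_ns for the non-secure one.
     */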
5838 
5839     if (state == ARM_CP_STATE_AA32) {
5840         if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5841             /* If the register is banked then we don't need to migrate or
5842              * reset the 32-bit instance in certain cases:
5843              *
5844              * 1) If the register has both 32-bit and 64-bit instances then we
5845              *    can count on the 64-bit instance taking care of the
5846              *    non-secure bank.
5847              * 2) If ARMv8 is enabled then we can count on a 64-bit version
5848              *    taking care of the secure bank.  This requires that separate
5849              *    32 and 64-bit definitions are provided.
5850              */
5851             if ((r->state == ARM_CP_STATE_BOTH && ns) ||
5852                 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
5853                 r2->type |= ARM_CP_ALIAS;
5854             }
5855         } else if ((secstate != r->secure) && !ns) {
5856             /* The register is not banked so we only want to allow migration of
5857              * the non-secure instance.
5858              */
5859             r2->type |= ARM_CP_ALIAS;
5860         }
5861 
5862         if (r->state == ARM_CP_STATE_BOTH) {
5863             /* We assume it is a cp15 register if the .cp field is left unset.
5864              */
5865             if (r2->cp == 0) {
5866                 r2->cp = 15;
5867             }
5868 
5869 #ifdef HOST_WORDS_BIGENDIAN
5870             if (r2->fieldoffset) {
5871                 r2->fieldoffset += sizeof(uint32_t);
5872             }
5873 #endif
5874         }
5875     }
5876     if (state == ARM_CP_STATE_AA64) {
5877         /* To allow abbreviation of ARMCPRegInfo
5878          * definitions, we treat cp == 0 as equivalent to
5879          * the value for "standard guest-visible sysreg".
5880          * STATE_BOTH definitions are also always "standard
5881          * sysreg" in their AArch64 view (the .cp value may
5882          * be non-zero for the benefit of the AArch32 view).
5883          */
5884         if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
5885             r2->cp = CP_REG_ARM64_SYSREG_CP;
5886         }
5887         *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
5888                                   r2->opc0, opc1, opc2);
5889     } else {
5890         *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
5891     }
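    /* Note that ns is folded into the AArch32 key, so a register defined
     * for both security states occupies two distinct hash table slots,
     * one per bank.  The AArch64 key carries no ns bit, since AArch64
     * registers are always entered as non-secure (see below).
     */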
5892     if (opaque) {
5893         r2->opaque = opaque;
5894     }
5895     /* reginfo passed to helpers is correct for the actual access,
5896      * and is never ARM_CP_STATE_BOTH:
5897      */
5898     r2->state = state;
5899     /* Make sure reginfo passed to helpers for wildcarded regs
5900      * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
5901      */
5902     r2->crm = crm;
5903     r2->opc1 = opc1;
5904     r2->opc2 = opc2;
5905     /* By convention, for wildcarded registers only the first
5906      * entry is used for migration; the others are marked as
5907      * ALIAS so we don't try to transfer the register
5908      * multiple times. Special registers (ie NOP/WFI) are
5909      * never migratable and not even raw-accessible.
5910      */
5911     if ((r->type & ARM_CP_SPECIAL)) {
5912         r2->type |= ARM_CP_NO_RAW;
5913     }
5914     if (((r->crm == CP_ANY) && crm != 0) ||
5915         ((r->opc1 == CP_ANY) && opc1 != 0) ||
5916         ((r->opc2 == CP_ANY) && opc2 != 0)) {
5917         r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
5918     }
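    /* As an illustration, a register defined with .opc2 = CP_ANY (and
     * fixed crm/opc1) expands to eight entries; only the opc2 == 0
     * instance is migrated and visible to gdb, while the other seven
     * become non-migratable aliases.
     */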
5919 
5920     /* Check that raw accesses are either forbidden or handled. Note that
5921      * we can't assert this earlier because the setup of fieldoffset for
5922      * banked registers has to be done first.
5923      */
5924     if (!(r2->type & ARM_CP_NO_RAW)) {
5925         assert(!raw_accessors_invalid(r2));
5926     }
5927 
5928     /* Overriding of an existing definition must be explicitly
5929      * requested.
5930      */
5931     if (!(r->type & ARM_CP_OVERRIDE)) {
5932         ARMCPRegInfo *oldreg;
5933         oldreg = g_hash_table_lookup(cpu->cp_regs, key);
5934         if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
5935             fprintf(stderr, "Register redefined: cp=%d %d bit "
5936                     "crn=%d crm=%d opc1=%d opc2=%d, "
5937                     "was %s, now %s\n", r2->cp, 32 + 32 * is64,
5938                     r2->crn, r2->crm, r2->opc1, r2->opc2,
5939                     oldreg->name, r2->name);
5940             g_assert_not_reached();
5941         }
5942     }
5943     g_hash_table_insert(cpu->cp_regs, key, r2);
5944 }
5945 
5946 
5947 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
5948                                        const ARMCPRegInfo *r, void *opaque)
5949 {
5950     /* Define implementations of coprocessor registers.
5951      * We store these in a hashtable because typically
5952      * there are fewer than 150 registers in a space which
5953      * is 16*16*16*8*8 = 262144 in size.
5954      * Wildcarding is supported for the crm, opc1 and opc2 fields.
5955      * If a register is defined twice then the second definition is
5956      * used, so this can be used to define some generic registers and
5957      * then override them with implementation specific variations.
5958      * At least one of the original and the second definition should
5959      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
5960      * against accidental use.
5961      *
5962      * The state field defines whether the register is to be
5963      * visible in the AArch32 or AArch64 execution state. If the
5964      * state is set to ARM_CP_STATE_BOTH then we synthesise a
5965      * reginfo structure for the AArch32 view, which sees the lower
5966      * 32 bits of the 64 bit register.
5967      *
5968      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
5969      * be wildcarded. AArch64 registers are always considered to be 64
5970      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
5971      * the register, if any.
5972      */
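    /* As a hypothetical illustration, a definition with
     *   .crm = CP_ANY, .opc1 = 0, .opc2 = 0
     * is expanded by the loops below into sixteen reginfo entries,
     * one for each CRm value 0..15.
     */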
5973     int crm, opc1, opc2, state;
5974     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
5975     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
5976     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
5977     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
5978     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
5979     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
5980     /* 64 bit registers have only CRm and Opc1 fields */
5981     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
5982     /* op0 only exists in the AArch64 encodings */
5983     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
5984     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
5985     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
5986     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
5987      * encodes a minimum access level for the register. We roll this
5988      * runtime check into our general permission check code, so check
5989      * here that the reginfo's specified permissions are strict enough
5990      * to encompass the generic architectural permission check.
5991      */
5992     if (r->state != ARM_CP_STATE_AA32) {
5993         int mask = 0;
5994         switch (r->opc1) {
5995         case 0: case 1: case 2:
5996             /* min_EL EL1 */
5997             mask = PL1_RW;
5998             break;
5999         case 3:
6000             /* min_EL EL0 */
6001             mask = PL0_RW;
6002             break;
6003         case 4:
6004             /* min_EL EL2 */
6005             mask = PL2_RW;
6006             break;
6007         case 5:
6008             /* unallocated encoding, so not possible */
6009             assert(false);
6010             break;
6011         case 6:
6012             /* min_EL EL3 */
6013             mask = PL3_RW;
6014             break;
6015         case 7:
6016             /* min_EL EL1, secure mode only (we don't check the latter) */
6017             mask = PL1_RW;
6018             break;
6019         default:
6020             /* broken reginfo with out-of-range opc1 */
6021             assert(false);
6022             break;
6023         }
6024         /* assert our permissions are not too lax (stricter is fine) */
6025         assert((r->access & ~mask) == 0);
6026     }
6027 
6028     /* Check that the register definition has enough info to handle
6029      * reads and writes if they are permitted.
6030      */
6031     if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
6032         if (r->access & PL3_R) {
6033             assert((r->fieldoffset ||
6034                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
6035                    r->readfn);
6036         }
6037         if (r->access & PL3_W) {
6038             assert((r->fieldoffset ||
6039                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
6040                    r->writefn);
6041         }
6042     }
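    /* Put another way: a readable or writable register that is neither
     * const nor special must supply a fieldoffset (plain or banked) or
     * an explicit readfn/writefn, otherwise the access could not be
     * handled at all.
     */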
6043     /* Bad type field probably means missing sentinel at end of reg list */
6044     assert(cptype_valid(r->type));
6045     for (crm = crmmin; crm <= crmmax; crm++) {
6046         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
6047             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
6048                 for (state = ARM_CP_STATE_AA32;
6049                      state <= ARM_CP_STATE_AA64; state++) {
6050                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
6051                         continue;
6052                     }
6053                     if (state == ARM_CP_STATE_AA32) {
6054                         /* Under AArch32 CP registers can be common
6055                          * (same for secure and non-secure world) or banked.
6056                          */
6057                         char *name;
6058 
6059                         switch (r->secure) {
6060                         case ARM_CP_SECSTATE_S:
6061                         case ARM_CP_SECSTATE_NS:
6062                             add_cpreg_to_hashtable(cpu, r, opaque, state,
6063                                                    r->secure, crm, opc1, opc2,
6064                                                    r->name);
6065                             break;
6066                         default:
6067                             name = g_strdup_printf("%s_S", r->name);
6068                             add_cpreg_to_hashtable(cpu, r, opaque, state,
6069                                                    ARM_CP_SECSTATE_S,
6070                                                    crm, opc1, opc2, name);
6071                             g_free(name);
6072                             add_cpreg_to_hashtable(cpu, r, opaque, state,
6073                                                    ARM_CP_SECSTATE_NS,
6074                                                    crm, opc1, opc2, r->name);
6075                             break;
6076                         }
6077                     } else {
6078                         /* AArch64 registers get mapped to the non-secure
6079                          * instance of AArch32 */
6080                         add_cpreg_to_hashtable(cpu, r, opaque, state,
6081                                                ARM_CP_SECSTATE_NS,
6082                                                crm, opc1, opc2, r->name);
6083                     }
6084                 }
6085             }
6086         }
6087     }
6088 }
6089 
6090 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
6091                                     const ARMCPRegInfo *regs, void *opaque)
6092 {
6093     /* Define a whole list of registers */
6094     const ARMCPRegInfo *r;
6095     for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
6096         define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
6097     }
6098 }
6099 
6100 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
6101 {
6102     return g_hash_table_lookup(cpregs, &encoded_cp);
6103 }
6104 
6105 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
6106                          uint64_t value)
6107 {
6108     /* Helper coprocessor write function for write-ignore registers */
6109 }
6110 
6111 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
6112 {
6113     /* Helper coprocessor read function for read-as-zero registers */
6114     return 0;
6115 }
6116 
6117 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
6118 {
6119     /* Helper coprocessor reset function for do-nothing-on-reset registers */
6120 }
6121 
6122 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
6123 {
6124     /* Return true if it is not valid for us to switch to
6125      * this CPU mode (ie all the UNPREDICTABLE cases in
6126      * the ARM ARM CPSRWriteByInstr pseudocode).
6127      */
6128 
6129     /* Changes to or from Hyp via MSR and CPS are illegal. */
6130     if (write_type == CPSRWriteByInstr &&
6131         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
6132          mode == ARM_CPU_MODE_HYP)) {
6133         return 1;
6134     }
6135 
6136     switch (mode) {
6137     case ARM_CPU_MODE_USR:
6138         return 0;
6139     case ARM_CPU_MODE_SYS:
6140     case ARM_CPU_MODE_SVC:
6141     case ARM_CPU_MODE_ABT:
6142     case ARM_CPU_MODE_UND:
6143     case ARM_CPU_MODE_IRQ:
6144     case ARM_CPU_MODE_FIQ:
6145         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
6146          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
6147          */
6148         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
6149          * and CPS are treated as illegal mode changes.
6150          */
6151         if (write_type == CPSRWriteByInstr &&
6152             (env->cp15.hcr_el2 & HCR_TGE) &&
6153             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
6154             !arm_is_secure_below_el3(env)) {
6155             return 1;
6156         }
6157         return 0;
6158     case ARM_CPU_MODE_HYP:
6159         return !arm_feature(env, ARM_FEATURE_EL2)
6160             || arm_current_el(env) < 2 || arm_is_secure(env);
6161     case ARM_CPU_MODE_MON:
6162         return arm_current_el(env) < 3;
6163     default:
6164         return 1;
6165     }
6166 }
6167 
6168 uint32_t cpsr_read(CPUARMState *env)
6169 {
6170     int ZF;
6171     ZF = (env->ZF == 0);
6172     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
6173         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
6174         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
6175         | ((env->condexec_bits & 0xfc) << 8)
6176         | (env->GE << 16) | (env->daif & CPSR_AIF);
6177 }
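/* As a worked example: with Z and C set, every other flag clear and the
 * CPU in USR mode (0x10), cpsr_read() returns 0x60000010 -- Z at bit 30
 * and C at bit 29.
 */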
6178 
6179 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
6180                 CPSRWriteType write_type)
6181 {
6182     uint32_t changed_daif;
6183 
6184     if (mask & CPSR_NZCV) {
6185         env->ZF = (~val) & CPSR_Z;
6186         env->NF = val;
6187         env->CF = (val >> 29) & 1;
6188         env->VF = (val << 3) & 0x80000000;
6189     }
6190     if (mask & CPSR_Q)
6191         env->QF = ((val & CPSR_Q) != 0);
6192     if (mask & CPSR_T)
6193         env->thumb = ((val & CPSR_T) != 0);
6194     if (mask & CPSR_IT_0_1) {
6195         env->condexec_bits &= ~3;
6196         env->condexec_bits |= (val >> 25) & 3;
6197     }
6198     if (mask & CPSR_IT_2_7) {
6199         env->condexec_bits &= 3;
6200         env->condexec_bits |= (val >> 8) & 0xfc;
6201     }
6202     if (mask & CPSR_GE) {
6203         env->GE = (val >> 16) & 0xf;
6204     }
6205 
6206     /* In a V7 implementation that includes the security extensions but does
6207      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
6208      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
6209      * bits respectively.
6210      *
6211      * In a V8 implementation, it is permitted for privileged software to
6212      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
6213      */
6214     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
6215         arm_feature(env, ARM_FEATURE_EL3) &&
6216         !arm_feature(env, ARM_FEATURE_EL2) &&
6217         !arm_is_secure(env)) {
6218 
6219         changed_daif = (env->daif ^ val) & mask;
6220 
6221         if (changed_daif & CPSR_A) {
6222             /* Check to see if we are allowed to change the masking of async
6223              * abort exceptions from a non-secure state.
6224              */
6225             if (!(env->cp15.scr_el3 & SCR_AW)) {
6226                 qemu_log_mask(LOG_GUEST_ERROR,
6227                               "Ignoring attempt to switch CPSR_A flag from "
6228                               "non-secure world with SCR.AW bit clear\n");
6229                 mask &= ~CPSR_A;
6230             }
6231         }
6232 
6233         if (changed_daif & CPSR_F) {
6234             /* Check to see if we are allowed to change the masking of FIQ
6235              * exceptions from a non-secure state.
6236              */
6237             if (!(env->cp15.scr_el3 & SCR_FW)) {
6238                 qemu_log_mask(LOG_GUEST_ERROR,
6239                               "Ignoring attempt to switch CPSR_F flag from "
6240                               "non-secure world with SCR.FW bit clear\n");
6241                 mask &= ~CPSR_F;
6242             }
6243 
6244             /* Check whether non-maskable FIQ (NMFI) support is enabled.
6245              * If this bit is set software is not allowed to mask
6246              * FIQs, but is allowed to set CPSR_F to 0.
6247              */
6248             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
6249                 (val & CPSR_F)) {
6250                 qemu_log_mask(LOG_GUEST_ERROR,
6251                               "Ignoring attempt to enable CPSR_F flag "
6252                               "(non-maskable FIQ [NMFI] support enabled)\n");
6253                 mask &= ~CPSR_F;
6254             }
6255         }
6256     }
6257 
6258     env->daif &= ~(CPSR_AIF & mask);
6259     env->daif |= val & CPSR_AIF & mask;
6260 
6261     if (write_type != CPSRWriteRaw &&
6262         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
6263         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
6264             /* Note that we can only get here in USR mode if this is a
6265              * gdb stub write; for this case we follow the architectural
6266              * behaviour for guest writes in USR mode of ignoring an attempt
6267              * to switch mode. (Those are caught by translate.c for writes
6268              * triggered by guest instructions.)
6269              */
6270             mask &= ~CPSR_M;
6271         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
6272             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
6273              * v7, and has defined behaviour in v8:
6274              *  + leave CPSR.M untouched
6275              *  + allow changes to the other CPSR fields
6276              *  + set PSTATE.IL
6277              * For user changes via the GDB stub, we don't set PSTATE.IL,
6278              * as this would be unnecessarily harsh for a user error.
6279              */
6280             mask &= ~CPSR_M;
6281             if (write_type != CPSRWriteByGDBStub &&
6282                 arm_feature(env, ARM_FEATURE_V8)) {
6283                 mask |= CPSR_IL;
6284                 val |= CPSR_IL;
6285             }
6286             qemu_log_mask(LOG_GUEST_ERROR,
6287                           "Illegal AArch32 mode switch attempt from %s to %s\n",
6288                           aarch32_mode_name(env->uncached_cpsr),
6289                           aarch32_mode_name(val));
6290         } else {
6291             qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
6292                           write_type == CPSRWriteExceptionReturn ?
6293                           "Exception return from AArch32" :
6294                           "AArch32 mode switch from",
6295                           aarch32_mode_name(env->uncached_cpsr),
6296                           aarch32_mode_name(val), env->regs[15]);
6297             switch_mode(env, val & CPSR_M);
6298         }
6299     }
6300     mask &= ~CACHED_CPSR_BITS;
6301     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
6302 }
6303 
6304 /* Sign/zero extend */
6305 uint32_t HELPER(sxtb16)(uint32_t x)
6306 {
6307     uint32_t res;
6308     res = (uint16_t)(int8_t)x;
6309     res |= (uint32_t)(int8_t)(x >> 16) << 16;
6310     return res;
6311 }
6312 
6313 uint32_t HELPER(uxtb16)(uint32_t x)
6314 {
6315     uint32_t res;
6316     res = (uint16_t)(uint8_t)x;
6317     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
6318     return res;
6319 }
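/* Worked example: both helpers operate on the bytes at [7:0] and [23:16],
 * so sxtb16(0x00ff0080) yields 0xffffff80 while uxtb16(0x00ff0080)
 * yields 0x00ff0080.
 */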
6320 
6321 int32_t HELPER(sdiv)(int32_t num, int32_t den)
6322 {
6323     if (den == 0)
6324       return 0;
6325     if (num == INT_MIN && den == -1)
6326       return INT_MIN;
6327     return num / den;
6328 }
6329 
6330 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
6331 {
6332     if (den == 0)
6333       return 0;
6334     return num / den;
6335 }
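/* Both helpers follow the ARM architectural division rules rather than
 * C's: division by zero returns 0, and the signed overflow case returns
 * INT_MIN, so e.g. HELPER(sdiv)(INT_MIN, -1) == INT_MIN with no trap.
 */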
6336 
6337 uint32_t HELPER(rbit)(uint32_t x)
6338 {
6339     return revbit32(x);
6340 }
6341 
6342 #if defined(CONFIG_USER_ONLY)
6343 
6344 /* These should probably raise undefined insn exceptions.  */
6345 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
6346 {
6347     ARMCPU *cpu = arm_env_get_cpu(env);
6348 
6349     cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
6350 }
6351 
6352 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
6353 {
6354     ARMCPU *cpu = arm_env_get_cpu(env);
6355 
6356     cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
6357     return 0;
6358 }
6359 
6360 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
6361 {
6362     /* translate.c should never generate calls here in user-only mode */
6363     g_assert_not_reached();
6364 }
6365 
6366 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
6367 {
6368     /* translate.c should never generate calls here in user-only mode */
6369     g_assert_not_reached();
6370 }
6371 
6372 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
6373 {
6374     /* The TT instructions can be used by unprivileged code, but in
6375      * user-only emulation we don't have the MPU.
6376      * Luckily since we know we are NonSecure unprivileged (and that in
6377      * turn means that the A flag wasn't specified), all the bits in the
6378      * register must be zero:
6379      *  IREGION: 0 because IRVALID is 0
6380      *  IRVALID: 0 because NS
6381      *  S: 0 because NS
6382      *  NSRW: 0 because NS
6383      *  NSR: 0 because NS
6384      *  RW: 0 because unpriv and A flag not set
6385      *  R: 0 because unpriv and A flag not set
6386      *  SRVALID: 0 because NS
6387      *  MRVALID: 0 because unpriv and A flag not set
6388      *  SREGION: 0 because SRVALID is 0
6389      *  MREGION: 0 because MRVALID is 0
6390      */
6391     return 0;
6392 }
6393 
6394 static void switch_mode(CPUARMState *env, int mode)
6395 {
6396     ARMCPU *cpu = arm_env_get_cpu(env);
6397 
6398     if (mode != ARM_CPU_MODE_USR) {
6399         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
6400     }
6401 }
6402 
6403 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
6404                                  uint32_t cur_el, bool secure)
6405 {
6406     return 1;
6407 }
6408 
6409 void aarch64_sync_64_to_32(CPUARMState *env)
6410 {
6411     g_assert_not_reached();
6412 }
6413 
6414 #else
6415 
6416 static void switch_mode(CPUARMState *env, int mode)
6417 {
6418     int old_mode;
6419     int i;
6420 
6421     old_mode = env->uncached_cpsr & CPSR_M;
6422     if (mode == old_mode)
6423         return;
6424 
6425     if (old_mode == ARM_CPU_MODE_FIQ) {
6426         memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
6427         memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
6428     } else if (mode == ARM_CPU_MODE_FIQ) {
6429         memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
6430         memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
6431     }
6432 
6433     i = bank_number(old_mode);
6434     env->banked_r13[i] = env->regs[13];
6435     env->banked_spsr[i] = env->spsr;
6436 
6437     i = bank_number(mode);
6438     env->regs[13] = env->banked_r13[i];
6439     env->spsr = env->banked_spsr[i];
6440 
6441     env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
6442     env->regs[14] = env->banked_r14[r14_bank_number(mode)];
6443 }
6444 
6445 /* Physical Interrupt Target EL Lookup Table
6446  *
6447  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
6448  *
6449  * The below multi-dimensional table is used for looking up the target
6450  * exception level given numerous condition criteria.  Specifically, the
6451  * target EL is based on SCR and HCR routing controls as well as the
6452  * currently executing EL and secure state.
6453  *
6454  *    Dimensions:
6455  *    target_el_table[2][2][2][2][2][4]
6456  *                    |  |  |  |  |  +--- Current EL
6457  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
6458  *                    |  |  |  +--------- HCR mask override
6459  *                    |  |  +------------ SCR exec state control
6460  *                    |  +--------------- SCR mask override
6461  *                    +------------------ 32-bit(0)/64-bit(1) EL3
6462  *
6463  *    The table values are as such:
6464  *    0-3 = EL0-EL3
6465  *     -1 = Cannot occur
6466  *
6467  * The ARM ARM target EL table includes entries indicating that an "exception
6468  * is not taken".  The two cases where this is applicable are:
6469  *    1) An exception is taken from EL3 but the SCR does not have the exception
6470  *    routed to EL3.
6471  *    2) An exception is taken from EL2 but the HCR does not have the exception
6472  *    routed to EL2.
6473  * In these two cases, the below table contains a target of EL1.  This value is
6474  * returned as it is expected that the consumer of the table data will check
6475  * for "target EL >= current EL" to ensure the exception is not taken.
6476  *
6477  *            SCR     HCR
6478  *         64  EA     AMO                 From
6479  *        BIT IRQ     IMO      Non-secure         Secure
6480  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
6481  */
6482 static const int8_t target_el_table[2][2][2][2][2][4] = {
6483     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
6484        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
6485       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
6486        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
6487      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
6488        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
6489       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
6490        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
6491     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
6492        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
6493       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
6494        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
6495      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
6496        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
6497       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
6498        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
6499 };
6500 
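/* Worked example (an addition, not in the original source): a physical
 * IRQ taken from non-secure EL0 on a system with a 64-bit EL3, SCR.RW
 * == 1, SCR.IRQ == 0 and HCR.IMO == 1 indexes the table as
 * [is64=1][scr=0][rw=1][hcr=1][secure=0][cur_el=0], i.e. the
 * "1 0 1 1" row above, giving target EL2.
 */
#if 0
    int el = target_el_table[1][0][1][1][0][0]; /* == 2 */
#endif
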
6501 /*
6502  * Determine the target EL for physical exceptions
6503  */
6504 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
6505                                  uint32_t cur_el, bool secure)
6506 {
6507     CPUARMState *env = cs->env_ptr;
6508     int rw;
6509     int scr;
6510     int hcr;
6511     int target_el;
6512     /* Is the highest EL AArch64? */
6513     int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
6514 
6515     if (arm_feature(env, ARM_FEATURE_EL3)) {
6516         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
6517     } else {
6518         /* Either EL2 is the highest EL (and so the EL2 register width
6519          * is given by is64); or there is no EL2 or EL3, in which case
6520          * the value of 'rw' does not affect the table lookup anyway.
6521          */
6522         rw = is64;
6523     }
6524 
6525     switch (excp_idx) {
6526     case EXCP_IRQ:
6527         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
6528         hcr = arm_hcr_el2_imo(env);
6529         break;
6530     case EXCP_FIQ:
6531         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
6532         hcr = arm_hcr_el2_fmo(env);
6533         break;
6534     default:
6535         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
6536         hcr = arm_hcr_el2_amo(env);
6537         break;
6538     }
6539 
6540     /* If HCR.TGE is set then HCR is treated as being 1 */
6541     hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
6542 
6543     /* Perform a table-lookup for the target EL given the current state */
6544     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
6545 
6546     assert(target_el > 0);
6547 
6548     return target_el;
6549 }
6550 
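/* Illustration (an addition, not in the original source): per the table
 * notes above, callers compare the looked-up target EL against the
 * current EL; a result below the current EL means the exception is
 * masked rather than taken. (cs, cur_el and secure here stand for the
 * caller's own values.)
 */
#if 0
    uint32_t target_el = arm_phys_excp_target_el(cs, EXCP_IRQ, cur_el, secure);
    bool taken = target_el >= cur_el;
#endif
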
6551 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
6552                             ARMMMUIdx mmu_idx, bool ignfault)
6553 {
6554     CPUState *cs = CPU(cpu);
6555     CPUARMState *env = &cpu->env;
6556     MemTxAttrs attrs = {};
6557     MemTxResult txres;
6558     target_ulong page_size;
6559     hwaddr physaddr;
6560     int prot;
6561     ARMMMUFaultInfo fi = {};
6562     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
6563     int exc;
6564     bool exc_secure;
6565 
6566     if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
6567                       &attrs, &prot, &page_size, &fi, NULL)) {
6568         /* MPU/SAU lookup failed */
6569         if (fi.type == ARMFault_QEMU_SFault) {
6570             qemu_log_mask(CPU_LOG_INT,
6571                           "...SecureFault with SFSR.AUVIOL during stacking\n");
6572             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
6573             env->v7m.sfar = addr;
6574             exc = ARMV7M_EXCP_SECURE;
6575             exc_secure = false;
6576         } else {
6577             qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n");
6578             env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
6579             exc = ARMV7M_EXCP_MEM;
6580             exc_secure = secure;
6581         }
6582         goto pend_fault;
6583     }
6584     address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value,
6585                          attrs, &txres);
6586     if (txres != MEMTX_OK) {
6587         /* BusFault trying to write the data */
6588         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
6589         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
6590         exc = ARMV7M_EXCP_BUS;
6591         exc_secure = false;
6592         goto pend_fault;
6593     }
6594     return true;
6595 
6596 pend_fault:
6597     /* By pending the exception at this point we are making
6598      * the IMPDEF choice "overridden exceptions pended" (see the
6599      * MergeExcInfo() pseudocode). The other choice would be to not
6600      * pend them now and then make a choice about which to throw away
6601      * later if we have two derived exceptions.
6602      * The only case when we must not pend the exception but instead
6603      * throw it away is if we are doing the push of the callee registers
6604      * and we've already generated a derived exception. Even in this
6605      * case we will still update the fault status registers.
6606      */
6607     if (!ignfault) {
6608         armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
6609     }
6610     return false;
6611 }
6612 
6613 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
6614                            ARMMMUIdx mmu_idx)
6615 {
6616     CPUState *cs = CPU(cpu);
6617     CPUARMState *env = &cpu->env;
6618     MemTxAttrs attrs = {};
6619     MemTxResult txres;
6620     target_ulong page_size;
6621     hwaddr physaddr;
6622     int prot;
6623     ARMMMUFaultInfo fi = {};
6624     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
6625     int exc;
6626     bool exc_secure;
6627     uint32_t value;
6628 
6629     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
6630                       &attrs, &prot, &page_size, &fi, NULL)) {
6631         /* MPU/SAU lookup failed */
6632         if (fi.type == ARMFault_QEMU_SFault) {
6633             qemu_log_mask(CPU_LOG_INT,
6634                           "...SecureFault with SFSR.AUVIOL during unstack\n");
6635             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
6636             env->v7m.sfar = addr;
6637             exc = ARMV7M_EXCP_SECURE;
6638             exc_secure = false;
6639         } else {
6640             qemu_log_mask(CPU_LOG_INT,
6641                           "...MemManageFault with CFSR.MUNSTKERR\n");
6642             env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
6643             exc = ARMV7M_EXCP_MEM;
6644             exc_secure = secure;
6645         }
6646         goto pend_fault;
6647     }
6648 
6649     value = address_space_ldl(arm_addressspace(cs, attrs), physaddr,
6650                               attrs, &txres);
6651     if (txres != MEMTX_OK) {
6652         /* BusFault trying to read the data */
6653         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
6654         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
6655         exc = ARMV7M_EXCP_BUS;
6656         exc_secure = false;
6657         goto pend_fault;
6658     }
6659 
6660     *dest = value;
6661     return true;
6662 
6663 pend_fault:
6664     /* By pending the exception at this point we are making
6665      * the IMPDEF choice "overridden exceptions pended" (see the
6666      * MergeExcInfo() pseudocode). The other choice would be to not
6667      * pend them now and then make a choice about which to throw away
6668      * later if we have two derived exceptions.
6669      */
6670     armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
6671     return false;
6672 }
6673 
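/* Illustration (an addition, not in the original source): the two
 * helpers above are designed to be chained with &&, so the first
 * failing access pends its derived exception and short-circuits the
 * remaining accesses, exactly as the real callers below do. (sp and
 * mmu_idx here stand for the caller's own values.)
 */
#if 0
    bool ok = v7m_stack_write(cpu, sp, env->regs[0], mmu_idx, false) &&
              v7m_stack_write(cpu, sp + 4, env->regs[1], mmu_idx, false);
#endif
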
6674 /* Write to v7M CONTROL.SPSEL bit for the specified security bank.
6675  * This may change the current stack pointer between Main and Process
6676  * stack pointers if it is done for the CONTROL register for the current
6677  * security state.
6678  */
6679 static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
6680                                                  bool new_spsel,
6681                                                  bool secstate)
6682 {
6683     bool old_is_psp = v7m_using_psp(env);
6684 
6685     env->v7m.control[secstate] =
6686         deposit32(env->v7m.control[secstate],
6687                   R_V7M_CONTROL_SPSEL_SHIFT,
6688                   R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
6689 
6690     if (secstate == env->v7m.secure) {
6691         bool new_is_psp = v7m_using_psp(env);
6692         uint32_t tmp;
6693 
6694         if (old_is_psp != new_is_psp) {
6695             tmp = env->v7m.other_sp;
6696             env->v7m.other_sp = env->regs[13];
6697             env->regs[13] = tmp;
6698         }
6699     }
6700 }
6701 
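/* Illustration (an addition, not in the original source): deposit32()
 * from qemu/bitops.h rewrites just the SPSEL bit field, leaving the
 * rest of the CONTROL value intact. (old_ctrl and new_spsel are
 * placeholder names.)
 */
#if 0
    uint32_t ctrl = deposit32(old_ctrl, R_V7M_CONTROL_SPSEL_SHIFT,
                              R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
    /* equivalent to clearing the field and ORing in the new bit */
#endif
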
6702 /* Write to v7M CONTROL.SPSEL bit. This may change the current
6703  * stack pointer between Main and Process stack pointers.
6704  */
6705 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
6706 {
6707     write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
6708 }
6709 
6710 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
6711 {
6712     /* Write a new value to v7m.exception, thus transitioning into or out
6713      * of Handler mode; this may result in a change of active stack pointer.
6714      */
6715     bool new_is_psp, old_is_psp = v7m_using_psp(env);
6716     uint32_t tmp;
6717 
6718     env->v7m.exception = new_exc;
6719 
6720     new_is_psp = v7m_using_psp(env);
6721 
6722     if (old_is_psp != new_is_psp) {
6723         tmp = env->v7m.other_sp;
6724         env->v7m.other_sp = env->regs[13];
6725         env->regs[13] = tmp;
6726     }
6727 }
6728 
6729 /* Switch M profile security state between NS and S */
6730 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
6731 {
6732     uint32_t new_ss_msp, new_ss_psp;
6733 
6734     if (env->v7m.secure == new_secstate) {
6735         return;
6736     }
6737 
6738     /* All the banked state is accessed by looking at env->v7m.secure
6739      * except for the stack pointer; rearrange the SP appropriately.
6740      */
6741     new_ss_msp = env->v7m.other_ss_msp;
6742     new_ss_psp = env->v7m.other_ss_psp;
6743 
6744     if (v7m_using_psp(env)) {
6745         env->v7m.other_ss_psp = env->regs[13];
6746         env->v7m.other_ss_msp = env->v7m.other_sp;
6747     } else {
6748         env->v7m.other_ss_msp = env->regs[13];
6749         env->v7m.other_ss_psp = env->v7m.other_sp;
6750     }
6751 
6752     env->v7m.secure = new_secstate;
6753 
6754     if (v7m_using_psp(env)) {
6755         env->regs[13] = new_ss_psp;
6756         env->v7m.other_sp = new_ss_msp;
6757     } else {
6758         env->regs[13] = new_ss_msp;
6759         env->v7m.other_sp = new_ss_psp;
6760     }
6761 }
6762 
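/* Illustration (an addition, not in the original source): M profile has
 * four stack pointers (Secure/Non-secure x Main/Process), stored as:
 *   env->regs[13]          - the active SP
 *   env->v7m.other_sp      - the inactive SP of the current state
 *   env->v7m.other_ss_msp  - MSP of the inactive security state
 *   env->v7m.other_ss_psp  - PSP of the inactive security state
 * switch_v7m_security_state() above rotates values between these slots
 * so that all four survive the switch.
 */
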
6763 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
6764 {
6765     /* Handle v7M BXNS:
6766      *  - if the return value is a magic value, do exception return (like BX)
6767      *  - otherwise bit 0 of the return value is the target security state
6768      */
6769     uint32_t min_magic;
6770 
6771     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6772         /* Covers FNC_RETURN and EXC_RETURN magic */
6773         min_magic = FNC_RETURN_MIN_MAGIC;
6774     } else {
6775         /* EXC_RETURN magic only */
6776         min_magic = EXC_RETURN_MIN_MAGIC;
6777     }
6778 
6779     if (dest >= min_magic) {
6780         /* This is an exception return magic value; put it where
6781          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
6782          * Note that if we ever add gen_ss_advance() singlestep support to
6783          * M profile this should count as an "instruction execution complete"
6784          * event (compare gen_bx_excret_final_code()).
6785          */
6786         env->regs[15] = dest & ~1;
6787         env->thumb = dest & 1;
6788         HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
6789         /* notreached */
6790     }
6791 
6792     /* translate.c should have made BXNS UNDEF unless we're secure */
6793     assert(env->v7m.secure);
6794 
6795     switch_v7m_security_state(env, dest & 1);
6796     env->thumb = 1;
6797     env->regs[15] = dest & ~1;
6798 }
6799 
6800 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
6801 {
6802     /* Handle v7M BLXNS:
6803      *  - bit 0 of the destination address is the target security state
6804      */
6805 
6806     /* At this point regs[15] is the address just after the BLXNS */
6807     uint32_t nextinst = env->regs[15] | 1;
6808     uint32_t sp = env->regs[13] - 8;
6809     uint32_t saved_psr;
6810 
6811     /* translate.c will have made BLXNS UNDEF unless we're secure */
6812     assert(env->v7m.secure);
6813 
6814     if (dest & 1) {
6815         /* target is Secure, so this is just a normal BLX,
6816          * except that the low bit doesn't indicate Thumb/not.
6817          */
6818         env->regs[14] = nextinst;
6819         env->thumb = 1;
6820         env->regs[15] = dest & ~1;
6821         return;
6822     }
6823 
6824     /* Target is non-secure: first push a stack frame */
6825     if (!QEMU_IS_ALIGNED(sp, 8)) {
6826         qemu_log_mask(LOG_GUEST_ERROR,
6827                       "BLXNS with misaligned SP is UNPREDICTABLE\n");
6828     }
6829 
6830     if (sp < v7m_sp_limit(env)) {
6831         raise_exception(env, EXCP_STKOF, 0, 1);
6832     }
6833 
6834     saved_psr = env->v7m.exception;
6835     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
6836         saved_psr |= XPSR_SFPA;
6837     }
6838 
6839     /* Note that these stores can throw exceptions on MPU faults */
6840     cpu_stl_data(env, sp, nextinst);
6841     cpu_stl_data(env, sp + 4, saved_psr);
6842 
6843     env->regs[13] = sp;
6844     env->regs[14] = 0xfeffffff;
6845     if (arm_v7m_is_handler_mode(env)) {
6846         /* Write a dummy value to IPSR, to avoid leaking the current secure
6847          * exception number to non-secure code. This is guaranteed not
6848          * to cause write_v7m_exception() to actually change stacks.
6849          */
6850         write_v7m_exception(env, 1);
6851     }
6852     switch_v7m_security_state(env, 0);
6853     env->thumb = 1;
6854     env->regs[15] = dest;
6855 }
6856 
6857 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
6858                                 bool spsel)
6859 {
6860     /* Return a pointer to the location where we currently store the
6861      * stack pointer for the requested security state and thread mode.
6862      * This pointer will become invalid if the CPU state is updated
6863      * such that the stack pointers are switched around (eg changing
6864      * the SPSEL control bit).
6865      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
6866      * Unlike that pseudocode, we require the caller to pass us in the
6867      * SPSEL control bit value; this is because we also use this
6868      * function in handling of pushing of the callee-saves registers
6869      * part of the v8M stack frame (pseudocode PushCalleeStack()),
6870      * and in the tailchain codepath the SPSEL bit comes from the exception
6871      * return magic LR value from the previous exception. The pseudocode
6872      * opencodes the stack-selection in PushCalleeStack(), but we prefer
6873      * to make this utility function generic enough to do the job.
6874      */
6875     bool want_psp = threadmode && spsel;
6876 
6877     if (secure == env->v7m.secure) {
6878         if (want_psp == v7m_using_psp(env)) {
6879             return &env->regs[13];
6880         } else {
6881             return &env->v7m.other_sp;
6882         }
6883     } else {
6884         if (want_psp) {
6885             return &env->v7m.other_ss_psp;
6886         } else {
6887             return &env->v7m.other_ss_msp;
6888         }
6889     }
6890 }
6891 
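/* Usage sketch (an addition, not in the original source): fetching the
 * Secure process stack pointer for Thread mode. The returned pointer
 * must be consumed before any operation that can switch stacks.
 */
#if 0
    uint32_t *sp_p = get_v7m_sp_ptr(env, true, true, true);
    uint32_t sp = *sp_p;
#endif
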
6892 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
6893                                 uint32_t *pvec)
6894 {
6895     CPUState *cs = CPU(cpu);
6896     CPUARMState *env = &cpu->env;
6897     MemTxResult result;
6898     uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
6899     uint32_t vector_entry;
6900     MemTxAttrs attrs = {};
6901     ARMMMUIdx mmu_idx;
6902     bool exc_secure;
6903 
6904     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
6905 
6906     /* We don't do a get_phys_addr() here because the rules for vector
6907      * loads are special: they always use the default memory map, and
6908      * the default memory map permits reads from all addresses.
6909      * Since there's no easy way to tell pmsav8_mpu_lookup() that we
6910      * want this special case, which would always say "yes",
6911      * we just do the SAU lookup here followed by a direct physical load.
6912      */
6913     attrs.secure = targets_secure;
6914     attrs.user = false;
6915 
6916     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
6917         V8M_SAttributes sattrs = {};
6918 
6919         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
6920         if (sattrs.ns) {
6921             attrs.secure = false;
6922         } else if (!targets_secure) {
6923             /* NS access to S memory */
6924             goto load_fail;
6925         }
6926     }
6927 
6928     vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
6929                                      attrs, &result);
6930     if (result != MEMTX_OK) {
6931         goto load_fail;
6932     }
6933     *pvec = vector_entry;
6934     return true;
6935 
6936 load_fail:
6937     /* All vector table fetch fails are reported as HardFault, with
6938      * HFSR.VECTTBL and .FORCED set. (FORCED is set because
6939      * technically the underlying exception is a MemManage or BusFault
6940      * that is escalated to HardFault.) This is a terminal exception,
6941      * so we will either take the HardFault immediately or else enter
6942      * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
6943      */
6944     exc_secure = targets_secure ||
6945         !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
6946     env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK;
6947     armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
6948     return false;
6949 }
6950 
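/* Illustration (an addition, not in the original source): the vector
 * address computed above is just vecbase plus four bytes per exception
 * number, e.g. for HardFault (exception number 3):
 */
#if 0
    uint32_t addr = env->v7m.vecbase[targets_secure] + 3 * 4;
#endif
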
6951 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
6952                                   bool ignore_faults)
6953 {
6954     /* For v8M, push the callee-saves register part of the stack frame.
6955      * Compare the v8M pseudocode PushCalleeStack().
6956      * In the tailchaining case this may not be the current stack.
6957      */
6958     CPUARMState *env = &cpu->env;
6959     uint32_t *frame_sp_p;
6960     uint32_t frameptr;
6961     ARMMMUIdx mmu_idx;
6962     bool stacked_ok;
6963     uint32_t limit;
6964     bool want_psp;
6965 
6966     if (dotailchain) {
6967         bool mode = lr & R_V7M_EXCRET_MODE_MASK;
6968         bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
6969             !mode;
6970 
6971         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
6972         frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
6973                                     lr & R_V7M_EXCRET_SPSEL_MASK);
6974         want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
6975         if (want_psp) {
6976             limit = env->v7m.psplim[M_REG_S];
6977         } else {
6978             limit = env->v7m.msplim[M_REG_S];
6979         }
6980     } else {
6981         mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
6982         frame_sp_p = &env->regs[13];
6983         limit = v7m_sp_limit(env);
6984     }
6985 
6986     frameptr = *frame_sp_p - 0x28;
6987     if (frameptr < limit) {
6988         /*
6989          * Stack limit failure: set SP to the limit value, and generate
6990          * STKOF UsageFault. Stack pushes below the limit must not be
6991          * performed. It is IMPDEF whether pushes above the limit are
6992          * performed; we choose not to.
6993          */
6994         qemu_log_mask(CPU_LOG_INT,
6995                       "...STKOF during callee-saves register stacking\n");
6996         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
6997         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
6998                                 env->v7m.secure);
6999         *frame_sp_p = limit;
7000         return true;
7001     }
7002 
7003     /* Write as much of the stack frame as we can. A write failure may
7004      * cause us to pend a derived exception.
7005      */
7006     stacked_ok =
7007         v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) &&
7008         v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx,
7009                         ignore_faults) &&
7010         v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx,
7011                         ignore_faults) &&
7012         v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx,
7013                         ignore_faults) &&
7014         v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx,
7015                         ignore_faults) &&
7016         v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx,
7017                         ignore_faults) &&
7018         v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx,
7019                         ignore_faults) &&
7020         v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx,
7021                         ignore_faults) &&
7022         v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx,
7023                         ignore_faults);
7024 
7025     /* Update SP regardless of whether any of the stack accesses failed. */
7026     *frame_sp_p = frameptr;
7027 
7028     return !stacked_ok;
7029 }
7030 
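/* Frame layout sketch (an addition, not in the original source): the
 * 0x28-byte callee-saves frame written above, relative to the final SP:
 *   +0x00  integrity signature (0xfefa125b)
 *   +0x04  reserved (not written here)
 *   +0x08 .. +0x24  r4-r11
 */
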
7031 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
7032                                 bool ignore_stackfaults)
7033 {
7034     /* Do the "take the exception" parts of exception entry,
7035      * but not the pushing of state to the stack. This is
7036      * similar to the pseudocode ExceptionTaken() function.
7037      */
7038     CPUARMState *env = &cpu->env;
7039     uint32_t addr;
7040     bool targets_secure;
7041     int exc;
7042     bool push_failed = false;
7043 
7044     armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
7045     qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
7046                   targets_secure ? "secure" : "nonsecure", exc);
7047 
7048     if (arm_feature(env, ARM_FEATURE_V8)) {
7049         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
7050             (lr & R_V7M_EXCRET_S_MASK)) {
7051             /* The background code (the owner of the registers in the
7052              * exception frame) is Secure. This means it may either have
7053              * already pushed or may now need to push callee-saves registers.
7054              */
7055             if (targets_secure) {
7056                 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
7057                     /* We took an exception from Secure to NonSecure
7058                      * (which means the callee-saved registers got stacked)
7059                      * and are now tailchaining to a Secure exception.
7060                      * Clear DCRS so eventual return from this Secure
7061                      * exception unstacks the callee-saved registers.
7062                      */
7063                     lr &= ~R_V7M_EXCRET_DCRS_MASK;
7064                 }
7065             } else {
7066                 /* We're going to a non-secure exception; push the
7067                  * callee-saves registers to the stack now, if they're
7068                  * not already saved.
7069                  */
7070                 if (lr & R_V7M_EXCRET_DCRS_MASK &&
7071                     !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
7072                     push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
7073                                                         ignore_stackfaults);
7074                 }
7075                 lr |= R_V7M_EXCRET_DCRS_MASK;
7076             }
7077         }
7078 
7079         lr &= ~R_V7M_EXCRET_ES_MASK;
7080         if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7081             lr |= R_V7M_EXCRET_ES_MASK;
7082         }
7083         lr &= ~R_V7M_EXCRET_SPSEL_MASK;
7084         if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
7085             lr |= R_V7M_EXCRET_SPSEL_MASK;
7086         }
7087 
7088         /* Clear registers if necessary to prevent non-secure exception
7089          * code being able to see register values from secure code.
7090          * Where register values become architecturally UNKNOWN we leave
7091          * them with their previous values.
7092          */
7093         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7094             if (!targets_secure) {
7095                 /* Always clear the caller-saved registers (they have been
7096                  * pushed to the stack earlier in v7m_push_stack()).
7097                  * Clear callee-saved registers if the background code is
7098                  * Secure (in which case these regs were saved in
7099                  * v7m_push_callee_stack()).
7100                  */
7101                 int i;
7102 
7103                 for (i = 0; i < 13; i++) {
7104                     /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
7105                     if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
7106                         env->regs[i] = 0;
7107                     }
7108                 }
7109                 /* Clear EAPSR */
7110                 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
7111             }
7112         }
7113     }
7114 
7115     if (push_failed && !ignore_stackfaults) {
7116         /* Derived exception on callee-saves register stacking:
7117          * we might now want to take a different exception which
7118          * targets a different security state, so try again from the top.
7119          */
7120         qemu_log_mask(CPU_LOG_INT,
7121                       "...derived exception on callee-saves register stacking");
7122         v7m_exception_taken(cpu, lr, true, true);
7123         return;
7124     }
7125 
7126     if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
7127         /* Vector load failed: derived exception */
7128         qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
7129         v7m_exception_taken(cpu, lr, true, true);
7130         return;
7131     }
7132 
7133     /* Now we've done everything that might cause a derived exception
7134      * we can go ahead and activate whichever exception we're going to
7135      * take (which might now be the derived exception).
7136      */
7137     armv7m_nvic_acknowledge_irq(env->nvic);
7138 
7139     /* Switch to target security state -- must do this before writing SPSEL */
7140     switch_v7m_security_state(env, targets_secure);
7141     write_v7m_control_spsel(env, 0);
7142     arm_clear_exclusive(env);
7143     /* Clear IT bits */
7144     env->condexec_bits = 0;
7145     env->regs[14] = lr;
7146     env->regs[15] = addr & 0xfffffffe;
7147     env->thumb = addr & 1;
7148 }
7149 
7150 static bool v7m_push_stack(ARMCPU *cpu)
7151 {
7152     /* Do the "set up stack frame" part of exception entry,
7153      * similar to pseudocode PushStack().
7154      * Return true if we generate a derived exception (and so
7155      * should ignore further stack faults trying to process
7156      * that derived exception.)
7157      */
7158     bool stacked_ok;
7159     CPUARMState *env = &cpu->env;
7160     uint32_t xpsr = xpsr_read(env);
7161     uint32_t frameptr = env->regs[13];
7162     ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
7163 
7164     /* Align stack pointer if the guest wants that */
7165     if ((frameptr & 4) &&
7166         (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
7167         frameptr -= 4;
7168         xpsr |= XPSR_SPREALIGN;
7169     }
7170 
7171     frameptr -= 0x20;
7172 
7173     if (arm_feature(env, ARM_FEATURE_V8)) {
7174         uint32_t limit = v7m_sp_limit(env);
7175 
7176         if (frameptr < limit) {
7177             /*
7178              * Stack limit failure: set SP to the limit value, and generate
7179              * STKOF UsageFault. Stack pushes below the limit must not be
7180              * performed. It is IMPDEF whether pushes above the limit are
7181              * performed; we choose not to.
7182              */
7183             qemu_log_mask(CPU_LOG_INT,
7184                           "...STKOF during stacking\n");
7185             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
7186             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
7187                                     env->v7m.secure);
7188             env->regs[13] = limit;
7189             return true;
7190         }
7191     }
7192 
7193     /* Write as much of the stack frame as we can. If we fail a stack
7194      * write this will result in a derived exception being pended
7195      * (which may be taken in preference to the one we started with
7196      * if it has higher priority).
7197      */
7198     stacked_ok =
7199         v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
7200         v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
7201         v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
7202         v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
7203         v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
7204         v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
7205         v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
7206         v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
7207 
7208     /* Update SP regardless of whether any of the stack accesses failed. */
7209     env->regs[13] = frameptr;
7210 
7211     return !stacked_ok;
7212 }
7213 
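/* Frame layout sketch (an addition, not in the original source): the
 * 0x20-byte hardware-stacked frame written above:
 *   +0x00 r0    +0x04 r1    +0x08 r2          +0x0c r3
 *   +0x10 r12   +0x14 lr    +0x18 return PC   +0x1c xPSR
 * XPSR_SPREALIGN in the stacked xPSR records whether a padding word was
 * inserted to 8-align the frame, so that unstacking can undo it.
 */
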
7214 static void do_v7m_exception_exit(ARMCPU *cpu)
7215 {
7216     CPUARMState *env = &cpu->env;
7217     uint32_t excret;
7218     uint32_t xpsr;
7219     bool ufault = false;
7220     bool sfault = false;
7221     bool return_to_sp_process;
7222     bool return_to_handler;
7223     bool rettobase = false;
7224     bool exc_secure = false;
7225     bool return_to_secure;
7226 
7227     /* If we're not in Handler mode then jumps to magic exception-exit
7228      * addresses don't have magic behaviour. However for the v8M
7229      * security extensions the magic secure-function-return has to
7230      * work in thread mode too, so to avoid doing an extra check in
7231      * the generated code we allow exception-exit magic to also cause the
7232      * internal exception and bring us here in thread mode. Correct code
7233      * will never try to do this (the following insn fetch will always
7234      * fault) so the overhead of having taken an unnecessary exception
7235      * doesn't matter.
7236      */
7237     if (!arm_v7m_is_handler_mode(env)) {
7238         return;
7239     }
7240 
7241     /* In the spec pseudocode ExceptionReturn() is called directly
7242      * from BXWritePC() and gets the full target PC value including
7243      * bit zero. In QEMU's implementation we treat it as a normal
7244      * jump-to-register (which is then caught later on), and so split
7245      * the target value up between env->regs[15] and env->thumb in
7246      * gen_bx(). Reconstitute it.
7247      */
7248     excret = env->regs[15];
7249     if (env->thumb) {
7250         excret |= 1;
7251     }
7252 
7253     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
7254                   " previous exception %d\n",
7255                   excret, env->v7m.exception);
7256 
7257     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
7258         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
7259                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
7260                       excret);
7261     }
7262 
7263     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7264         /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
7265          * we pick which FAULTMASK to clear.
7266          */
7267         if (!env->v7m.secure &&
7268             ((excret & R_V7M_EXCRET_ES_MASK) ||
7269              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
7270             sfault = true;
7271             /* For all other purposes, treat ES as 0 (R_HXSR) */
7272             excret &= ~R_V7M_EXCRET_ES_MASK;
7273         }
7274         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
7275     }
7276 
7277     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
7278         /* Auto-clear FAULTMASK on return from other than NMI.
7279          * If the security extension is implemented then this only
7280          * happens if the raw execution priority is >= 0; the
7281          * value of the ES bit in the exception return value indicates
7282          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
7283          */
7284         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7285             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
7286                 env->v7m.faultmask[exc_secure] = 0;
7287             }
7288         } else {
7289             env->v7m.faultmask[M_REG_NS] = 0;
7290         }
7291     }
7292 
7293     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
7294                                      exc_secure)) {
7295     case -1:
7296         /* attempt to exit an exception that isn't active */
7297         ufault = true;
7298         break;
7299     case 0:
7300         /* still an irq active now */
7301         break;
7302     case 1:
7303         /* we returned to base exception level, no nesting.
7304          * (In the pseudocode this is written using "NestedActivation != 1"
7305          * where we have 'rettobase == false'.)
7306          */
7307         rettobase = true;
7308         break;
7309     default:
7310         g_assert_not_reached();
7311     }
7312 
7313     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
7314     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
7315     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
7316         (excret & R_V7M_EXCRET_S_MASK);
7317 
7318     if (arm_feature(env, ARM_FEATURE_V8)) {
7319         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7320             /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
7321              * we choose to take the UsageFault.
7322              */
7323             if ((excret & R_V7M_EXCRET_S_MASK) ||
7324                 (excret & R_V7M_EXCRET_ES_MASK) ||
7325                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
7326                 ufault = true;
7327             }
7328         }
7329         if (excret & R_V7M_EXCRET_RES0_MASK) {
7330             ufault = true;
7331         }
7332     } else {
7333         /* For v7M we only recognize certain combinations of the low bits */
7334         switch (excret & 0xf) {
7335         case 1: /* Return to Handler */
7336             break;
7337         case 13: /* Return to Thread using Process stack */
7338         case 9: /* Return to Thread using Main stack */
7339             /* We only need to check NONBASETHRDENA for v7M, because in
7340              * v8M this bit does not exist (it is RES1).
7341              */
7342             if (!rettobase &&
7343                 !(env->v7m.ccr[env->v7m.secure] &
7344                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
7345                 ufault = true;
7346             }
7347             break;
7348         default:
7349             ufault = true;
7350         }
7351     }
7352 
7353     /*
7354      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
7355      * Handler mode (and will be until we write the new XPSR.Interrupt
7356      * field) this does not switch around the current stack pointer.
7357      * We must do this before we do any kind of tailchaining, including
7358      * for the derived exceptions on integrity check failures, or we will
7359      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
7360      */
7361     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
7362 
7363     if (sfault) {
7364         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
7365         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7366         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
7367                       "stackframe: failed EXC_RETURN.ES validity check\n");
7368         v7m_exception_taken(cpu, excret, true, false);
7369         return;
7370     }
7371 
7372     if (ufault) {
7373         /* Bad exception return: instead of popping the exception
7374          * stack, directly take a usage fault on the current stack.
7375          */
7376         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7377         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7378         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
7379                       "stackframe: failed exception return integrity check\n");
7380         v7m_exception_taken(cpu, excret, true, false);
7381         return;
7382     }
7383 
7384     /*
7385      * Tailchaining: if there is currently a pending exception that
7386      * is high enough priority to preempt execution at the level we're
7387      * about to return to, then just directly take that exception now,
7388      * avoiding an unstack-and-then-stack. Note that now we have
7389      * deactivated the previous exception by calling armv7m_nvic_complete_irq()
7390      * our current execution priority is already the execution priority we are
7391      * returning to -- none of the state we would unstack or set based on
7392      * the EXCRET value affects it.
7393      */
7394     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
7395         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
7396         v7m_exception_taken(cpu, excret, true, false);
7397         return;
7398     }
7399 
7400     switch_v7m_security_state(env, return_to_secure);
7401 
7402     {
7403         /* The stack pointer we should be reading the exception frame from
7404          * depends on bits in the magic exception return type value (and
7405          * for v8M isn't necessarily the stack pointer we will eventually
7406          * end up resuming execution with). Get a pointer to the location
7407          * in the CPU state struct where the SP we need is currently being
7408          * stored; we will use and modify it in place.
7409          * We use this limited C variable scope so we don't accidentally
7410          * use 'frame_sp_p' after we do something that makes it invalid.
7411          */
7412         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
7413                                               return_to_secure,
7414                                               !return_to_handler,
7415                                               return_to_sp_process);
7416         uint32_t frameptr = *frame_sp_p;
7417         bool pop_ok = true;
7418         ARMMMUIdx mmu_idx;
7419         bool return_to_priv = return_to_handler ||
7420             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
7421 
7422         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
7423                                                         return_to_priv);
7424 
7425         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
7426             arm_feature(env, ARM_FEATURE_V8)) {
7427             qemu_log_mask(LOG_GUEST_ERROR,
7428                           "M profile exception return with non-8-aligned SP "
7429                           "for destination state is UNPREDICTABLE\n");
7430         }
7431 
7432         /* Do we need to pop callee-saved registers? */
7433         if (return_to_secure &&
7434             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
7435              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
7436             uint32_t expected_sig = 0xfefa125b;
7437             uint32_t actual_sig;
7438 
7439             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
7440 
7441             if (pop_ok && expected_sig != actual_sig) {
7442                 /* Take a SecureFault on the current stack */
7443                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
7444                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7445                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
7446                               "stackframe: failed exception return integrity "
7447                               "signature check\n");
7448                 v7m_exception_taken(cpu, excret, true, false);
7449                 return;
7450             }
7451 
7452             pop_ok = pop_ok &&
7453                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
7454                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
7455                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
7456                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
7457                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
7458                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
7459                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
7460                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
7461 
7462             frameptr += 0x28;
7463         }
7464 
7465         /* Pop registers */
7466         pop_ok = pop_ok &&
7467             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
7468             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
7469             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
7470             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
7471             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
7472             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
7473             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
7474             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
7475 
7476         if (!pop_ok) {
7477             /* v7m_stack_read() pended a fault, so take it (as a tail
7478              * chained exception on the same stack frame)
7479              */
7480             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
7481             v7m_exception_taken(cpu, excret, true, false);
7482             return;
7483         }
7484 
7485         /* Returning from an exception with a PC with bit 0 set is defined
7486          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
7487          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
7488          * the lsbit, and there are several RTOSes out there which incorrectly
7489          * assume the r15 in the stack frame should be a Thumb-style "lsbit
7490          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
7491          * complain about the badly behaved guest.
7492          */
7493         if (env->regs[15] & 1) {
7494             env->regs[15] &= ~1U;
7495             if (!arm_feature(env, ARM_FEATURE_V8)) {
7496                 qemu_log_mask(LOG_GUEST_ERROR,
7497                               "M profile return from interrupt with misaligned "
7498                               "PC is UNPREDICTABLE on v7M\n");
7499             }
7500         }
7501 
7502         if (arm_feature(env, ARM_FEATURE_V8)) {
7503             /* For v8M we have to check whether the xPSR exception field
7504              * matches the EXCRET value for return to handler/thread
7505              * before we commit to changing the SP and xPSR.
7506              */
7507             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
7508             if (return_to_handler != will_be_handler) {
7509                 /* Take an INVPC UsageFault on the current stack.
7510                  * By this point we will have switched to the security state
7511                  * for the background state, so this UsageFault will target
7512                  * that state.
7513                  */
7514                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
7515                                         env->v7m.secure);
7516                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7517                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
7518                               "stackframe: failed exception return integrity "
7519                               "check\n");
7520                 v7m_exception_taken(cpu, excret, true, false);
7521                 return;
7522             }
7523         }
7524 
7525         /* Commit to consuming the stack frame */
7526         frameptr += 0x20;
7527         /* Undo stack alignment (the SPREALIGN bit indicates that the original
7528          * pre-exception SP was not 8-aligned and we added a padding word to
7529          * align it, so we undo this by ORing in the bit that increases it
7530          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
7531          * would work too but a logical OR is how the pseudocode specifies it.)
7532          */
7533         if (xpsr & XPSR_SPREALIGN) {
7534             frameptr |= 4;
7535         }
7536         *frame_sp_p = frameptr;
7537     }
7538     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
7539     xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
7540 
7541     /* The restored xPSR exception field will be zero if we're
7542      * resuming in Thread mode. If that doesn't match what the
7543      * exception return excret specified then this is a UsageFault.
7544      * v7M requires we make this check here; v8M did it earlier.
7545      */
7546     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
7547         /* Take an INVPC UsageFault by pushing the stack again;
7548          * we know we're v7M so this is never a Secure UsageFault.
7549          */
7550         bool ignore_stackfaults;
7551 
7552         assert(!arm_feature(env, ARM_FEATURE_V8));
7553         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
7554         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7555         ignore_stackfaults = v7m_push_stack(cpu);
7556         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
7557                       "failed exception return integrity check\n");
7558         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
7559         return;
7560     }
7561 
7562     /* Otherwise, we have a successful exception exit. */
7563     arm_clear_exclusive(env);
7564     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
7565 }
7566 
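/* Reference sketch (an addition, not in the original source): the
 * EXC_RETURN bits consulted during the exception exit above, for v8M
 * with the Security Extension:
 *   bit 0  ES    - exception was taken to Secure state
 *   bit 2  SPSEL - return stack is the process stack
 *   bit 3  MODE  - return to Thread (1) or Handler (0) mode
 *   bit 5  DCRS  - 0 means the callee-saves registers are on the frame
 *   bit 6  S     - registers were pushed from Secure state
 */
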
7567 static bool do_v7m_function_return(ARMCPU *cpu)
7568 {
7569     /* v8M security extensions magic function return.
7570      * We may either:
7571      *  (1) throw an exception (longjump)
7572      *  (2) return true if we successfully handled the function return
7573      *  (3) return false if we failed a consistency check and have
7574      *      pended a UsageFault that needs to be taken now
7575      *
7576      * At this point the magic return value is split between env->regs[15]
7577      * and env->thumb. We don't bother to reconstitute it because we don't
7578      * need it (all values are handled the same way).
7579      */
7580     CPUARMState *env = &cpu->env;
7581     uint32_t newpc, newpsr, newpsr_exc;
7582 
7583     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
7584 
7585     {
7586         bool threadmode, spsel;
7587         TCGMemOpIdx oi;
7588         ARMMMUIdx mmu_idx;
7589         uint32_t *frame_sp_p;
7590         uint32_t frameptr;
7591 
7592         /* Pull the return address and IPSR from the Secure stack */
7593         threadmode = !arm_v7m_is_handler_mode(env);
7594         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
7595 
7596         frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
7597         frameptr = *frame_sp_p;
7598 
7599         /* These loads may throw an exception (for MPU faults). We want to
7600          * do them as secure, so work out what MMU index that is.
7601          */
7602         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
7603         oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
7604         newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
7605         newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
7606 
7607         /* Consistency checks on new IPSR */
7608         newpsr_exc = newpsr & XPSR_EXCP;
7609         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
7610               (env->v7m.exception == 1 && newpsr_exc != 0))) {
7611             /* Pend the fault and tell our caller to take it */
7612             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
7613             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
7614                                     env->v7m.secure);
7615             qemu_log_mask(CPU_LOG_INT,
7616                           "...taking INVPC UsageFault: "
7617                           "IPSR consistency check failed\n");
7618             return false;
7619         }
7620 
7621         *frame_sp_p = frameptr + 8;
7622     }
7623 
7624     /* This invalidates frame_sp_p */
7625     switch_v7m_security_state(env, true);
7626     env->v7m.exception = newpsr_exc;
7627     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
7628     if (newpsr & XPSR_SFPA) {
7629         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
7630     }
7631     xpsr_write(env, 0, XPSR_IT);
7632     env->thumb = newpc & 1;
7633     env->regs[15] = newpc & ~1;
7634 
7635     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
7636     return true;
7637 }
7638 
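/* Illustration (an addition, not in the original source): the two-word
 * secure function-return frame popped above:
 *   [sp]     return address (bit 0 becomes the new Thumb bit)
 *   [sp + 4] partial xPSR: exception number, plus the SFPA flag
 * The matching push is done by the BLXNS helper earlier in this file.
 */
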
7639 static void arm_log_exception(int idx)
7640 {
7641     if (qemu_loglevel_mask(CPU_LOG_INT)) {
7642         const char *exc = NULL;
7643         static const char * const excnames[] = {
7644             [EXCP_UDEF] = "Undefined Instruction",
7645             [EXCP_SWI] = "SVC",
7646             [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
7647             [EXCP_DATA_ABORT] = "Data Abort",
7648             [EXCP_IRQ] = "IRQ",
7649             [EXCP_FIQ] = "FIQ",
7650             [EXCP_BKPT] = "Breakpoint",
7651             [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
7652             [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
7653             [EXCP_HVC] = "Hypervisor Call",
7654             [EXCP_HYP_TRAP] = "Hypervisor Trap",
7655             [EXCP_SMC] = "Secure Monitor Call",
7656             [EXCP_VIRQ] = "Virtual IRQ",
7657             [EXCP_VFIQ] = "Virtual FIQ",
7658             [EXCP_SEMIHOST] = "Semihosting call",
7659             [EXCP_NOCP] = "v7M NOCP UsageFault",
7660             [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
7661             [EXCP_STKOF] = "v8M STKOF UsageFault",
7662         };
7663 
7664         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
7665             exc = excnames[idx];
7666         }
7667         if (!exc) {
7668             exc = "unknown";
7669         }
7670         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
7671     }
7672 }
7673 
7674 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
7675                                uint32_t addr, uint16_t *insn)
7676 {
7677     /* Load a 16-bit portion of a v7M instruction, returning true on success,
7678      * or false on failure (in which case we will have pended the appropriate
7679      * exception).
7680      * We need to do the instruction fetch's MPU and SAU checks
7681      * like this because there is no MMU index that would allow
7682      * doing the load with a single function call. Instead we must
7683      * first check that the security attributes permit the load
7684      * and that they don't mismatch on the two halves of the instruction,
7685      * and then we do the load as a secure load (ie using the security
7686      * attributes of the address, not the CPU, as architecturally required).
7687      */
7688     CPUState *cs = CPU(cpu);
7689     CPUARMState *env = &cpu->env;
7690     V8M_SAttributes sattrs = {};
7691     MemTxAttrs attrs = {};
7692     ARMMMUFaultInfo fi = {};
7693     MemTxResult txres;
7694     target_ulong page_size;
7695     hwaddr physaddr;
7696     int prot;
7697 
7698     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
7699     if (!sattrs.nsc || sattrs.ns) {
7700         /* This must be the second half of the insn, and it straddles a
7701          * region boundary with the second half not being S&NSC.
7702          */
7703         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
7704         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7705         qemu_log_mask(CPU_LOG_INT,
7706                       "...really SecureFault with SFSR.INVEP\n");
7707         return false;
7708     }
7709     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
7710                       &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
7711         /* the MPU lookup failed */
7712         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
7713         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
7714         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
7715         return false;
7716     }
7717     *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
7718                                  attrs, &txres);
7719     if (txres != MEMTX_OK) {
7720         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
7721         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
7722         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
7723         return false;
7724     }
7725     return true;
7726 }
7727 
7728 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
7729 {
7730     /* Check whether this attempt to execute code in a Secure & NS-Callable
7731      * memory region is for an SG instruction; if so, then emulate the
7732      * effect of the SG instruction and return true. Otherwise pend
7733      * the correct kind of exception and return false.
7734      */
7735     CPUARMState *env = &cpu->env;
7736     ARMMMUIdx mmu_idx;
7737     uint16_t insn;
7738 
7739     /* We should never get here unless get_phys_addr_pmsav8() caused
7740      * an exception for NS executing in S&NSC memory.
7741      */
7742     assert(!env->v7m.secure);
7743     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
7744 
7745     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
7746     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
7747 
7748     if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
7749         return false;
7750     }
7751 
7752     if (!env->thumb) {
7753         goto gen_invep;
7754     }
7755 
7756     if (insn != 0xe97f) {
7757         /* Not an SG instruction first half (we choose the IMPDEF
7758          * early-SG-check option).
7759          */
7760         goto gen_invep;
7761     }
7762 
7763     if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
7764         return false;
7765     }
7766 
7767     if (insn != 0xe97f) {
7768         /* Not an SG instruction second half (yes, both halves of the SG
7769          * insn have the same hex value)
7770          */
7771         goto gen_invep;
7772     }
7773 
7774     /* OK, we have confirmed that we really have an SG instruction.
7775      * We know we're NS in S memory so don't need to repeat those checks.
7776      */
7777     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
7778                   ", executing it\n", env->regs[15]);
7779     env->regs[14] &= ~1;
7780     switch_v7m_security_state(env, true);
7781     xpsr_write(env, 0, XPSR_IT);
7782     env->regs[15] += 4;
7783     return true;
7784 
7785 gen_invep:
7786     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
7787     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7788     qemu_log_mask(CPU_LOG_INT,
7789                   "...really SecureFault with SFSR.INVEP\n");
7790     return false;
7791 }
7792 
7793 void arm_v7m_cpu_do_interrupt(CPUState *cs)
7794 {
7795     ARMCPU *cpu = ARM_CPU(cs);
7796     CPUARMState *env = &cpu->env;
7797     uint32_t lr;
7798     bool ignore_stackfaults;
7799 
7800     arm_log_exception(cs->exception_index);
7801 
7802     /* For exceptions we just mark as pending on the NVIC, and let that
7803        handle it.  */
7804     switch (cs->exception_index) {
7805     case EXCP_UDEF:
7806         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7807         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
7808         break;
7809     case EXCP_NOCP:
7810         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7811         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
7812         break;
7813     case EXCP_INVSTATE:
7814         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7815         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
7816         break;
7817     case EXCP_STKOF:
7818         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
7819         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
7820         break;
7821     case EXCP_SWI:
7822         /* The PC already points to the next instruction.  */
7823         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
7824         break;
7825     case EXCP_PREFETCH_ABORT:
7826     case EXCP_DATA_ABORT:
7827         /* Note that for M profile we don't have a guest facing FSR, but
7828          * the env->exception.fsr will be populated by the code that
7829          * raises the fault, in the A profile short-descriptor format.
7830          */
7831         switch (env->exception.fsr & 0xf) {
7832         case M_FAKE_FSR_NSC_EXEC:
7833             /* Exception generated when we try to execute code at an address
7834              * which is marked as Secure & Non-Secure Callable and the CPU
7835              * is in the Non-Secure state. The only instruction which can
7836              * be executed like this is SG (and that only if both halves of
7837              * the SG instruction have the same security attributes.)
7838              * Everything else must generate an INVEP SecureFault, so we
7839              * emulate the SG instruction here.
7840              */
7841             if (v7m_handle_execute_nsc(cpu)) {
7842                 return;
7843             }
7844             break;
7845         case M_FAKE_FSR_SFAULT:
7846             /* Various flavours of SecureFault for attempts to execute or
7847              * access data in the wrong security state.
7848              */
7849             switch (cs->exception_index) {
7850             case EXCP_PREFETCH_ABORT:
7851                 if (env->v7m.secure) {
7852                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
7853                     qemu_log_mask(CPU_LOG_INT,
7854                                   "...really SecureFault with SFSR.INVTRAN\n");
7855                 } else {
7856                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
7857                     qemu_log_mask(CPU_LOG_INT,
7858                                   "...really SecureFault with SFSR.INVEP\n");
7859                 }
7860                 break;
7861             case EXCP_DATA_ABORT:
7862                 /* This must be an NS access to S memory */
7863                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
7864                 qemu_log_mask(CPU_LOG_INT,
7865                               "...really SecureFault with SFSR.AUVIOL\n");
7866                 break;
7867             }
7868             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7869             break;
7870         case 0x8: /* External Abort */
7871             switch (cs->exception_index) {
7872             case EXCP_PREFETCH_ABORT:
7873                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
7874                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
7875                 break;
7876             case EXCP_DATA_ABORT:
7877                 env->v7m.cfsr[M_REG_NS] |=
7878                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
7879                 env->v7m.bfar = env->exception.vaddress;
7880                 qemu_log_mask(CPU_LOG_INT,
7881                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
7882                               env->v7m.bfar);
7883                 break;
7884             }
7885             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
7886             break;
7887         default:
7888             /* All other FSR values are either MPU faults or "can't happen
7889              * for M profile" cases.
7890              */
7891             switch (cs->exception_index) {
7892             case EXCP_PREFETCH_ABORT:
7893                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
7894                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
7895                 break;
7896             case EXCP_DATA_ABORT:
7897                 env->v7m.cfsr[env->v7m.secure] |=
7898                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
7899                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
7900                 qemu_log_mask(CPU_LOG_INT,
7901                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
7902                               env->v7m.mmfar[env->v7m.secure]);
7903                 break;
7904             }
7905             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
7906                                     env->v7m.secure);
7907             break;
7908         }
7909         break;
7910     case EXCP_BKPT:
7911         if (semihosting_enabled()) {
7912             int nr;
7913             nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
7914             if (nr == 0xab) {
7915                 env->regs[15] += 2;
7916                 qemu_log_mask(CPU_LOG_INT,
7917                               "...handling as semihosting call 0x%x\n",
7918                               env->regs[0]);
7919                 env->regs[0] = do_arm_semihosting(env);
7920                 return;
7921             }
7922         }
7923         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
7924         break;
7925     case EXCP_IRQ:
7926         break;
7927     case EXCP_EXCEPTION_EXIT:
7928         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
7929             /* Must be v8M security extension function return */
7930             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
7931             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
7932             if (do_v7m_function_return(cpu)) {
7933                 return;
7934             }
7935         } else {
7936             do_v7m_exception_exit(cpu);
7937             return;
7938         }
7939         break;
7940     default:
7941         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
7942         return; /* Never happens.  Keep compiler happy.  */
7943     }
7944 
7945     if (arm_feature(env, ARM_FEATURE_V8)) {
7946         lr = R_V7M_EXCRET_RES1_MASK |
7947             R_V7M_EXCRET_DCRS_MASK |
7948             R_V7M_EXCRET_FTYPE_MASK;
7949         /* The S bit indicates whether we should return to Secure
7950          * or NonSecure (ie our current state).
7951          * The ES bit indicates whether we're taking this exception
7952          * to Secure or NonSecure (ie our target state). We set it
7953          * later, in v7m_exception_taken().
7954          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
7955          * This corresponds to the ARM ARM pseudocode for v8M setting
7956          * some LR bits in PushStack() and some in ExceptionTaken();
7957          * the distinction matters for the tailchain cases where we
7958          * can take an exception without pushing the stack.
7959          */
7960         if (env->v7m.secure) {
7961             lr |= R_V7M_EXCRET_S_MASK;
7962         }
7963     } else {
7964         lr = R_V7M_EXCRET_RES1_MASK |
7965             R_V7M_EXCRET_S_MASK |
7966             R_V7M_EXCRET_DCRS_MASK |
7967             R_V7M_EXCRET_FTYPE_MASK |
7968             R_V7M_EXCRET_ES_MASK;
7969         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
7970             lr |= R_V7M_EXCRET_SPSEL_MASK;
7971         }
7972     }
7973     if (!arm_v7m_is_handler_mode(env)) {
7974         lr |= R_V7M_EXCRET_MODE_MASK;
7975     }
7976 
7977     ignore_stackfaults = v7m_push_stack(cpu);
7978     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
7979 }
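
/* For reference: on v7M the LR computed above yields the familiar
 * EXC_RETURN magic values, e.g. 0xfffffff1 (return to Handler mode),
 * 0xfffffff9 (Thread mode, main stack) and 0xfffffffd (Thread mode,
 * process stack). For v8M the ES and SPSEL bits are filled in later,
 * in v7m_exception_taken().
 */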
7980 
7981 /* Function used to synchronize QEMU's AArch64 register set with AArch32
7982  * register set.  This is necessary when switching between AArch32 and AArch64
7983  * execution state.
7984  */
7985 void aarch64_sync_32_to_64(CPUARMState *env)
7986 {
7987     int i;
7988     uint32_t mode = env->uncached_cpsr & CPSR_M;
7989 
7990     /* We can blanket copy R[0:7] to X[0:7] */
7991     for (i = 0; i < 8; i++) {
7992         env->xregs[i] = env->regs[i];
7993     }
7994 
7995     /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
7996      * Otherwise, they come from the banked user regs.
7997      */
7998     if (mode == ARM_CPU_MODE_FIQ) {
7999         for (i = 8; i < 13; i++) {
8000             env->xregs[i] = env->usr_regs[i - 8];
8001         }
8002     } else {
8003         for (i = 8; i < 13; i++) {
8004             env->xregs[i] = env->regs[i];
8005         }
8006     }
8007 
8008     /* Registers x13-x23 are the various mode SP and FP registers. Registers
8009      * r13 and r14 are only copied if we are in that mode, otherwise we copy
8010      * from the mode banked register.
8011      */
8012     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8013         env->xregs[13] = env->regs[13];
8014         env->xregs[14] = env->regs[14];
8015     } else {
8016         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
8017         /* HYP is an exception in that it is copied from r14 */
8018         if (mode == ARM_CPU_MODE_HYP) {
8019             env->xregs[14] = env->regs[14];
8020         } else {
8021             env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
8022         }
8023     }
8024 
8025     if (mode == ARM_CPU_MODE_HYP) {
8026         env->xregs[15] = env->regs[13];
8027     } else {
8028         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
8029     }
8030 
8031     if (mode == ARM_CPU_MODE_IRQ) {
8032         env->xregs[16] = env->regs[14];
8033         env->xregs[17] = env->regs[13];
8034     } else {
8035         env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
8036         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
8037     }
8038 
8039     if (mode == ARM_CPU_MODE_SVC) {
8040         env->xregs[18] = env->regs[14];
8041         env->xregs[19] = env->regs[13];
8042     } else {
8043         env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
8044         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
8045     }
8046 
8047     if (mode == ARM_CPU_MODE_ABT) {
8048         env->xregs[20] = env->regs[14];
8049         env->xregs[21] = env->regs[13];
8050     } else {
8051         env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
8052         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
8053     }
8054 
8055     if (mode == ARM_CPU_MODE_UND) {
8056         env->xregs[22] = env->regs[14];
8057         env->xregs[23] = env->regs[13];
8058     } else {
8059         env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
8060         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
8061     }
8062 
8063     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
8064      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
8065      * FIQ bank for r8-r14.
8066      */
8067     if (mode == ARM_CPU_MODE_FIQ) {
8068         for (i = 24; i < 31; i++) {
8069             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
8070         }
8071     } else {
8072         for (i = 24; i < 29; i++) {
8073             env->xregs[i] = env->fiq_regs[i - 24];
8074         }
8075         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
8076         env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
8077     }
8078 
8079     env->pc = env->regs[15];
8080 }
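
/* Summary of the fixed AArch32 <-> AArch64 register mapping used by the
 * two sync functions here (per the ARMv8 ARM): x0-x7 = r0-r7,
 * x8-x12 = r8-r12 (the user-bank copies), x13/x14 = SP/LR (user),
 * x15 = SP_hyp, x16/x17 = LR/SP (irq), x18/x19 = LR/SP (svc),
 * x20/x21 = LR/SP (abt), x22/x23 = LR/SP (und), x24-x30 = r8-r14 (fiq).
 */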
8081 
8082 /* Function used to synchronize QEMU's AArch32 register set with AArch64
8083  * register set.  This is necessary when switching between AArch32 and AArch64
8084  * execution state.
8085  */
8086 void aarch64_sync_64_to_32(CPUARMState *env)
8087 {
8088     int i;
8089     uint32_t mode = env->uncached_cpsr & CPSR_M;
8090 
8091     /* We can blanket copy X[0:7] to R[0:7] */
8092     for (i = 0; i < 8; i++) {
8093         env->regs[i] = env->xregs[i];
8094     }
8095 
8096     /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
8097      * Otherwise, we copy x8-x12 into the banked user regs.
8098      */
8099     if (mode == ARM_CPU_MODE_FIQ) {
8100         for (i = 8; i < 13; i++) {
8101             env->usr_regs[i - 8] = env->xregs[i];
8102         }
8103     } else {
8104         for (i = 8; i < 13; i++) {
8105             env->regs[i] = env->xregs[i];
8106         }
8107     }
8108 
8109     /* Registers r13 & r14 depend on the current mode.
8110      * If we are in a given mode, we copy the corresponding x registers to r13
8111      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
8112      * for the mode.
8113      */
8114     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
8115         env->regs[13] = env->xregs[13];
8116         env->regs[14] = env->xregs[14];
8117     } else {
8118         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
8119 
8120         /* HYP is an exception in that it does not have its own banked r14 but
8121          * shares the USR r14
8122          */
8123         if (mode == ARM_CPU_MODE_HYP) {
8124             env->regs[14] = env->xregs[14];
8125         } else {
8126             env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
8127         }
8128     }
8129 
8130     if (mode == ARM_CPU_MODE_HYP) {
8131         env->regs[13] = env->xregs[15];
8132     } else {
8133         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
8134     }
8135 
8136     if (mode == ARM_CPU_MODE_IRQ) {
8137         env->regs[14] = env->xregs[16];
8138         env->regs[13] = env->xregs[17];
8139     } else {
8140         env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
8141         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
8142     }
8143 
8144     if (mode == ARM_CPU_MODE_SVC) {
8145         env->regs[14] = env->xregs[18];
8146         env->regs[13] = env->xregs[19];
8147     } else {
8148         env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
8149         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
8150     }
8151 
8152     if (mode == ARM_CPU_MODE_ABT) {
8153         env->regs[14] = env->xregs[20];
8154         env->regs[13] = env->xregs[21];
8155     } else {
8156         env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
8157         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
8158     }
8159 
8160     if (mode == ARM_CPU_MODE_UND) {
8161         env->regs[14] = env->xregs[22];
8162         env->regs[13] = env->xregs[23];
8163     } else {
8164         env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
8165         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
8166     }
8167 
8168     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
8169      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
8170      * FIQ bank for r8-r14.
8171      */
8172     if (mode == ARM_CPU_MODE_FIQ) {
8173         for (i = 24; i < 31; i++) {
8174             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
8175         }
8176     } else {
8177         for (i = 24; i < 29; i++) {
8178             env->fiq_regs[i - 24] = env->xregs[i];
8179         }
8180         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
8181         env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
8182     }
8183 
8184     env->regs[15] = env->pc;
8185 }
8186 
8187 static void take_aarch32_exception(CPUARMState *env, int new_mode,
8188                                    uint32_t mask, uint32_t offset,
8189                                    uint32_t newpc)
8190 {
8191     /* Change the CPU state so as to actually take the exception. */
8192     switch_mode(env, new_mode);
8193     /*
8194      * For exceptions taken to AArch32 we must clear the SS bit in both
8195      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
8196      */
8197     env->uncached_cpsr &= ~PSTATE_SS;
8198     env->spsr = cpsr_read(env);
8199     /* Clear IT bits.  */
8200     env->condexec_bits = 0;
8201     /* Switch to the new mode, and to the correct instruction set.  */
8202     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8203     /* Set new mode endianness */
8204     env->uncached_cpsr &= ~CPSR_E;
8205     if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
8206         env->uncached_cpsr |= CPSR_E;
8207     }
8208     /* J and IL must always be cleared for exception entry */
8209     env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
8210     env->daif |= mask;
8211 
8212     if (new_mode == ARM_CPU_MODE_HYP) {
8213         env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8214         env->elr_el[2] = env->regs[15];
8215     } else {
8216         /*
8217          * this is a lie, as there was no c1_sys on V4T/V5, but who cares
8218          * and we should just guard the thumb mode on V4
8219          */
8220         if (arm_feature(env, ARM_FEATURE_V4T)) {
8221             env->thumb =
8222                 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8223         }
8224         env->regs[14] = env->regs[15] + offset;
8225     }
8226     env->regs[15] = newpc;
8227 }
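
/* For example, an IRQ taken from User mode to IRQ mode switches to the
 * IRQ register bank, saves the old CPSR into SPSR_irq, sets LR_irq to
 * the return address (PC plus the offset supplied by the caller),
 * applies the caller's A/I/F mask, and jumps to the vector address
 * computed by the caller.
 */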
8228 
8229 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8230 {
8231     /*
8232      * Handle exception entry to Hyp mode; this is sufficiently
8233      * different to entry to other AArch32 modes that we handle it
8234      * separately here.
8235      *
8236      * The vector table entry used is always the 0x14 Hyp mode entry point,
8237      * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
8238      * The offset applied to the preferred return address is always zero
8239      * (see DDI0487C.a section G1.12.3).
8240      * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8241      */
8242     uint32_t addr, mask;
8243     ARMCPU *cpu = ARM_CPU(cs);
8244     CPUARMState *env = &cpu->env;
8245 
8246     switch (cs->exception_index) {
8247     case EXCP_UDEF:
8248         addr = 0x04;
8249         break;
8250     case EXCP_SWI:
8251         addr = 0x14;
8252         break;
8253     case EXCP_BKPT:
8254         /* Fall through to prefetch abort.  */
8255     case EXCP_PREFETCH_ABORT:
8256         env->cp15.ifar_s = env->exception.vaddress;
8257         qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8258                       (uint32_t)env->exception.vaddress);
8259         addr = 0x0c;
8260         break;
8261     case EXCP_DATA_ABORT:
8262         env->cp15.dfar_s = env->exception.vaddress;
8263         qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8264                       (uint32_t)env->exception.vaddress);
8265         addr = 0x10;
8266         break;
8267     case EXCP_IRQ:
8268         addr = 0x18;
8269         break;
8270     case EXCP_FIQ:
8271         addr = 0x1c;
8272         break;
8273     case EXCP_HVC:
8274         addr = 0x08;
8275         break;
8276     case EXCP_HYP_TRAP:
8277         addr = 0x14;
             break;
8278     default:
8279         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8280     }
8281 
8282     if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
8283         if (!arm_feature(env, ARM_FEATURE_V8)) {
8284             /*
8285              * QEMU syndrome values are v8-style. v7 has the IL bit
8286              * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
8287              * If this is a v7 CPU, squash the IL bit in those cases.
8288              */
8289             if (cs->exception_index == EXCP_PREFETCH_ABORT ||
8290                 (cs->exception_index == EXCP_DATA_ABORT &&
8291                  !(env->exception.syndrome & ARM_EL_ISV)) ||
8292                 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
8293                 env->exception.syndrome &= ~ARM_EL_IL;
8294             }
8295         }
8296         env->cp15.esr_el[2] = env->exception.syndrome;
8297     }
8298 
8299     if (arm_current_el(env) != 2 && addr < 0x14) {
8300         addr = 0x14;
8301     }
8302 
8303     mask = 0;
8304     if (!(env->cp15.scr_el3 & SCR_EA)) {
8305         mask |= CPSR_A;
8306     }
8307     if (!(env->cp15.scr_el3 & SCR_IRQ)) {
8308         mask |= CPSR_I;
8309     }
8310     if (!(env->cp15.scr_el3 & SCR_FIQ)) {
8311         mask |= CPSR_F;
8312     }
8313 
8314     addr += env->cp15.hvbar;
8315 
8316     take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
8317 }
8318 
8319 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
8320 {
8321     ARMCPU *cpu = ARM_CPU(cs);
8322     CPUARMState *env = &cpu->env;
8323     uint32_t addr;
8324     uint32_t mask;
8325     int new_mode;
8326     uint32_t offset;
8327     uint32_t moe;
8328 
8329     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
8330     switch (syn_get_ec(env->exception.syndrome)) {
8331     case EC_BREAKPOINT:
8332     case EC_BREAKPOINT_SAME_EL:
8333         moe = 1;
8334         break;
8335     case EC_WATCHPOINT:
8336     case EC_WATCHPOINT_SAME_EL:
8337         moe = 10;
8338         break;
8339     case EC_AA32_BKPT:
8340         moe = 3;
8341         break;
8342     case EC_VECTORCATCH:
8343         moe = 5;
8344         break;
8345     default:
8346         moe = 0;
8347         break;
8348     }
8349 
8350     if (moe) {
8351         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
8352     }
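    /* The MOE values above are the v7 Debug DBGDSCR.MOE encodings
     * (0b0001 breakpoint, 0b0011 BKPT instruction, 0b0101 vector catch,
     * 0b1010 synchronous watchpoint); deposit64(..., 2, 4, moe) places
     * them into DBGDSCR bits [5:2].
     */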
8353 
8354     if (env->exception.target_el == 2) {
8355         arm_cpu_do_interrupt_aarch32_hyp(cs);
8356         return;
8357     }
8358 
8359     switch (cs->exception_index) {
8360     case EXCP_UDEF:
8361         new_mode = ARM_CPU_MODE_UND;
8362         addr = 0x04;
8363         mask = CPSR_I;
8364         if (env->thumb) {
8365             offset = 2;
8366         } else {
8367             offset = 4;
             }
8368         break;
8369     case EXCP_SWI:
8370         new_mode = ARM_CPU_MODE_SVC;
8371         addr = 0x08;
8372         mask = CPSR_I;
8373         /* The PC already points to the next instruction.  */
8374         offset = 0;
8375         break;
8376     case EXCP_BKPT:
8377         /* Fall through to prefetch abort.  */
8378     case EXCP_PREFETCH_ABORT:
8379         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
8380         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
8381         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
8382                       env->exception.fsr, (uint32_t)env->exception.vaddress);
8383         new_mode = ARM_CPU_MODE_ABT;
8384         addr = 0x0c;
8385         mask = CPSR_A | CPSR_I;
8386         offset = 4;
8387         break;
8388     case EXCP_DATA_ABORT:
8389         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
8390         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
8391         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
8392                       env->exception.fsr,
8393                       (uint32_t)env->exception.vaddress);
8394         new_mode = ARM_CPU_MODE_ABT;
8395         addr = 0x10;
8396         mask = CPSR_A | CPSR_I;
8397         offset = 8;
8398         break;
8399     case EXCP_IRQ:
8400         new_mode = ARM_CPU_MODE_IRQ;
8401         addr = 0x18;
8402         /* Disable IRQ and imprecise data aborts.  */
8403         mask = CPSR_A | CPSR_I;
8404         offset = 4;
8405         if (env->cp15.scr_el3 & SCR_IRQ) {
8406             /* IRQ routed to monitor mode */
8407             new_mode = ARM_CPU_MODE_MON;
8408             mask |= CPSR_F;
8409         }
8410         break;
8411     case EXCP_FIQ:
8412         new_mode = ARM_CPU_MODE_FIQ;
8413         addr = 0x1c;
8414         /* Disable FIQ, IRQ and imprecise data aborts.  */
8415         mask = CPSR_A | CPSR_I | CPSR_F;
8416         if (env->cp15.scr_el3 & SCR_FIQ) {
8417             /* FIQ routed to monitor mode */
8418             new_mode = ARM_CPU_MODE_MON;
8419         }
8420         offset = 4;
8421         break;
8422     case EXCP_VIRQ:
8423         new_mode = ARM_CPU_MODE_IRQ;
8424         addr = 0x18;
8425         /* Disable IRQ and imprecise data aborts.  */
8426         mask = CPSR_A | CPSR_I;
8427         offset = 4;
8428         break;
8429     case EXCP_VFIQ:
8430         new_mode = ARM_CPU_MODE_FIQ;
8431         addr = 0x1c;
8432         /* Disable FIQ, IRQ and imprecise data aborts.  */
8433         mask = CPSR_A | CPSR_I | CPSR_F;
8434         offset = 4;
8435         break;
8436     case EXCP_SMC:
8437         new_mode = ARM_CPU_MODE_MON;
8438         addr = 0x08;
8439         mask = CPSR_A | CPSR_I | CPSR_F;
8440         offset = 0;
8441         break;
8442     default:
8443         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8444         return; /* Never happens.  Keep compiler happy.  */
8445     }
8446 
8447     if (new_mode == ARM_CPU_MODE_MON) {
8448         addr += env->cp15.mvbar;
8449     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
8450         /* High vectors. When enabled, base address cannot be remapped. */
8451         addr += 0xffff0000;
8452     } else {
8453         /* ARM v7 architectures provide a vector base address register to remap
8454          * the interrupt vector table.
8455          * This register is only followed in non-monitor mode, and is banked.
8456          * Note: only bits 31:5 are valid.
8457          */
8458         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
8459     }
8460 
8461     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
8462         env->cp15.scr_el3 &= ~SCR_NS;
8463     }
8464 
8465     take_aarch32_exception(env, new_mode, mask, offset, addr);
8466 }
8467 
8468 /* Handle exception entry to a target EL which is using AArch64 */
8469 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
8470 {
8471     ARMCPU *cpu = ARM_CPU(cs);
8472     CPUARMState *env = &cpu->env;
8473     unsigned int new_el = env->exception.target_el;
8474     target_ulong addr = env->cp15.vbar_el[new_el];
8475     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
8476     unsigned int cur_el = arm_current_el(env);
8477 
8478     /*
8479      * Note that new_el can never be 0.  If cur_el is 0, then
8480      * el0_a64 is is_a64(), else el0_a64 is ignored.
8481      */
8482     aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
8483 
8484     if (cur_el < new_el) {
8485         /* Entry vector offset depends on whether the implemented EL
8486          * immediately lower than the target level is using AArch32 or AArch64
8487          */
8488         bool is_aa64;
8489 
8490         switch (new_el) {
8491         case 3:
8492             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
8493             break;
8494         case 2:
8495             is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
8496             break;
8497         case 1:
8498             is_aa64 = is_a64(env);
8499             break;
8500         default:
8501             g_assert_not_reached();
8502         }
8503 
8504         if (is_aa64) {
8505             addr += 0x400;
8506         } else {
8507             addr += 0x600;
8508         }
8509     } else if (pstate_read(env) & PSTATE_SP) {
8510         addr += 0x200;
8511     }
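    /* The resulting offsets follow the AArch64 vector table layout:
     * 0x000 current EL with SP_EL0, 0x200 current EL with SP_ELx,
     * 0x400 lower EL using AArch64, 0x600 lower EL using AArch32;
     * within each quarter, +0x000 synchronous, +0x080 IRQ, +0x100 FIQ
     * (and +0x180 SError, which is not raised by this function).
     */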
8512 
8513     switch (cs->exception_index) {
8514     case EXCP_PREFETCH_ABORT:
8515     case EXCP_DATA_ABORT:
8516         env->cp15.far_el[new_el] = env->exception.vaddress;
8517         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
8518                       env->cp15.far_el[new_el]);
8519         /* fall through */
8520     case EXCP_BKPT:
8521     case EXCP_UDEF:
8522     case EXCP_SWI:
8523     case EXCP_HVC:
8524     case EXCP_HYP_TRAP:
8525     case EXCP_SMC:
8526         if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
8527             /*
8528              * QEMU internal FP/SIMD syndromes from AArch32 include the
8529              * TA and coproc fields which are only exposed if the exception
8530              * is taken to AArch32 Hyp mode. Mask them out to get a valid
8531              * AArch64 format syndrome.
8532              */
8533             env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
8534         }
8535         env->cp15.esr_el[new_el] = env->exception.syndrome;
8536         break;
8537     case EXCP_IRQ:
8538     case EXCP_VIRQ:
8539         addr += 0x80;
8540         break;
8541     case EXCP_FIQ:
8542     case EXCP_VFIQ:
8543         addr += 0x100;
8544         break;
8545     case EXCP_SEMIHOST:
8546         qemu_log_mask(CPU_LOG_INT,
8547                       "...handling as semihosting call 0x%" PRIx64 "\n",
8548                       env->xregs[0]);
8549         env->xregs[0] = do_arm_semihosting(env);
8550         return;
8551     default:
8552         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8553     }
8554 
8555     if (is_a64(env)) {
8556         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
8557         aarch64_save_sp(env, arm_current_el(env));
8558         env->elr_el[new_el] = env->pc;
8559     } else {
8560         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
8561         env->elr_el[new_el] = env->regs[15];
8562 
8563         aarch64_sync_32_to_64(env);
8564 
8565         env->condexec_bits = 0;
8566     }
8567     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
8568                   env->elr_el[new_el]);
8569 
8570     pstate_write(env, PSTATE_DAIF | new_mode);
8571     env->aarch64 = 1;
8572     aarch64_restore_sp(env, new_el);
8573 
8574     env->pc = addr;
8575 
8576     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
8577                   new_el, env->pc, pstate_read(env));
8578 }
8579 
8580 static inline bool check_for_semihosting(CPUState *cs)
8581 {
8582     /* Check whether this exception is a semihosting call; if so
8583      * then handle it and return true; otherwise return false.
8584      */
8585     ARMCPU *cpu = ARM_CPU(cs);
8586     CPUARMState *env = &cpu->env;
8587 
8588     if (is_a64(env)) {
8589         if (cs->exception_index == EXCP_SEMIHOST) {
8590             /* This is always the 64-bit semihosting exception.
8591              * The "is this usermode" and "is semihosting enabled"
8592              * checks have been done at translate time.
8593              */
8594             qemu_log_mask(CPU_LOG_INT,
8595                           "...handling as semihosting call 0x%" PRIx64 "\n",
8596                           env->xregs[0]);
8597             env->xregs[0] = do_arm_semihosting(env);
8598             return true;
8599         }
8600         return false;
8601     } else {
8602         uint32_t imm;
8603 
8604         /* Only intercept calls from privileged modes, to provide some
8605          * semblance of security.
8606          */
8607         if (cs->exception_index != EXCP_SEMIHOST &&
8608             (!semihosting_enabled() ||
8609              ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
8610             return false;
8611         }
8612 
8613         switch (cs->exception_index) {
8614         case EXCP_SEMIHOST:
8615             /* This is always a semihosting call; the "is this usermode"
8616              * and "is semihosting enabled" checks have been done at
8617              * translate time.
8618              */
8619             break;
8620         case EXCP_SWI:
8621             /* Check for semihosting interrupt.  */
8622             if (env->thumb) {
8623                 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
8624                     & 0xff;
8625                 if (imm == 0xab) {
8626                     break;
8627                 }
8628             } else {
8629                 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
8630                     & 0xffffff;
8631                 if (imm == 0x123456) {
8632                     break;
8633                 }
8634             }
8635             return false;
8636         case EXCP_BKPT:
8637             /* See if this is a semihosting syscall.  */
8638             if (env->thumb) {
8639                 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
8640                     & 0xff;
8641                 if (imm == 0xab) {
8642                     env->regs[15] += 2;
8643                     break;
8644                 }
8645             }
8646             return false;
8647         default:
8648             return false;
8649         }
8650 
8651         qemu_log_mask(CPU_LOG_INT,
8652                       "...handling as semihosting call 0x%x\n",
8653                       env->regs[0]);
8654         env->regs[0] = do_arm_semihosting(env);
8655         return true;
8656     }
8657 }
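
/* For reference, the guest-side traps recognized above are "svc 0xab"
 * or "bkpt 0xab" in Thumb code and "svc 0x123456" in ARM code; the
 * EXCP_SEMIHOST cases (e.g. the A64 "hlt 0xf000" instruction) were
 * already validated at translate time.
 */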
8658 
8659 /* Handle a CPU exception for A and R profile CPUs.
8660  * Do any appropriate logging, handle PSCI calls, and then hand off
8661  * to the AArch64-entry or AArch32-entry function depending on the
8662  * target exception level's register width.
8663  */
8664 void arm_cpu_do_interrupt(CPUState *cs)
8665 {
8666     ARMCPU *cpu = ARM_CPU(cs);
8667     CPUARMState *env = &cpu->env;
8668     unsigned int new_el = env->exception.target_el;
8669 
8670     assert(!arm_feature(env, ARM_FEATURE_M));
8671 
8672     arm_log_exception(cs->exception_index);
8673     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
8674                   new_el);
8675     if (qemu_loglevel_mask(CPU_LOG_INT)
8676         && !excp_is_internal(cs->exception_index)) {
8677         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
8678                       syn_get_ec(env->exception.syndrome),
8679                       env->exception.syndrome);
8680     }
8681 
8682     if (arm_is_psci_call(cpu, cs->exception_index)) {
8683         arm_handle_psci_call(cpu);
8684         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
8685         return;
8686     }
8687 
8688     /* Semihosting semantics depend on the register width of the
8689      * code that caused the exception, not the target exception level,
8690      * so must be handled here.
8691      */
8692     if (check_for_semihosting(cs)) {
8693         return;
8694     }
8695 
8696     /* Hooks may change global state so BQL should be held, also the
8697      * BQL needs to be held for any modification of
8698      * cs->interrupt_request.
8699      */
8700     g_assert(qemu_mutex_iothread_locked());
8701 
8702     arm_call_pre_el_change_hook(cpu);
8703 
8704     assert(!excp_is_internal(cs->exception_index));
8705     if (arm_el_is_aa64(env, new_el)) {
8706         arm_cpu_do_interrupt_aarch64(cs);
8707     } else {
8708         arm_cpu_do_interrupt_aarch32(cs);
8709     }
8710 
8711     arm_call_el_change_hook(cpu);
8712 
8713     if (!kvm_enabled()) {
8714         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
8715     }
8716 }
8717 
8718 /* Return the exception level which controls this address translation regime */
8719 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
8720 {
8721     switch (mmu_idx) {
8722     case ARMMMUIdx_S2NS:
8723     case ARMMMUIdx_S1E2:
8724         return 2;
8725     case ARMMMUIdx_S1E3:
8726         return 3;
8727     case ARMMMUIdx_S1SE0:
8728         return arm_el_is_aa64(env, 3) ? 1 : 3;
8729     case ARMMMUIdx_S1SE1:
8730     case ARMMMUIdx_S1NSE0:
8731     case ARMMMUIdx_S1NSE1:
8732     case ARMMMUIdx_MPrivNegPri:
8733     case ARMMMUIdx_MUserNegPri:
8734     case ARMMMUIdx_MPriv:
8735     case ARMMMUIdx_MUser:
8736     case ARMMMUIdx_MSPrivNegPri:
8737     case ARMMMUIdx_MSUserNegPri:
8738     case ARMMMUIdx_MSPriv:
8739     case ARMMMUIdx_MSUser:
8740         return 1;
8741     default:
8742         g_assert_not_reached();
8743     }
8744 }
8745 
8746 /* Return the SCTLR value which controls this address translation regime */
8747 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
8748 {
8749     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
8750 }
8751 
8752 /* Return true if the specified stage of address translation is disabled */
8753 static inline bool regime_translation_disabled(CPUARMState *env,
8754                                                ARMMMUIdx mmu_idx)
8755 {
8756     if (arm_feature(env, ARM_FEATURE_M)) {
8757         switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
8758                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
8759         case R_V7M_MPU_CTRL_ENABLE_MASK:
8760             /* Enabled, but not for HardFault and NMI */
8761             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
8762         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
8763             /* Enabled for all cases */
8764             return false;
8765         case 0:
8766         default:
8767             /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
8768              * we warned about that in armv7m_nvic.c when the guest set it.
8769              */
8770             return true;
8771         }
8772     }
8773 
8774     if (mmu_idx == ARMMMUIdx_S2NS) {
8775         /* HCR.DC means HCR.VM behaves as 1 */
8776         return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
8777     }
8778 
8779     if (env->cp15.hcr_el2 & HCR_TGE) {
8780         /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
8781         if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
8782             return true;
8783         }
8784     }
8785 
8786     if ((env->cp15.hcr_el2 & HCR_DC) &&
8787         (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
8788         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
8789         return true;
8790     }
8791 
8792     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
8793 }
8794 
8795 static inline bool regime_translation_big_endian(CPUARMState *env,
8796                                                  ARMMMUIdx mmu_idx)
8797 {
8798     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
8799 }
8800 
8801 /* Return the TCR controlling this translation regime */
8802 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
8803 {
8804     if (mmu_idx == ARMMMUIdx_S2NS) {
8805         return &env->cp15.vtcr_el2;
8806     }
8807     return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
8808 }
8809 
8810 /* Convert a possible stage1+2 MMU index into the appropriate
8811  * stage 1 MMU index
8812  */
8813 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
8814 {
8815     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8816         mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
8817     }
8818     return mmu_idx;
8819 }
8820 
8821 /* Returns TBI0 value for current regime el */
8822 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
8823 {
8824     TCR *tcr;
8825     uint32_t el;
8826 
8827     /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
8828      * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
8829      */
8830     mmu_idx = stage_1_mmu_idx(mmu_idx);
8831 
8832     tcr = regime_tcr(env, mmu_idx);
8833     el = regime_el(env, mmu_idx);
8834 
8835     if (el > 1) {
8836         return extract64(tcr->raw_tcr, 20, 1);
8837     } else {
8838         return extract64(tcr->raw_tcr, 37, 1);
8839     }
8840 }
8841 
8842 /* Returns TBI1 value for current regime el */
8843 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
8844 {
8845     TCR *tcr;
8846     uint32_t el;
8847 
8848     /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
8849      * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
8850      */
8851     mmu_idx = stage_1_mmu_idx(mmu_idx);
8852 
8853     tcr = regime_tcr(env, mmu_idx);
8854     el = regime_el(env, mmu_idx);
8855 
8856     if (el > 1) {
8857         return 0;
8858     } else {
8859         return extract64(tcr->raw_tcr, 38, 1);
8860     }
8861 }
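
/* The bit positions above reflect the TCR layouts: TCR_EL1 has separate
 * TBI0 (bit 37) and TBI1 (bit 38) controls for the two halves of the
 * address space, while TCR_EL2 and TCR_EL3 have a single TBI control
 * at bit 20 and no equivalent of TBI1.
 */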
8862 
8863 /* Return the TTBR associated with this translation regime */
8864 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
8865                                    int ttbrn)
8866 {
8867     if (mmu_idx == ARMMMUIdx_S2NS) {
8868         return env->cp15.vttbr_el2;
8869     }
8870     if (ttbrn == 0) {
8871         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
8872     } else {
8873         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
8874     }
8875 }
8876 
8877 /* Return true if the translation regime is using LPAE format page tables */
8878 static inline bool regime_using_lpae_format(CPUARMState *env,
8879                                             ARMMMUIdx mmu_idx)
8880 {
8881     int el = regime_el(env, mmu_idx);
8882     if (el == 2 || arm_el_is_aa64(env, el)) {
8883         return true;
8884     }
8885     if (arm_feature(env, ARM_FEATURE_LPAE)
8886         && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
8887         return true;
8888     }
8889     return false;
8890 }
8891 
8892 /* Returns true if the stage 1 translation regime is using LPAE format page
8893  * tables. Used when raising alignment exceptions, whose FSR changes depending
8894  * on whether the long or short descriptor format is in use. */
8895 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
8896 {
8897     mmu_idx = stage_1_mmu_idx(mmu_idx);
8898 
8899     return regime_using_lpae_format(env, mmu_idx);
8900 }
8901 
8902 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
8903 {
8904     switch (mmu_idx) {
8905     case ARMMMUIdx_S1SE0:
8906     case ARMMMUIdx_S1NSE0:
8907     case ARMMMUIdx_MUser:
8908     case ARMMMUIdx_MSUser:
8909     case ARMMMUIdx_MUserNegPri:
8910     case ARMMMUIdx_MSUserNegPri:
8911         return true;
8912     default:
8913         return false;
8914     case ARMMMUIdx_S12NSE0:
8915     case ARMMMUIdx_S12NSE1:
8916         g_assert_not_reached();
8917     }
8918 }
8919 
8920 /* Translate section/page access permissions to page
8921  * R/W protection flags
8922  *
8923  * @env:         CPUARMState
8924  * @mmu_idx:     MMU index indicating required translation regime
8925  * @ap:          The 3-bit access permissions (AP[2:0])
8926  * @domain_prot: The 2-bit domain access permissions
8927  */
8928 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
8929                                 int ap, int domain_prot)
8930 {
8931     bool is_user = regime_is_user(env, mmu_idx);
8932 
8933     if (domain_prot == 3) {
8934         return PAGE_READ | PAGE_WRITE;
8935     }
8936 
8937     switch (ap) {
8938     case 0:
8939         if (arm_feature(env, ARM_FEATURE_V7)) {
8940             return 0;
8941         }
8942         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
8943         case SCTLR_S:
8944             return is_user ? 0 : PAGE_READ;
8945         case SCTLR_R:
8946             return PAGE_READ;
8947         default:
8948             return 0;
8949         }
8950     case 1:
8951         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8952     case 2:
8953         if (is_user) {
8954             return PAGE_READ;
8955         } else {
8956             return PAGE_READ | PAGE_WRITE;
8957         }
8958     case 3:
8959         return PAGE_READ | PAGE_WRITE;
8960     case 4: /* Reserved.  */
8961         return 0;
8962     case 5:
8963         return is_user ? 0 : PAGE_READ;
8964     case 6:
8965         return PAGE_READ;
8966     case 7:
8967         if (!arm_feature(env, ARM_FEATURE_V6K)) {
8968             return 0;
8969         }
8970         return PAGE_READ;
8971     default:
8972         g_assert_not_reached();
8973     }
8974 }
8975 
8976 /* Translate section/page access permissions to page
8977  * R/W protection flags.
8978  *
8979  * @ap:      The 2-bit simple AP (AP[2:1])
8980  * @is_user: TRUE if accessing from PL0
8981  */
8982 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
8983 {
8984     switch (ap) {
8985     case 0:
8986         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8987     case 1:
8988         return PAGE_READ | PAGE_WRITE;
8989     case 2:
8990         return is_user ? 0 : PAGE_READ;
8991     case 3:
8992         return PAGE_READ;
8993     default:
8994         g_assert_not_reached();
8995     }
8996 }
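
/* This is the "simple" AP model: AP[2:1] == 0b00 means RW for
 * privileged code only, 0b01 RW at any privilege, 0b10 RO for
 * privileged code only, and 0b11 RO at any privilege.
 */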
8997 
8998 static inline int
8999 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
9000 {
9001     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
9002 }
9003 
9004 /* Translate S2 section/page access permissions to protection flags
9005  *
9006  * @env:     CPUARMState
9007  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
9008  * @xn:      XN (execute-never) bit
9009  */
9010 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
9011 {
9012     int prot = 0;
9013 
9014     if (s2ap & 1) {
9015         prot |= PAGE_READ;
9016     }
9017     if (s2ap & 2) {
9018         prot |= PAGE_WRITE;
9019     }
9020     if (!xn) {
9021         if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
9022             prot |= PAGE_EXEC;
9023         }
9024     }
9025     return prot;
9026 }
9027 
9028 /* Translate section/page access permissions to protection flags
9029  *
9030  * @env:     CPUARMState
9031  * @mmu_idx: MMU index indicating required translation regime
9032  * @is_aa64: TRUE if AArch64
9033  * @ap:      The 2-bit simple AP (AP[2:1])
9034  * @ns:      NS (non-secure) bit
9035  * @xn:      XN (execute-never) bit
9036  * @pxn:     PXN (privileged execute-never) bit
9037  */
9038 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
9039                       int ap, int ns, int xn, int pxn)
9040 {
9041     bool is_user = regime_is_user(env, mmu_idx);
9042     int prot_rw, user_rw;
9043     bool have_wxn;
9044     int wxn = 0;
9045 
9046     assert(mmu_idx != ARMMMUIdx_S2NS);
9047 
9048     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
9049     if (is_user) {
9050         prot_rw = user_rw;
9051     } else {
9052         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
9053     }
9054 
9055     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
9056         return prot_rw;
9057     }
9058 
9059     /* TODO have_wxn should be replaced with
9060      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
9061      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
9062      * compatible processors have EL2, which is required for [U]WXN.
9063      */
9064     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
9065 
9066     if (have_wxn) {
9067         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
9068     }
9069 
9070     if (is_aa64) {
9071         switch (regime_el(env, mmu_idx)) {
9072         case 1:
9073             if (!is_user) {
9074                 xn = pxn || (user_rw & PAGE_WRITE);
9075             }
9076             break;
9077         case 2:
9078         case 3:
9079             break;
9080         }
9081     } else if (arm_feature(env, ARM_FEATURE_V7)) {
9082         switch (regime_el(env, mmu_idx)) {
9083         case 1:
9084         case 3:
9085             if (is_user) {
9086                 xn = xn || !(user_rw & PAGE_READ);
9087             } else {
9088                 int uwxn = 0;
9089                 if (have_wxn) {
9090                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
9091                 }
9092                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
9093                      (uwxn && (user_rw & PAGE_WRITE));
9094             }
9095             break;
9096         case 2:
9097             break;
9098         }
9099     } else {
9100         xn = wxn = 0;
9101     }
9102 
9103     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
9104         return prot_rw;
9105     }
9106     return prot_rw | PAGE_EXEC;
9107 }
9108 
9109 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
9110                                      uint32_t *table, uint32_t address)
9111 {
9112     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
9113     TCR *tcr = regime_tcr(env, mmu_idx);
9114 
9115     if (address & tcr->mask) {
9116         if (tcr->raw_tcr & TTBCR_PD1) {
9117             /* Translation table walk disabled for TTBR1 */
9118             return false;
9119         }
9120         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
9121     } else {
9122         if (tcr->raw_tcr & TTBCR_PD0) {
9123             /* Translation table walk disabled for TTBR0 */
9124             return false;
9125         }
9126         *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
9127     }
9128     *table |= (address >> 18) & 0x3ffc;
9129     return true;
9130 }
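
/* The L1 index is address bits [31:20]; each descriptor is 4 bytes, so
 * "(address >> 18) & 0x3ffc" is index * 4. For example, a hypothetical
 * address 0x12345678 has L1 index 0x123 and so selects the descriptor
 * at table offset 0x48c.
 */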
9131 
9132 /* Translate a S1 pagetable walk through S2 if needed.  */
9133 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
9134                                hwaddr addr, MemTxAttrs txattrs,
9135                                ARMMMUFaultInfo *fi)
9136 {
9137     if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
9138         !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
9139         target_ulong s2size;
9140         hwaddr s2pa;
9141         int s2prot;
9142         int ret;
9143         ARMCacheAttrs cacheattrs = {};
9144         ARMCacheAttrs *pcacheattrs = NULL;
9145 
9146         if (env->cp15.hcr_el2 & HCR_PTW) {
9147             /*
9148              * PTW means we must fault if this S1 walk touches S2 Device
9149              * memory; otherwise we don't care about the attributes and can
9150              * save the S2 translation the effort of computing them.
9151              */
9152             pcacheattrs = &cacheattrs;
9153         }
9154 
9155         ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
9156                                  &txattrs, &s2prot, &s2size, fi, pcacheattrs);
9157         if (ret) {
9158             assert(fi->type != ARMFault_None);
9159             fi->s2addr = addr;
9160             fi->stage2 = true;
9161             fi->s1ptw = true;
9162             return ~0;
9163         }
9164         if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
9165             /* Access was to Device memory: generate Permission fault */
9166             fi->type = ARMFault_Permission;
9167             fi->s2addr = addr;
9168             fi->stage2 = true;
9169             fi->s1ptw = true;
9170             return ~0;
9171         }
9172         addr = s2pa;
9173     }
9174     return addr;
9175 }
9176 
9177 /* All loads done in the course of a page table walk go through here. */
9178 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
9179                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
9180 {
9181     ARMCPU *cpu = ARM_CPU(cs);
9182     CPUARMState *env = &cpu->env;
9183     MemTxAttrs attrs = {};
9184     MemTxResult result = MEMTX_OK;
9185     AddressSpace *as;
9186     uint32_t data;
9187 
9188     attrs.secure = is_secure;
9189     as = arm_addressspace(cs, attrs);
9190     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
9191     if (fi->s1ptw) {
9192         return 0;
9193     }
9194     if (regime_translation_big_endian(env, mmu_idx)) {
9195         data = address_space_ldl_be(as, addr, attrs, &result);
9196     } else {
9197         data = address_space_ldl_le(as, addr, attrs, &result);
9198     }
9199     if (result == MEMTX_OK) {
9200         return data;
9201     }
9202     fi->type = ARMFault_SyncExternalOnWalk;
9203     fi->ea = arm_extabort_type(result);
9204     return 0;
9205 }
9206 
9207 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
9208                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
9209 {
9210     ARMCPU *cpu = ARM_CPU(cs);
9211     CPUARMState *env = &cpu->env;
9212     MemTxAttrs attrs = {};
9213     MemTxResult result = MEMTX_OK;
9214     AddressSpace *as;
9215     uint64_t data;
9216 
9217     attrs.secure = is_secure;
9218     as = arm_addressspace(cs, attrs);
9219     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
9220     if (fi->s1ptw) {
9221         return 0;
9222     }
9223     if (regime_translation_big_endian(env, mmu_idx)) {
9224         data = address_space_ldq_be(as, addr, attrs, &result);
9225     } else {
9226         data = address_space_ldq_le(as, addr, attrs, &result);
9227     }
9228     if (result == MEMTX_OK) {
9229         return data;
9230     }
9231     fi->type = ARMFault_SyncExternalOnWalk;
9232     fi->ea = arm_extabort_type(result);
9233     return 0;
9234 }
9235 
9236 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
9237                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
9238                              hwaddr *phys_ptr, int *prot,
9239                              target_ulong *page_size,
9240                              ARMMMUFaultInfo *fi)
9241 {
9242     CPUState *cs = CPU(arm_env_get_cpu(env));
9243     int level = 1;
9244     uint32_t table;
9245     uint32_t desc;
9246     int type;
9247     int ap;
9248     int domain = 0;
9249     int domain_prot;
9250     hwaddr phys_addr;
9251     uint32_t dacr;
9252 
9253     /* Pagetable walk.  */
9254     /* Lookup l1 descriptor.  */
9255     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
9256         /* Section translation fault if page walk is disabled by PD0 or PD1 */
9257         fi->type = ARMFault_Translation;
9258         goto do_fault;
9259     }
9260     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9261                        mmu_idx, fi);
9262     if (fi->type != ARMFault_None) {
9263         goto do_fault;
9264     }
9265     type = (desc & 3);
9266     domain = (desc >> 5) & 0x0f;
9267     if (regime_el(env, mmu_idx) == 1) {
9268         dacr = env->cp15.dacr_ns;
9269     } else {
9270         dacr = env->cp15.dacr_s;
9271     }
9272     domain_prot = (dacr >> (domain * 2)) & 3;
9273     if (type == 0) {
9274         /* Section translation fault.  */
9275         fi->type = ARMFault_Translation;
9276         goto do_fault;
9277     }
9278     if (type != 2) {
9279         level = 2;
9280     }
9281     if (domain_prot == 0 || domain_prot == 2) {
9282         fi->type = ARMFault_Domain;
9283         goto do_fault;
9284     }
9285     if (type == 2) {
9286         /* 1Mb section.  */
9287         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
9288         ap = (desc >> 10) & 3;
9289         *page_size = 1024 * 1024;
9290     } else {
9291         /* Lookup l2 entry.  */
9292         if (type == 1) {
9293             /* Coarse pagetable.  */
9294             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
9295         } else {
9296             /* Fine pagetable.  */
9297             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
9298         }
9299         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9300                            mmu_idx, fi);
9301         if (fi->type != ARMFault_None) {
9302             goto do_fault;
9303         }
9304         switch (desc & 3) {
9305         case 0: /* Page translation fault.  */
9306             fi->type = ARMFault_Translation;
9307             goto do_fault;
9308         case 1: /* 64k page.  */
9309             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
9310             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
9311             *page_size = 0x10000;
9312             break;
9313         case 2: /* 4k page.  */
9314             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
9315             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
9316             *page_size = 0x1000;
9317             break;
9318         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
9319             if (type == 1) {
9320                 /* ARMv6/XScale extended small page format */
9321                 if (arm_feature(env, ARM_FEATURE_XSCALE)
9322                     || arm_feature(env, ARM_FEATURE_V6)) {
9323                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
9324                     *page_size = 0x1000;
9325                 } else {
9326                     /* UNPREDICTABLE in ARMv5; we choose to take a
9327                      * page translation fault.
9328                      */
9329                     fi->type = ARMFault_Translation;
9330                     goto do_fault;
9331                 }
9332             } else {
9333                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
9334                 *page_size = 0x400;
9335             }
9336             ap = (desc >> 4) & 3;
9337             break;
9338         default:
9339             /* Never happens, but compiler isn't smart enough to tell.  */
9340             abort();
9341         }
9342     }
9343     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
9344     *prot |= *prot ? PAGE_EXEC : 0;
9345     if (!(*prot & (1 << access_type))) {
9346         /* Access permission fault.  */
9347         fi->type = ARMFault_Permission;
9348         goto do_fault;
9349     }
9350     *phys_ptr = phys_addr;
9351     return false;
9352 do_fault:
9353     fi->domain = domain;
9354     fi->level = level;
9355     return true;
9356 }
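
/* Illustrative sketch, not called by the walker above: how get_phys_addr_v5()
 * splits a 32-bit VA across the two walk levels. The helper name is
 * hypothetical and exists only for exposition.
 */
static inline uint32_t __attribute__((unused))
v5_walk_indices_example(uint32_t address)
{
    uint32_t l1_index = address >> 20;            /* one L1 entry per 1MB */
    uint32_t l2_coarse = (address >> 12) & 0xff;  /* coarse table: 256 entries */

    /* Packed purely for illustration; the real code scales each index by
     * the 4-byte descriptor size when forming the table address.
     */
    return (l1_index << 8) | l2_coarse;
}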
9357 
9358 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
9359                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
9360                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
9361                              target_ulong *page_size, ARMMMUFaultInfo *fi)
9362 {
9363     CPUState *cs = CPU(arm_env_get_cpu(env));
9364     int level = 1;
9365     uint32_t table;
9366     uint32_t desc;
9367     uint32_t xn;
9368     uint32_t pxn = 0;
9369     int type;
9370     int ap;
9371     int domain = 0;
9372     int domain_prot;
9373     hwaddr phys_addr;
9374     uint32_t dacr;
9375     bool ns;
9376 
9377     /* Pagetable walk.  */
9378     /* Lookup l1 descriptor.  */
9379     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
9380         /* Section translation fault if page walk is disabled by PD0 or PD1 */
9381         fi->type = ARMFault_Translation;
9382         goto do_fault;
9383     }
9384     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9385                        mmu_idx, fi);
9386     if (fi->type != ARMFault_None) {
9387         goto do_fault;
9388     }
9389     type = (desc & 3);
9390     if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
9391         /* Section translation fault, or attempt to use the encoding
9392          * which is Reserved on implementations without PXN.
9393          */
9394         fi->type = ARMFault_Translation;
9395         goto do_fault;
9396     }
9397     if ((type == 1) || !(desc & (1 << 18))) {
9398         /* Page or Section.  */
9399         domain = (desc >> 5) & 0x0f;
9400     }
9401     if (regime_el(env, mmu_idx) == 1) {
9402         dacr = env->cp15.dacr_ns;
9403     } else {
9404         dacr = env->cp15.dacr_s;
9405     }
9406     if (type == 1) {
9407         level = 2;
9408     }
9409     domain_prot = (dacr >> (domain * 2)) & 3;
9410     if (domain_prot == 0 || domain_prot == 2) {
9411         /* Section or Page domain fault */
9412         fi->type = ARMFault_Domain;
9413         goto do_fault;
9414     }
9415     if (type != 1) {
9416         if (desc & (1 << 18)) {
9417             /* Supersection.  */
9418             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
9419             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
9420             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
9421             *page_size = 0x1000000;
9422         } else {
9423             /* Section.  */
9424             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
9425             *page_size = 0x100000;
9426         }
9427         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
9428         xn = desc & (1 << 4);
9429         pxn = desc & 1;
9430         ns = extract32(desc, 19, 1);
9431     } else {
9432         if (arm_feature(env, ARM_FEATURE_PXN)) {
9433             pxn = (desc >> 2) & 1;
9434         }
9435         ns = extract32(desc, 3, 1);
9436         /* Lookup l2 entry.  */
9437         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
9438         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9439                            mmu_idx, fi);
9440         if (fi->type != ARMFault_None) {
9441             goto do_fault;
9442         }
9443         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
9444         switch (desc & 3) {
9445         case 0: /* Page translation fault.  */
9446             fi->type = ARMFault_Translation;
9447             goto do_fault;
9448         case 1: /* 64k page.  */
9449             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
9450             xn = desc & (1 << 15);
9451             *page_size = 0x10000;
9452             break;
9453         case 2: case 3: /* 4k page.  */
9454             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
9455             xn = desc & 1;
9456             *page_size = 0x1000;
9457             break;
9458         default:
9459             /* Never happens, but compiler isn't smart enough to tell.  */
9460             abort();
9461         }
9462     }
9463     if (domain_prot == 3) {
9464         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9465     } else {
9466         if (pxn && !regime_is_user(env, mmu_idx)) {
9467             xn = 1;
9468         }
9469         if (xn && access_type == MMU_INST_FETCH) {
9470             fi->type = ARMFault_Permission;
9471             goto do_fault;
9472         }
9473 
9474         if (arm_feature(env, ARM_FEATURE_V6K) &&
9475                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
9476             /* The simplified model uses AP[0] as an access control bit.  */
9477             if ((ap & 1) == 0) {
9478                 /* Access flag fault.  */
9479                 fi->type = ARMFault_AccessFlag;
9480                 goto do_fault;
9481             }
9482             *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
9483         } else {
9484             *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
9485         }
9486         if (*prot && !xn) {
9487             *prot |= PAGE_EXEC;
9488         }
9489         if (!(*prot & (1 << access_type))) {
9490             /* Access permission fault.  */
9491             fi->type = ARMFault_Permission;
9492             goto do_fault;
9493         }
9494     }
9495     if (ns) {
9496         /* The NS bit will (as required by the architecture) have no effect if
9497          * the CPU doesn't support TZ or this is a non-secure translation
9498          * regime, because the attribute will already be non-secure.
9499          */
9500         attrs->secure = false;
9501     }
9502     *phys_ptr = phys_addr;
9503     return false;
9504 do_fault:
9505     fi->domain = domain;
9506     fi->level = level;
9507     return true;
9508 }
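
/* A minimal sketch of the SCTLR.AFE "simplified" AP decode that
 * get_phys_addr_v6() reaches via simple_ap_to_rw_prot() above. The helper
 * is hypothetical, for exposition only: AP[0] acts as the access flag and
 * AP[2:1] (the ap21 argument) select the permissions.
 */
static inline int __attribute__((unused))
v6_simple_ap_example(int ap21, bool is_user)
{
    switch (ap21) {
    case 0: /* privileged read-write only */
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1: /* read-write at any privilege */
        return PAGE_READ | PAGE_WRITE;
    case 2: /* privileged read-only */
        return is_user ? 0 : PAGE_READ;
    case 3: /* read-only at any privilege */
        return PAGE_READ;
    default:
        return 0;
    }
}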
9509 
9510 /*
9511  * check_s2_mmu_setup
9512  * @cpu:        ARMCPU
9513  * @is_aa64:    True if the translation regime is in AArch64 state
9514  * @startlevel: Suggested starting level
9515  * @inputsize:  Bitsize of IPAs
9516  * @stride:     Page-table stride (See the ARM ARM)
9517  *
9518  * Returns true if the suggested S2 translation parameters are OK and
9519  * false otherwise.
9520  */
9521 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
9522                                int inputsize, int stride)
9523 {
9524     const int grainsize = stride + 3;
9525     int startsizecheck;
9526 
9527     /* Negative levels are never allowed.  */
9528     if (level < 0) {
9529         return false;
9530     }
9531 
9532     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
9533     if (startsizecheck < 1 || startsizecheck > stride + 4) {
9534         return false;
9535     }
9536 
9537     if (is_aa64) {
9538         CPUARMState *env = &cpu->env;
9539         unsigned int pamax = arm_pamax(cpu);
9540 
9541         switch (stride) {
9542         case 13: /* 64KB Pages.  */
9543             if (level == 0 || (level == 1 && pamax <= 42)) {
9544                 return false;
9545             }
9546             break;
9547         case 11: /* 16KB Pages.  */
9548             if (level == 0 || (level == 1 && pamax <= 40)) {
9549                 return false;
9550             }
9551             break;
9552         case 9: /* 4KB Pages.  */
9553             if (level == 0 && pamax <= 42) {
9554                 return false;
9555             }
9556             break;
9557         default:
9558             g_assert_not_reached();
9559         }
9560 
9561         /* Inputsize checks.  */
9562         if (inputsize > pamax &&
9563             (arm_el_is_aa64(env, 1) || inputsize > 40)) {
9564             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
9565             return false;
9566         }
9567     } else {
9568         /* AArch32 only supports 4KB pages. Assert on that.  */
9569         assert(stride == 9);
9570 
9571         if (level == 0) {
9572             return false;
9573         }
9574     }
9575     return true;
9576 }
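
/* Worked example for the check above, assuming a 4KB granule (stride 9,
 * grainsize 12), inputsize 40 and a suggested starting level of 1:
 * startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, which lies within
 * [1, stride + 4] = [1, 13], so the suggested parameters are accepted.
 */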
9577 
9578 /* Translate from the 4-bit stage 2 representation of
9579  * memory attributes (without cache-allocation hints) to
9580  * the 8-bit representation of the stage 1 MAIR registers
9581  * (which includes allocation hints).
9582  *
9583  * ref: shared/translation/attrs/S2AttrDecode()
9584  *      .../S2ConvertAttrsHints()
9585  */
9586 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
9587 {
9588     uint8_t hiattr = extract32(s2attrs, 2, 2);
9589     uint8_t loattr = extract32(s2attrs, 0, 2);
9590     uint8_t hihint = 0, lohint = 0;
9591 
9592     if (hiattr != 0) { /* normal memory */
9593         if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
9594             hiattr = loattr = 1; /* non-cacheable */
9595         } else {
9596             if (hiattr != 1) { /* Write-through or write-back */
9597                 hihint = 3; /* RW allocate */
9598             }
9599             if (loattr != 1) { /* Write-through or write-back */
9600                 lohint = 3; /* RW allocate */
9601             }
9602         }
9603     }
9604 
9605     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
9606 }
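
/* For example, stage 2 attrs 0b1111 (outer and inner write-back) convert
 * to 0xff (write-back with RW-allocate hints in both halves), while the
 * same input with HCR_EL2.CD set yields 0x44 (outer and inner
 * non-cacheable).
 */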
9607 
9608 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
9609                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
9610                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
9611                                target_ulong *page_size_ptr,
9612                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
9613 {
9614     ARMCPU *cpu = arm_env_get_cpu(env);
9615     CPUState *cs = CPU(cpu);
9616     /* Read an LPAE long-descriptor translation table. */
9617     ARMFaultType fault_type = ARMFault_Translation;
9618     uint32_t level;
9619     uint32_t epd = 0;
9620     int32_t t0sz, t1sz;
9621     uint32_t tg;
9622     uint64_t ttbr;
9623     int ttbr_select;
9624     hwaddr descaddr, indexmask, indexmask_grainsize;
9625     uint32_t tableattrs;
9626     target_ulong page_size;
9627     uint32_t attrs;
9628     int32_t stride = 9;
9629     int32_t addrsize;
9630     int inputsize;
9631     int32_t tbi = 0;
9632     TCR *tcr = regime_tcr(env, mmu_idx);
9633     int ap, ns, xn, pxn;
9634     uint32_t el = regime_el(env, mmu_idx);
9635     bool ttbr1_valid = true;
9636     uint64_t descaddrmask;
9637     bool aarch64 = arm_el_is_aa64(env, el);
9638 
9639     /* TODO:
9640      * This code does not handle the different format TCR for VTCR_EL2.
9641      * This code also does not support shareability levels.
9642      * Attribute and permission bit handling should also be checked when adding
9643      * support for those page table walks.
9644      */
9645     if (aarch64) {
9646         level = 0;
9647         addrsize = 64;
9648         if (el > 1) {
9649             if (mmu_idx != ARMMMUIdx_S2NS) {
9650                 tbi = extract64(tcr->raw_tcr, 20, 1);
9651             }
9652         } else {
9653             if (extract64(address, 55, 1)) {
9654                 tbi = extract64(tcr->raw_tcr, 38, 1);
9655             } else {
9656                 tbi = extract64(tcr->raw_tcr, 37, 1);
9657             }
9658         }
9659         tbi *= 8;
9660 
9661         /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
9662          * invalid.
9663          */
9664         if (el > 1) {
9665             ttbr1_valid = false;
9666         }
9667     } else {
9668         level = 1;
9669         addrsize = 32;
9670         /* There is no TTBR1 for EL2 */
9671         if (el == 2) {
9672             ttbr1_valid = false;
9673         }
9674     }
9675 
9676     /* Determine whether this address is in the region controlled by
9677      * TTBR0 or TTBR1 (or if it is in neither region and should fault).
9678      * This is a Non-secure PL0/1 stage 1 translation, so controlled by
9679      * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
9680      */
9681     if (aarch64) {
9682         /* AArch64 translation.  */
9683         t0sz = extract32(tcr->raw_tcr, 0, 6);
9684         t0sz = MIN(t0sz, 39);
9685         t0sz = MAX(t0sz, 16);
9686     } else if (mmu_idx != ARMMMUIdx_S2NS) {
9687         /* AArch32 stage 1 translation.  */
9688         t0sz = extract32(tcr->raw_tcr, 0, 3);
9689     } else {
9690         /* AArch32 stage 2 translation.  */
9691         bool sext = extract32(tcr->raw_tcr, 4, 1);
9692         bool sign = extract32(tcr->raw_tcr, 3, 1);
9693         /* Address size is 40-bit for a stage 2 translation,
9694          * and t0sz can be negative (from -8 to 7),
9695          * so we need to adjust it to use the TTBR selecting logic below.
9696          */
9697         addrsize = 40;
9698         t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
9699 
9700         /* If the sign-extend bit is not the same as t0sz[3], the result
9701          * is unpredictable. Flag this as a guest error.  */
9702         if (sign != sext) {
9703             qemu_log_mask(LOG_GUEST_ERROR,
9704                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
9705         }
9706     }
9707     t1sz = extract32(tcr->raw_tcr, 16, 6);
9708     if (aarch64) {
9709         t1sz = MIN(t1sz, 39);
9710         t1sz = MAX(t1sz, 16);
9711     }
9712     if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
9713         /* there is a ttbr0 region and we are in it (high bits all zero) */
9714         ttbr_select = 0;
9715     } else if (ttbr1_valid && t1sz &&
9716                !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
9717         /* there is a ttbr1 region and we are in it (high bits all one) */
9718         ttbr_select = 1;
9719     } else if (!t0sz) {
9720         /* ttbr0 region is "everything not in the ttbr1 region" */
9721         ttbr_select = 0;
9722     } else if (!t1sz && ttbr1_valid) {
9723         /* ttbr1 region is "everything not in the ttbr0 region" */
9724         ttbr_select = 1;
9725     } else {
9726         /* in the gap between the two regions, this is a Translation fault */
9727         fault_type = ARMFault_Translation;
9728         goto do_fault;
9729     }
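    /* For example, in a 64-bit EL1 regime with t0sz = 25 and tbi = 0, the
     * checks above place VAs with bits [63:39] all zeroes in the TTBR0
     * region and VAs with bits [63:39] all ones in the TTBR1 region.
     */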
9730 
9731     /* Note that QEMU ignores shareability and cacheability attributes,
9732      * so we don't need to do anything with the SH, ORGN, IRGN fields
9733      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
9734      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
9735      * implement any ASID-like capability so we can ignore it (instead
9736      * we will always flush the TLB any time the ASID is changed).
9737      */
9738     if (ttbr_select == 0) {
9739         ttbr = regime_ttbr(env, mmu_idx, 0);
9740         if (el < 2) {
9741             epd = extract32(tcr->raw_tcr, 7, 1);
9742         }
9743         inputsize = addrsize - t0sz;
9744 
9745         tg = extract32(tcr->raw_tcr, 14, 2);
9746         if (tg == 1) { /* 64KB pages */
9747             stride = 13;
9748         }
9749         if (tg == 2) { /* 16KB pages */
9750             stride = 11;
9751         }
9752     } else {
9753         /* We should only be here if TTBR1 is valid */
9754         assert(ttbr1_valid);
9755 
9756         ttbr = regime_ttbr(env, mmu_idx, 1);
9757         epd = extract32(tcr->raw_tcr, 23, 1);
9758         inputsize = addrsize - t1sz;
9759 
9760         tg = extract32(tcr->raw_tcr, 30, 2);
9761         if (tg == 3)  { /* 64KB pages */
9762             stride = 13;
9763         }
9764         if (tg == 1) { /* 16KB pages */
9765             stride = 11;
9766         }
9767     }
9768 
9769     /* Here we should have set up all the parameters for the translation:
9770      * inputsize, ttbr, epd, stride, tbi
9771      */
9772 
9773     if (epd) {
9774         /* Translation table walk disabled => Translation fault on TLB miss
9775          * Note: This is always 0 on 64-bit EL2 and EL3.
9776          */
9777         goto do_fault;
9778     }
9779 
9780     if (mmu_idx != ARMMMUIdx_S2NS) {
9781         /* The starting level depends on the virtual address size (which can
9782          * be up to 48 bits) and the translation granule size. It indicates
9783          * the number of strides (stride bits at a time) needed to
9784          * consume the bits of the input address. In the pseudocode this is:
9785          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
9786          * where their 'inputsize' is our 'inputsize', 'grainsize' is
9787          * our 'stride + 3' and 'stride' is our 'stride'.
9788          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
9789          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
9790          * = 4 - (inputsize - 4) / stride;
9791          */
9792         level = 4 - (inputsize - 4) / stride;
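        /* e.g. inputsize 48 with 4KB pages (stride 9) gives
         * level = 4 - 44 / 9 = 0, i.e. a full four-level walk.
         */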
9793     } else {
9794         /* For stage 2 translations the starting level is specified by the
9795          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
9796          */
9797         uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
9798         uint32_t startlevel;
9799         bool ok;
9800 
9801         if (!aarch64 || stride == 9) {
9802             /* AArch32 or 4KB pages */
9803             startlevel = 2 - sl0;
9804         } else {
9805             /* 16KB or 64KB pages */
9806             startlevel = 3 - sl0;
9807         }
9808 
9809         /* Check that the starting level is valid. */
9810         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
9811                                 inputsize, stride);
9812         if (!ok) {
9813             fault_type = ARMFault_Translation;
9814             goto do_fault;
9815         }
9816         level = startlevel;
9817     }
9818 
9819     indexmask_grainsize = (1ULL << (stride + 3)) - 1;
9820     indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
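    /* e.g. 4KB pages (stride 9), inputsize 48, starting level 0:
     * indexmask_grainsize = 0xfff and indexmask = (1ULL << 12) - 1, so the
     * first walk iteration below indexes the table by VA[47:39] (the ~7
     * masking turns the extracted bits into an 8-byte-aligned offset).
     */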
9821 
9822     /* Now we can extract the actual base address from the TTBR */
9823     descaddr = extract64(ttbr, 0, 48);
9824     descaddr &= ~indexmask;
9825 
9826     /* The address field in the descriptor goes up to bit 39 for ARMv7
9827      * but up to bit 47 for ARMv8. We nevertheless cap descaddrmask at bit
9828      * 39 for AArch32, because the higher bits are not needed to construct
9829      * the next descriptor address there (and should all be zeroes anyway).
9830      */
9831     descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
9832                    ~indexmask_grainsize;
9833 
9834     /* Secure accesses start with the page table in secure memory and
9835      * can be downgraded to non-secure at any step. Non-secure accesses
9836      * remain non-secure. We implement this by just ORing in the NSTable/NS
9837      * bits at each step.
9838      */
9839     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
9840     for (;;) {
9841         uint64_t descriptor;
9842         bool nstable;
9843 
9844         descaddr |= (address >> (stride * (4 - level))) & indexmask;
9845         descaddr &= ~7ULL;
9846         nstable = extract32(tableattrs, 4, 1);
9847         descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
9848         if (fi->type != ARMFault_None) {
9849             goto do_fault;
9850         }
9851 
9852         if (!(descriptor & 1) ||
9853             (!(descriptor & 2) && (level == 3))) {
9854             /* Invalid, or the Reserved level 3 encoding */
9855             goto do_fault;
9856         }
9857         descaddr = descriptor & descaddrmask;
9858 
9859         if ((descriptor & 2) && (level < 3)) {
9860             /* Table entry. The top five bits are attributes which may
9861              * propagate down through lower levels of the table (and
9862              * which are all arranged so that 0 means "no effect", so
9863              * we can gather them up by ORing in the bits at each level).
9864              */
9865             tableattrs |= extract64(descriptor, 59, 5);
9866             level++;
9867             indexmask = indexmask_grainsize;
9868             continue;
9869         }
9870         /* Block entry at level 1 or 2, or page entry at level 3.
9871          * These are basically the same thing, although the number
9872          * of bits we pull in from the vaddr varies.
9873          */
9874         page_size = (1ULL << ((stride * (4 - level)) + 3));
9875         descaddr |= (address & (page_size - 1));
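        /* e.g. a level 2 block with a 4KB granule maps
         * page_size = 1ULL << 21, i.e. a 2MB block.
         */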
9876         /* Extract attributes from the descriptor */
9877         attrs = extract64(descriptor, 2, 10)
9878             | (extract64(descriptor, 52, 12) << 10);
9879 
9880         if (mmu_idx == ARMMMUIdx_S2NS) {
9881             /* Stage 2 table descriptors do not include any attribute fields */
9882             break;
9883         }
9884         /* Merge in attributes from table descriptors */
9885         attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
9886         attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
9887         /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
9888          * means "force PL1 access only", which means forcing AP[1] to 0.
9889          */
9890         if (extract32(tableattrs, 2, 1)) {
9891             attrs &= ~(1 << 4);
9892         }
9893         attrs |= nstable << 3; /* NS */
9894         break;
9895     }
9896     /* Here descaddr is the final physical address, and attributes
9897      * are all in attrs.
9898      */
9899     fault_type = ARMFault_AccessFlag;
9900     if ((attrs & (1 << 8)) == 0) {
9901         /* Access flag */
9902         goto do_fault;
9903     }
9904 
9905     ap = extract32(attrs, 4, 2);
9906     xn = extract32(attrs, 12, 1);
9907 
9908     if (mmu_idx == ARMMMUIdx_S2NS) {
9909         ns = true;
9910         *prot = get_S2prot(env, ap, xn);
9911     } else {
9912         ns = extract32(attrs, 3, 1);
9913         pxn = extract32(attrs, 11, 1);
9914         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
9915     }
9916 
9917     fault_type = ARMFault_Permission;
9918     if (!(*prot & (1 << access_type))) {
9919         goto do_fault;
9920     }
9921 
9922     if (ns) {
9923         /* The NS bit will (as required by the architecture) have no effect if
9924          * the CPU doesn't support TZ or this is a non-secure translation
9925          * regime, because the attribute will already be non-secure.
9926          */
9927         txattrs->secure = false;
9928     }
9929 
9930     if (cacheattrs != NULL) {
9931         if (mmu_idx == ARMMMUIdx_S2NS) {
9932             cacheattrs->attrs = convert_stage2_attrs(env,
9933                                                      extract32(attrs, 0, 4));
9934         } else {
9935             /* Index into MAIR registers for cache attributes */
9936             uint8_t attrindx = extract32(attrs, 0, 3);
9937             uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
9938             assert(attrindx <= 7);
9939             cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
9940         }
9941         cacheattrs->shareability = extract32(attrs, 6, 2);
9942     }
9943 
9944     *phys_ptr = descaddr;
9945     *page_size_ptr = page_size;
9946     return false;
9947 
9948 do_fault:
9949     fi->type = fault_type;
9950     fi->level = level;
9951     /* Tag the error as S2: either a failed S1 PTW at S2 or an ordinary S2 walk.  */
9952     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
9953     return true;
9954 }
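
/* A minimal sketch of driving the LPAE walker above for a debug-style
 * lookup, assuming a stage 1 non-secure EL1 regime. The function is
 * hypothetical and for exposition only; the real callers include
 * get_phys_addr() below.
 */
static bool __attribute__((unused))
lpae_debug_translate_example(CPUARMState *env, target_ulong va, hwaddr *pa)
{
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    target_ulong page_size;
    int prot;

    if (get_phys_addr_lpae(env, va, MMU_DATA_LOAD, ARMMMUIdx_S1NSE1,
                           pa, &attrs, &prot, &page_size, &fi, NULL)) {
        /* Walk faulted: fi.type and fi.level say why and at which level */
        return false;
    }
    return true;
}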
9955 
9956 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
9957                                                 ARMMMUIdx mmu_idx,
9958                                                 int32_t address, int *prot)
9959 {
9960     if (!arm_feature(env, ARM_FEATURE_M)) {
9961         *prot = PAGE_READ | PAGE_WRITE;
9962         switch (address) {
9963         case 0xF0000000 ... 0xFFFFFFFF:
9964             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
9965                 /* executing from the high vectors (hivecs) region is OK */
9966                 *prot |= PAGE_EXEC;
9967             }
9968             break;
9969         case 0x00000000 ... 0x7FFFFFFF:
9970             *prot |= PAGE_EXEC;
9971             break;
9972         }
9973     } else {
9974         /* Default system address map for M profile cores.
9975          * The architecture specifies which regions are execute-never;
9976          * at the MPU level no other checks are defined.
9977          */
9978         switch (address) {
9979         case 0x00000000 ... 0x1fffffff: /* ROM */
9980         case 0x20000000 ... 0x3fffffff: /* SRAM */
9981         case 0x60000000 ... 0x7fffffff: /* RAM */
9982         case 0x80000000 ... 0x9fffffff: /* RAM */
9983             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9984             break;
9985         case 0x40000000 ... 0x5fffffff: /* Peripheral */
9986         case 0xa0000000 ... 0xbfffffff: /* Device */
9987         case 0xc0000000 ... 0xdfffffff: /* Device */
9988         case 0xe0000000 ... 0xffffffff: /* System */
9989             *prot = PAGE_READ | PAGE_WRITE;
9990             break;
9991         default:
9992             g_assert_not_reached();
9993         }
9994     }
9995 }
9996 
9997 static bool pmsav7_use_background_region(ARMCPU *cpu,
9998                                          ARMMMUIdx mmu_idx, bool is_user)
9999 {
10000     /* Return true if we should use the default memory map as a
10001      * "background" region when there are no hits against any MPU regions.
10002      */
10003     CPUARMState *env = &cpu->env;
10004 
10005     if (is_user) {
10006         return false;
10007     }
10008 
10009     if (arm_feature(env, ARM_FEATURE_M)) {
10010         return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
10011             & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
10012     } else {
10013         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
10014     }
10015 }
10016 
10017 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
10018 {
10019     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
10020     return arm_feature(env, ARM_FEATURE_M) &&
10021         extract32(address, 20, 12) == 0xe00;
10022 }
10023 
10024 static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
10025 {
10026     /* True if address is in the M profile system region
10027      * 0xe0000000 - 0xffffffff
10028      */
10029     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
10030 }
10031 
10032 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
10033                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
10034                                  hwaddr *phys_ptr, int *prot,
10035                                  target_ulong *page_size,
10036                                  ARMMMUFaultInfo *fi)
10037 {
10038     ARMCPU *cpu = arm_env_get_cpu(env);
10039     int n;
10040     bool is_user = regime_is_user(env, mmu_idx);
10041 
10042     *phys_ptr = address;
10043     *page_size = TARGET_PAGE_SIZE;
10044     *prot = 0;
10045 
10046     if (regime_translation_disabled(env, mmu_idx) ||
10047         m_is_ppb_region(env, address)) {
10048         /* MPU disabled or M profile PPB access: use default memory map.
10049          * The other case which uses the default memory map in the
10050          * v7M ARM ARM pseudocode is exception vector reads from the vector
10051          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
10052          * which always does a direct read using address_space_ldl(), rather
10053          * than going via this function, so we don't need to check that here.
10054          */
10055         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10056     } else { /* MPU enabled */
10057         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
10058             /* region search */
10059             uint32_t base = env->pmsav7.drbar[n];
10060             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
10061             uint32_t rmask;
10062             bool srdis = false;
10063 
10064             if (!(env->pmsav7.drsr[n] & 0x1)) {
10065                 continue;
10066             }
10067 
10068             if (!rsize) {
10069                 qemu_log_mask(LOG_GUEST_ERROR,
10070                               "DRSR[%d]: Rsize field cannot be 0\n", n);
10071                 continue;
10072             }
10073             rsize++;
10074             rmask = (1ull << rsize) - 1;
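            /* e.g. a DRSR.Rsize field of 11 encodes a 2^12 byte (4KB)
             * region, giving rmask = 0xfff
             */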
10075 
10076             if (base & rmask) {
10077                 qemu_log_mask(LOG_GUEST_ERROR,
10078                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
10079                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
10080                               n, base, rmask);
10081                 continue;
10082             }
10083 
10084             if (address < base || address > base + rmask) {
10085                 /*
10086                  * Address not in this region. We must check whether the
10087                  * region covers addresses in the same page as our address.
10088                  * In that case we must not report a size that covers the
10089                  * whole page for a subsequent hit against a different MPU
10090                  * region or the background region, because it would result in
10091                  * incorrect TLB hits for subsequent accesses to addresses that
10092                  * are in this MPU region.
10093                  */
10094                 if (ranges_overlap(base, rmask,
10095                                    address & TARGET_PAGE_MASK,
10096                                    TARGET_PAGE_SIZE)) {
10097                     *page_size = 1;
10098                 }
10099                 continue;
10100             }
10101 
10102             /* Region matched */
10103 
10104             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
10105                 int i, snd;
10106                 uint32_t srdis_mask;
10107 
10108                 rsize -= 3; /* sub region size (power of 2) */
10109                 snd = ((address - base) >> rsize) & 0x7;
10110                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
10111 
10112                 srdis_mask = srdis ? 0x3 : 0x0;
10113                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
10114                     /* This will check in groups of 2, 4 and then 8, whether
10115                      * the subregion bits are consistent. rsize is incremented
10116                      * back up to give the region size, considering consistent
10117                      * adjacent subregions as one region. Stop testing if rsize
10118                      * is already big enough for an entire QEMU page.
10119                      */
10120                     int snd_rounded = snd & ~(i - 1);
10121                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
10122                                                      snd_rounded + 8, i);
10123                     if (srdis_mask ^ srdis_multi) {
10124                         break;
10125                     }
10126                     srdis_mask = (srdis_mask << i) | srdis_mask;
10127                     rsize++;
10128                 }
10129             }
10130             if (srdis) {
10131                 continue;
10132             }
10133             if (rsize < TARGET_PAGE_BITS) {
10134                 *page_size = 1 << rsize;
10135             }
10136             break;
10137         }
10138 
10139         if (n == -1) { /* no hits */
10140             if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
10141                 /* background fault */
10142                 fi->type = ARMFault_Background;
10143                 return true;
10144             }
10145             get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10146         } else { /* an MPU hit! */
10147             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
10148             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
10149 
10150             if (m_is_system_region(env, address)) {
10151                 /* System space is always execute never */
10152                 xn = 1;
10153             }
10154 
10155             if (is_user) { /* User mode AP bits decoding */
10156                 switch (ap) {
10157                 case 0:
10158                 case 1:
10159                 case 5:
10160                     break; /* no access */
10161                 case 3:
10162                     *prot |= PAGE_WRITE;
10163                     /* fall through */
10164                 case 2:
10165                 case 6:
10166                     *prot |= PAGE_READ | PAGE_EXEC;
10167                     break;
10168                 case 7:
10169                     /* for v7M, same as 6; for R profile a reserved value */
10170                     if (arm_feature(env, ARM_FEATURE_M)) {
10171                         *prot |= PAGE_READ | PAGE_EXEC;
10172                         break;
10173                     }
10174                     /* fall through */
10175                 default:
10176                     qemu_log_mask(LOG_GUEST_ERROR,
10177                                   "DRACR[%d]: Bad value for AP bits: 0x%"
10178                                   PRIx32 "\n", n, ap);
10179                 }
10180             } else { /* Priv. mode AP bits decoding */
10181                 switch (ap) {
10182                 case 0:
10183                     break; /* no access */
10184                 case 1:
10185                 case 2:
10186                 case 3:
10187                     *prot |= PAGE_WRITE;
10188                     /* fall through */
10189                 case 5:
10190                 case 6:
10191                     *prot |= PAGE_READ | PAGE_EXEC;
10192                     break;
10193                 case 7:
10194                     /* for v7M, same as 6; for R profile a reserved value */
10195                     if (arm_feature(env, ARM_FEATURE_M)) {
10196                         *prot |= PAGE_READ | PAGE_EXEC;
10197                         break;
10198                     }
10199                     /* fall through */
10200                 default:
10201                     qemu_log_mask(LOG_GUEST_ERROR,
10202                                   "DRACR[%d]: Bad value for AP bits: 0x%"
10203                                   PRIx32 "\n", n, ap);
10204                 }
10205             }
10206 
10207             /* execute never */
10208             if (xn) {
10209                 *prot &= ~PAGE_EXEC;
10210             }
10211         }
10212     }
10213 
10214     fi->type = ARMFault_Permission;
10215     fi->level = 1;
10216     return !(*prot & (1 << access_type));
10217 }
10218 
10219 static bool v8m_is_sau_exempt(CPUARMState *env,
10220                               uint32_t address, MMUAccessType access_type)
10221 {
10222     /* The architecture specifies that certain address ranges are
10223      * exempt from v8M SAU/IDAU checks.
10224      */
10225     return
10226         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
10227         (address >= 0xe0000000 && address <= 0xe0002fff) ||
10228         (address >= 0xe000e000 && address <= 0xe000efff) ||
10229         (address >= 0xe002e000 && address <= 0xe002efff) ||
10230         (address >= 0xe0040000 && address <= 0xe0041fff) ||
10231         (address >= 0xe00ff000 && address <= 0xe00fffff);
10232 }
10233 
10234 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
10235                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10236                                 V8M_SAttributes *sattrs)
10237 {
10238     /* Look up the security attributes for this address. Compare the
10239      * pseudocode SecurityCheck() function.
10240      * We assume the caller has zero-initialized *sattrs.
10241      */
10242     ARMCPU *cpu = arm_env_get_cpu(env);
10243     int r;
10244     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
10245     int idau_region = IREGION_NOTVALID;
10246     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
10247     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
10248 
10249     if (cpu->idau) {
10250         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
10251         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
10252 
10253         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
10254                    &idau_nsc);
10255     }
10256 
10257     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
10258         /* 0xf0000000..0xffffffff is always S for insn fetches */
10259         return;
10260     }
10261 
10262     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
10263         sattrs->ns = !regime_is_secure(env, mmu_idx);
10264         return;
10265     }
10266 
10267     if (idau_region != IREGION_NOTVALID) {
10268         sattrs->irvalid = true;
10269         sattrs->iregion = idau_region;
10270     }
10271 
10272     switch (env->sau.ctrl & 3) {
10273     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
10274         break;
10275     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
10276         sattrs->ns = true;
10277         break;
10278     default: /* SAU.ENABLE == 1 */
10279         for (r = 0; r < cpu->sau_sregion; r++) {
10280             if (env->sau.rlar[r] & 1) {
10281                 uint32_t base = env->sau.rbar[r] & ~0x1f;
10282                 uint32_t limit = env->sau.rlar[r] | 0x1f;
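                /* e.g. RBAR = 0x20000000 and RLAR = 0x2000ffe1 (enabled,
                 * not NSC) describe the region 0x20000000..0x2000ffff
                 */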
10283 
10284                 if (base <= address && limit >= address) {
10285                     if (base > addr_page_base || limit < addr_page_limit) {
10286                         sattrs->subpage = true;
10287                     }
10288                     if (sattrs->srvalid) {
10289                         /* If we hit in more than one region then we must report
10290                          * as Secure, not NS-Callable, with no valid region
10291                          * number info.
10292                          */
10293                         sattrs->ns = false;
10294                         sattrs->nsc = false;
10295                         sattrs->sregion = 0;
10296                         sattrs->srvalid = false;
10297                         break;
10298                     } else {
10299                         if (env->sau.rlar[r] & 2) {
10300                             sattrs->nsc = true;
10301                         } else {
10302                             sattrs->ns = true;
10303                         }
10304                         sattrs->srvalid = true;
10305                         sattrs->sregion = r;
10306                     }
10307                 } else {
10308                     /*
10309                      * Address not in this region. We must check whether the
10310                      * region covers addresses in the same page as our address.
10311                      * In that case we must not report a size that covers the
10312                      * whole page for a subsequent hit against a different MPU
10313                      * region or the background region, because it would result
10314                      * in incorrect TLB hits for subsequent accesses to
10315                      * addresses that are in this MPU region.
10316                      */
10317                     if (limit >= base &&
10318                         ranges_overlap(base, limit - base + 1,
10319                                        addr_page_base,
10320                                        TARGET_PAGE_SIZE)) {
10321                         sattrs->subpage = true;
10322                     }
10323                 }
10324             }
10325         }
10326 
10327         /* The IDAU will override the SAU lookup results if it specifies
10328          * higher security than the SAU does.
10329          */
10330         if (!idau_ns) {
10331             if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
10332                 sattrs->ns = false;
10333                 sattrs->nsc = idau_nsc;
10334             }
10335         }
10336         break;
10337     }
10338 }
10339 
10340 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
10341                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
10342                               hwaddr *phys_ptr, MemTxAttrs *txattrs,
10343                               int *prot, bool *is_subpage,
10344                               ARMMMUFaultInfo *fi, uint32_t *mregion)
10345 {
10346     /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
10347      * that a full phys-to-virt translation does).
10348      * mregion is (if not NULL) set to the region number which matched,
10349      * or -1 if no region number is returned (MPU off, address did not
10350      * hit a region, address hit in multiple regions).
10351      * We set is_subpage to true if the region hit doesn't cover the
10352      * entire TARGET_PAGE the address is within.
10353      */
10354     ARMCPU *cpu = arm_env_get_cpu(env);
10355     bool is_user = regime_is_user(env, mmu_idx);
10356     uint32_t secure = regime_is_secure(env, mmu_idx);
10357     int n;
10358     int matchregion = -1;
10359     bool hit = false;
10360     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
10361     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
10362 
10363     *is_subpage = false;
10364     *phys_ptr = address;
10365     *prot = 0;
10366     if (mregion) {
10367         *mregion = -1;
10368     }
10369 
10370     /* Unlike the ARM ARM pseudocode, we don't need to check whether this
10371      * was an exception vector read from the vector table (which is always
10372      * done using the default system address map), because those accesses
10373      * are done in arm_v7m_load_vector(), which always does a direct
10374      * read using address_space_ldl(), rather than going via this function.
10375      */
10376     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
10377         hit = true;
10378     } else if (m_is_ppb_region(env, address)) {
10379         hit = true;
10380     } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
10381         hit = true;
10382     } else {
10383         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
10384             /* region search */
10385             /* Note that the base address is bits [31:5] from the register
10386              * with bits [4:0] all zeroes, but the limit address is bits
10387              * [31:5] from the register with bits [4:0] all ones.
10388              */
10389             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
10390             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
10391 
10392             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
10393                 /* Region disabled */
10394                 continue;
10395             }
10396 
10397             if (address < base || address > limit) {
10398                 /*
10399                  * Address not in this region. We must check whether the
10400                  * region covers addresses in the same page as our address.
10401                  * In that case we must not report a size that covers the
10402                  * whole page for a subsequent hit against a different MPU
10403                  * region or the background region, because it would result in
10404                  * incorrect TLB hits for subsequent accesses to addresses that
10405                  * are in this MPU region.
10406                  */
10407                 if (limit >= base &&
10408                     ranges_overlap(base, limit - base + 1,
10409                                    addr_page_base,
10410                                    TARGET_PAGE_SIZE)) {
10411                     *is_subpage = true;
10412                 }
10413                 continue;
10414             }
10415 
10416             if (base > addr_page_base || limit < addr_page_limit) {
10417                 *is_subpage = true;
10418             }
10419 
10420             if (hit) {
10421                 /* Multiple regions match -- always a failure (unlike
10422                  * PMSAv7 where highest-numbered-region wins)
10423                  */
10424                 fi->type = ARMFault_Permission;
10425                 fi->level = 1;
10426                 return true;
10427             }
10428 
10429             matchregion = n;
10430             hit = true;
10431         }
10432     }
10433 
10434     if (!hit) {
10435         /* background fault */
10436         fi->type = ARMFault_Background;
10437         return true;
10438     }
10439 
10440     if (matchregion == -1) {
10441         /* hit using the background region */
10442         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10443     } else {
10444         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
10445         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
10446 
10447         if (m_is_system_region(env, address)) {
10448             /* System space is always execute never */
10449             xn = 1;
10450         }
10451 
10452         *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
10453         if (*prot && !xn) {
10454             *prot |= PAGE_EXEC;
10455         }
10456         /* We don't need to look the attribute up in the MAIR0/MAIR1
10457          * registers because that only tells us about cacheability.
10458          */
10459         if (mregion) {
10460             *mregion = matchregion;
10461         }
10462     }
10463 
10464     fi->type = ARMFault_Permission;
10465     fi->level = 1;
10466     return !(*prot & (1 << access_type));
10467 }
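
/* In the v8M MPU encoding consumed above, RBAR.AP (two bits) follows the
 * simplified access model: 0b00 privileged read-write only, 0b01
 * read-write at any privilege, 0b10 privileged read-only, 0b11 read-only
 * at any privilege; RBAR.XN (or a system-space address) removes PAGE_EXEC.
 */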
10468 
10469 
10470 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
10471                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
10472                                  hwaddr *phys_ptr, MemTxAttrs *txattrs,
10473                                  int *prot, target_ulong *page_size,
10474                                  ARMMMUFaultInfo *fi)
10475 {
10476     uint32_t secure = regime_is_secure(env, mmu_idx);
10477     V8M_SAttributes sattrs = {};
10478     bool ret;
10479     bool mpu_is_subpage;
10480 
10481     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
10482         v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
10483         if (access_type == MMU_INST_FETCH) {
10484             /* Instruction fetches always use the MMU bank and the
10485              * transaction attribute determined by the fetch address,
10486              * regardless of CPU state. This is painful for QEMU
10487              * to handle, because it would mean we need to encode
10488              * into the mmu_idx not just the (user, negpri) information
10489              * for the current security state but also that for the
10490              * other security state, which would balloon the number
10491              * of mmu_idx values needed alarmingly.
10492              * Fortunately we can avoid this because it's not actually
10493              * possible to arbitrarily execute code from memory with
10494              * the wrong security attribute: it will always generate
10495              * an exception of some kind or another, apart from the
10496              * special case of an NS CPU executing an SG instruction
10497              * in S&NSC memory. So we always just fail the translation
10498              * here and sort things out in the exception handler
10499              * (including possibly emulating an SG instruction).
10500              */
10501             if (sattrs.ns != !secure) {
10502                 if (sattrs.nsc) {
10503                     fi->type = ARMFault_QEMU_NSCExec;
10504                 } else {
10505                     fi->type = ARMFault_QEMU_SFault;
10506                 }
10507                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
10508                 *phys_ptr = address;
10509                 *prot = 0;
10510                 return true;
10511             }
10512         } else {
10513             /* For data accesses we always use the MMU bank indicated
10514              * by the current CPU state, but the security attributes
10515              * might downgrade a secure access to nonsecure.
10516              */
10517             if (sattrs.ns) {
10518                 txattrs->secure = false;
10519             } else if (!secure) {
10520                 /* NS access to S memory must fault.
10521                  * Architecturally we should first check whether the
10522                  * MPU information for this address indicates that we
10523                  * are doing an unaligned access to Device memory, which
10524                  * should generate a UsageFault instead. QEMU does not
10525                  * currently check for that kind of unaligned access though.
10526                  * If we added it we would need to do so as a special case
10527                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
10528                  */
10529                 fi->type = ARMFault_QEMU_SFault;
10530                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
10531                 *phys_ptr = address;
10532                 *prot = 0;
10533                 return true;
10534             }
10535         }
10536     }
10537 
10538     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
10539                             txattrs, prot, &mpu_is_subpage, fi, NULL);
10540     *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
10541     return ret;
10542 }
10543 
10544 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
10545                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
10546                                  hwaddr *phys_ptr, int *prot,
10547                                  ARMMMUFaultInfo *fi)
10548 {
10549     int n;
10550     uint32_t mask;
10551     uint32_t base;
10552     bool is_user = regime_is_user(env, mmu_idx);
10553 
10554     if (regime_translation_disabled(env, mmu_idx)) {
10555         /* MPU disabled.  */
10556         *phys_ptr = address;
10557         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10558         return false;
10559     }
10560 
10561     *phys_ptr = address;
10562     for (n = 7; n >= 0; n--) {
10563         base = env->cp15.c6_region[n];
10564         if ((base & 1) == 0) {
10565             continue;
10566         }
10567         mask = 1 << ((base >> 1) & 0x1f);
10568         /* Keep this shift separate from the above to avoid an
10569            (undefined) << 32.  */
10570         mask = (mask << 1) - 1;
10571         if (((base ^ address) & ~mask) == 0) {
10572             break;
10573         }
10574     }
10575     if (n < 0) {
10576         fi->type = ARMFault_Background;
10577         return true;
10578     }
10579 
10580     if (access_type == MMU_INST_FETCH) {
10581         mask = env->cp15.pmsav5_insn_ap;
10582     } else {
10583         mask = env->cp15.pmsav5_data_ap;
10584     }
10585     mask = (mask >> (n * 4)) & 0xf;
10586     switch (mask) {
10587     case 0:
10588         fi->type = ARMFault_Permission;
10589         fi->level = 1;
10590         return true;
10591     case 1:
10592         if (is_user) {
10593             fi->type = ARMFault_Permission;
10594             fi->level = 1;
10595             return true;
10596         }
10597         *prot = PAGE_READ | PAGE_WRITE;
10598         break;
10599     case 2:
10600         *prot = PAGE_READ;
10601         if (!is_user) {
10602             *prot |= PAGE_WRITE;
10603         }
10604         break;
10605     case 3:
10606         *prot = PAGE_READ | PAGE_WRITE;
10607         break;
10608     case 5:
10609         if (is_user) {
10610             fi->type = ARMFault_Permission;
10611             fi->level = 1;
10612             return true;
10613         }
10614         *prot = PAGE_READ;
10615         break;
10616     case 6:
10617         *prot = PAGE_READ;
10618         break;
10619     default:
10620         /* Bad permission.  */
10621         fi->type = ARMFault_Permission;
10622         fi->level = 1;
10623         return true;
10624     }
10625     *prot |= PAGE_EXEC;
10626     return false;
10627 }
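
/* In the PMSAv5 region registers decoded above, bit 0 is the enable bit
 * and bits [5:1] hold the region size as a power of two: e.g. a size
 * field of 0x0b gives a 4KB region (mask 0xfff) and 0x1f gives the full
 * 4GB address space (mask 0xffffffff).
 */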
10628 
10629 /* Combine either inner or outer cacheability attributes for normal
10630  * memory, according to table D4-42 and pseudocode procedure
10631  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
10632  *
10633  * NB: only stage 1 includes allocation hints (RW bits), leading to
10634  * some asymmetry.
10635  */
10636 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
10637 {
10638     if (s1 == 4 || s2 == 4) {
10639         /* non-cacheable has precedence */
10640         return 4;
10641     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
10642         /* stage 1 write-through takes precedence */
10643         return s1;
10644     } else if (extract32(s2, 2, 2) == 2) {
10645         /* stage 2 write-through takes precedence, but the allocation hint
10646          * is still taken from stage 1
10647          */
10648         return (2 << 2) | extract32(s1, 0, 2);
10649     } else { /* write-back */
10650         return s1;
10651     }
10652 }
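
/* For example, s1 = 0xf (write-back, RW-allocate) combined with s2 = 0xa
 * (write-through) yields 0xb: stage 2's write-through wins, but the
 * allocation hint is still taken from stage 1.
 */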
10653 
10654 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
10655  * and CombineS1S2Desc()
10656  *
10657  * @s1:      Attributes from stage 1 walk
10658  * @s2:      Attributes from stage 2 walk
10659  */
10660 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
10661 {
10662     uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
10663     uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
10664     ARMCacheAttrs ret;
10665 
10666     /* Combine shareability attributes (table D4-43) */
10667     if (s1.shareability == 2 || s2.shareability == 2) {
10668         /* if either are outer-shareable, the result is outer-shareable */
10669         ret.shareability = 2;
10670     } else if (s1.shareability == 3 || s2.shareability == 3) {
10671         /* if either are inner-shareable, the result is inner-shareable */
10672         ret.shareability = 3;
10673     } else {
10674         /* both non-shareable */
10675         ret.shareability = 0;
10676     }
10677 
10678     /* Combine memory type and cacheability attributes */
10679     if (s1hi == 0 || s2hi == 0) {
10680         /* Device has precedence over normal */
10681         if (s1lo == 0 || s2lo == 0) {
10682             /* nGnRnE has precedence over anything */
10683             ret.attrs = 0;
10684         } else if (s1lo == 4 || s2lo == 4) {
10685             /* non-Reordering has precedence over Reordering */
10686             ret.attrs = 4;  /* nGnRE */
10687         } else if (s1lo == 8 || s2lo == 8) {
10688             /* non-Gathering has precedence over Gathering */
10689             ret.attrs = 8;  /* nGRE */
10690         } else {
10691             ret.attrs = 0xc; /* GRE */
10692         }
10693 
10694         /* Any location for which the resultant memory type is any
10695          * type of Device memory is always treated as Outer Shareable.
10696          */
10697         ret.shareability = 2;
10698     } else { /* Normal memory */
10699         /* Outer/inner cacheability combine independently */
10700         ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
10701                   | combine_cacheattr_nibble(s1lo, s2lo);
10702 
10703         if (ret.attrs == 0x44) {
10704             /* Any location for which the resultant memory type is Normal
10705              * Inner Non-cacheable, Outer Non-cacheable is always treated
10706              * as Outer Shareable.
10707              */
10708             ret.shareability = 2;
10709         }
10710     }
10711 
10712     return ret;
10713 }
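
/* Illustrative worked example (editorial addition, not in the original
 * source): a stage 1 Normal Write-Back, Inner Shareable mapping
 * combined with a stage 2 Device-nGnRE mapping yields Device-nGnRE,
 * and any Device result is forced to Outer Shareable:
 */
static inline void combine_cacheattrs_example(void)
{
    ARMCacheAttrs s1 = { .attrs = 0xff, .shareability = 3 };
    ARMCacheAttrs s2 = { .attrs = 0x04, .shareability = 0 };
    ARMCacheAttrs res = combine_cacheattrs(s1, s2);

    g_assert(res.attrs == 0x04);     /* Device has precedence */
    g_assert(res.shareability == 2); /* Device => Outer Shareable */
}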
10714 
10715 
10716 /* get_phys_addr - get the physical address for this virtual address
10717  *
10718  * Find the physical address corresponding to the given virtual address,
10719  * by doing a translation table walk on MMU based systems or using the
10720  * MPU state on MPU based systems.
10721  *
10722  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
10723  * prot and page_size may not be filled in, and the populated fault info (@fi)
10724  * provides information on why the translation aborted, in the format of a
10725  * DFSR/IFSR fault register, with the following caveats:
10726  *  * we honour the short vs long DFSR format differences.
10727  *  * the WnR bit is never set (the caller must do this).
10728  *  * for PMSAv5 based systems we don't bother to return a full FSR format
10729  *    value.
10730  *
10731  * @env: CPUARMState
10732  * @address: virtual address to get physical address for
10733  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
10734  * @mmu_idx: MMU index indicating required translation regime
10735  * @phys_ptr: set to the physical address corresponding to the virtual address
10736  * @attrs: set to the memory transaction attributes to use
10737  * @prot: set to the permissions for the page containing phys_ptr
10738  * @page_size: set to the size of the page containing phys_ptr
10739  * @fi: set to fault info if the translation fails
10740  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
10741  */
10742 static bool get_phys_addr(CPUARMState *env, target_ulong address,
10743                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
10744                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10745                           target_ulong *page_size,
10746                           ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10747 {
10748     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
10749         /* Call ourselves recursively to do the stage 1 and then stage 2
10750          * translations.
10751          */
10752         if (arm_feature(env, ARM_FEATURE_EL2)) {
10753             hwaddr ipa;
10754             int s2_prot;
10755             int ret;
10756             ARMCacheAttrs cacheattrs2 = {};
10757 
10758             ret = get_phys_addr(env, address, access_type,
10759                                 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
10760                                 prot, page_size, fi, cacheattrs);
10761 
10762             /* If S1 fails or S2 is disabled, return early.  */
10763             if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
10764                 *phys_ptr = ipa;
10765                 return ret;
10766             }
10767 
10768             /* S1 is done. Now do S2 translation.  */
10769             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
10770                                      phys_ptr, attrs, &s2_prot,
10771                                      page_size, fi,
10772                                      cacheattrs != NULL ? &cacheattrs2 : NULL);
10773             fi->s2addr = ipa;
10774             /* Combine the S1 and S2 perms.  */
10775             *prot &= s2_prot;
10776 
10777             /* Combine the S1 and S2 cache attributes, if needed */
10778             if (!ret && cacheattrs != NULL) {
10779                 if (env->cp15.hcr_el2 & HCR_DC) {
10780                     /*
10781                      * HCR.DC forces the first stage attributes to
10782                      *  Normal Non-Shareable,
10783                      *  Inner Write-Back Read-Allocate Write-Allocate,
10784                      *  Outer Write-Back Read-Allocate Write-Allocate.
10785                      */
10786                     cacheattrs->attrs = 0xff;
10787                     cacheattrs->shareability = 0;
10788                 }
10789                 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
10790             }
10791 
10792             return ret;
10793         } else {
10794             /*
10795              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
10796              */
10797             mmu_idx = stage_1_mmu_idx(mmu_idx);
10798         }
10799     }
10800 
10801     /* The page table entries may downgrade secure to non-secure, but
10802      * cannot upgrade a non-secure translation regime's attributes
10803      * to secure.
10804      */
10805     attrs->secure = regime_is_secure(env, mmu_idx);
10806     attrs->user = regime_is_user(env, mmu_idx);
10807 
10808     /* Fast Context Switch Extension. This doesn't exist at all in v8.
10809      * In v7 and earlier it affects all stage 1 translations.
10810      */
10811     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
10812         && !arm_feature(env, ARM_FEATURE_V8)) {
10813         if (regime_el(env, mmu_idx) == 3) {
10814             address += env->cp15.fcseidr_s;
10815         } else {
10816             address += env->cp15.fcseidr_ns;
10817         }
10818     }
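
    /* Illustrative example (editorial addition): with FCSEIDR holding
     * 0x03000000, a VA of 0x00012000 (below the 32MB boundary) becomes
     * the modified VA 0x03012000 before the table walk.
     */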
10819 
10820     if (arm_feature(env, ARM_FEATURE_PMSA)) {
10821         bool ret;
10822         *page_size = TARGET_PAGE_SIZE;
10823 
10824         if (arm_feature(env, ARM_FEATURE_V8)) {
10825             /* PMSAv8 */
10826             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
10827                                        phys_ptr, attrs, prot, page_size, fi);
10828         } else if (arm_feature(env, ARM_FEATURE_V7)) {
10829             /* PMSAv7 */
10830             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
10831                                        phys_ptr, prot, page_size, fi);
10832         } else {
10833             /* Pre-v7 MPU */
10834             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
10835                                        phys_ptr, prot, fi);
10836         }
10837         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
10838                       " mmu_idx %u -> %s (prot %c%c%c)\n",
10839                       access_type == MMU_DATA_LOAD ? "reading" :
10840                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
10841                       (uint32_t)address, mmu_idx,
10842                       ret ? "Miss" : "Hit",
10843                       *prot & PAGE_READ ? 'r' : '-',
10844                       *prot & PAGE_WRITE ? 'w' : '-',
10845                       *prot & PAGE_EXEC ? 'x' : '-');
10846 
10847         return ret;
10848     }
10849 
10850     /* Definitely a real MMU, not an MPU */
10851 
10852     if (regime_translation_disabled(env, mmu_idx)) {
10853         /* MMU disabled. */
10854         *phys_ptr = address;
10855         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10856         *page_size = TARGET_PAGE_SIZE;
10857         return false;
10858     }
10859 
10860     if (regime_using_lpae_format(env, mmu_idx)) {
10861         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
10862                                   phys_ptr, attrs, prot, page_size,
10863                                   fi, cacheattrs);
10864     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
10865         return get_phys_addr_v6(env, address, access_type, mmu_idx,
10866                                 phys_ptr, attrs, prot, page_size, fi);
10867     } else {
10868         return get_phys_addr_v5(env, address, access_type, mmu_idx,
10869                                     phys_ptr, prot, page_size, fi);
10870     }
10871 }
10872 
10873 /* Walk the page table and (if the mapping exists) add the page
10874  * to the TLB. Return false on success, or true on failure. Populate
10875  * fi with the fault information on failure.
10876  */
10877 bool arm_tlb_fill(CPUState *cs, vaddr address,
10878                   MMUAccessType access_type, int mmu_idx,
10879                   ARMMMUFaultInfo *fi)
10880 {
10881     ARMCPU *cpu = ARM_CPU(cs);
10882     CPUARMState *env = &cpu->env;
10883     hwaddr phys_addr;
10884     target_ulong page_size;
10885     int prot;
10886     int ret;
10887     MemTxAttrs attrs = {};
10888 
10889     ret = get_phys_addr(env, address, access_type,
10890                         core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
10891                         &attrs, &prot, &page_size, fi, NULL);
10892     if (!ret) {
10893         /*
10894          * Map a single [sub]page. Regions smaller than our declared
10895          * target page size are handled specially, so for those we
10896          * pass in the exact addresses.
10897          */
10898         if (page_size >= TARGET_PAGE_SIZE) {
10899             phys_addr &= TARGET_PAGE_MASK;
10900             address &= TARGET_PAGE_MASK;
10901         }
10902         tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
10903                                 prot, mmu_idx, page_size);
10904         return false;
10905     }
10906 
10907     return ret;
10908 }
10909 
10910 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
10911                                          MemTxAttrs *attrs)
10912 {
10913     ARMCPU *cpu = ARM_CPU(cs);
10914     CPUARMState *env = &cpu->env;
10915     hwaddr phys_addr;
10916     target_ulong page_size;
10917     int prot;
10918     bool ret;
10919     ARMMMUFaultInfo fi = {};
10920     ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
10921 
10922     *attrs = (MemTxAttrs) {};
10923 
10924     ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
10925                         attrs, &prot, &page_size, &fi, NULL);
10926 
10927     if (ret) {
10928         return -1;
10929     }
10930     return phys_addr;
10931 }
10932 
10933 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
10934 {
10935     uint32_t mask;
10936     unsigned el = arm_current_el(env);
10937 
10938     /* First handle registers which unprivileged code can read */
10939 
10940     switch (reg) {
10941     case 0 ... 7: /* xPSR sub-fields */
10942         mask = 0;
10943         if ((reg & 1) && el) {
10944             mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
10945         }
10946         if (!(reg & 4)) {
10947             mask |= XPSR_NZCV | XPSR_Q; /* APSR */
10948         }
10949         /* EPSR reads as zero */
10950         return xpsr_read(env) & mask;
10952     case 20: /* CONTROL */
10953         return env->v7m.control[env->v7m.secure];
10954     case 0x94: /* CONTROL_NS */
10955         /* We have to handle this here because unprivileged Secure code
10956          * can read the NS CONTROL register.
10957          */
10958         if (!env->v7m.secure) {
10959             return 0;
10960         }
10961         return env->v7m.control[M_REG_NS];
10962     }
10963 
10964     if (el == 0) {
10965         return 0; /* unprivileged reads others as zero */
10966     }
10967 
10968     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
10969         switch (reg) {
10970         case 0x88: /* MSP_NS */
10971             if (!env->v7m.secure) {
10972                 return 0;
10973             }
10974             return env->v7m.other_ss_msp;
10975         case 0x89: /* PSP_NS */
10976             if (!env->v7m.secure) {
10977                 return 0;
10978             }
10979             return env->v7m.other_ss_psp;
10980         case 0x8a: /* MSPLIM_NS */
10981             if (!env->v7m.secure) {
10982                 return 0;
10983             }
10984             return env->v7m.msplim[M_REG_NS];
10985         case 0x8b: /* PSPLIM_NS */
10986             if (!env->v7m.secure) {
10987                 return 0;
10988             }
10989             return env->v7m.psplim[M_REG_NS];
10990         case 0x90: /* PRIMASK_NS */
10991             if (!env->v7m.secure) {
10992                 return 0;
10993             }
10994             return env->v7m.primask[M_REG_NS];
10995         case 0x91: /* BASEPRI_NS */
10996             if (!env->v7m.secure) {
10997                 return 0;
10998             }
10999             return env->v7m.basepri[M_REG_NS];
11000         case 0x93: /* FAULTMASK_NS */
11001             if (!env->v7m.secure) {
11002                 return 0;
11003             }
11004             return env->v7m.faultmask[M_REG_NS];
11005         case 0x98: /* SP_NS */
11006         {
11007             /* This gives the non-secure SP selected based on whether we're
11008              * currently in handler mode or not, using the NS CONTROL.SPSEL.
11009              */
11010             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
11011 
11012             if (!env->v7m.secure) {
11013                 return 0;
11014             }
11015             if (!arm_v7m_is_handler_mode(env) && spsel) {
11016                 return env->v7m.other_ss_psp;
11017             } else {
11018                 return env->v7m.other_ss_msp;
11019             }
11020         }
11021         default:
11022             break;
11023         }
11024     }
11025 
11026     switch (reg) {
11027     case 8: /* MSP */
11028         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
11029     case 9: /* PSP */
11030         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
11031     case 10: /* MSPLIM */
11032         if (!arm_feature(env, ARM_FEATURE_V8)) {
11033             goto bad_reg;
11034         }
11035         return env->v7m.msplim[env->v7m.secure];
11036     case 11: /* PSPLIM */
11037         if (!arm_feature(env, ARM_FEATURE_V8)) {
11038             goto bad_reg;
11039         }
11040         return env->v7m.psplim[env->v7m.secure];
11041     case 16: /* PRIMASK */
11042         return env->v7m.primask[env->v7m.secure];
11043     case 17: /* BASEPRI */
11044     case 18: /* BASEPRI_MAX */
11045         return env->v7m.basepri[env->v7m.secure];
11046     case 19: /* FAULTMASK */
11047         return env->v7m.faultmask[env->v7m.secure];
11048     default:
11049     bad_reg:
11050         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
11051                                        " register %d\n", reg);
11052         return 0;
11053     }
11054 }
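
/* Illustrative example (editorial addition): for SYSm values 0..7 the
 * mask logic above composes the xPSR view. E.g. reg == 5 (IPSR) read
 * from privileged code sets mask = XPSR_EXCP only, so the caller sees
 * just the current exception number, while reg == 0 (APSR) yields
 * mask = XPSR_NZCV | XPSR_Q regardless of privilege.
 */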
11055 
11056 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
11057 {
11058     /* We're passed bits [11..0] of the instruction; extract
11059      * SYSm and the mask bits.
11060      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
11061      * we choose to treat them as if the mask bits were valid.
11062      * NB that the pseudocode 'mask' variable is bits [11..10],
11063      * whereas ours is [11..8].
11064      */
11065     uint32_t mask = extract32(maskreg, 8, 4);
11066     uint32_t reg = extract32(maskreg, 0, 8);
11067 
11068     if (arm_current_el(env) == 0 && reg > 7) {
11069         /* only xPSR sub-fields may be written by unprivileged */
11070         return;
11071     }
11072 
11073     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
11074         switch (reg) {
11075         case 0x88: /* MSP_NS */
11076             if (!env->v7m.secure) {
11077                 return;
11078             }
11079             env->v7m.other_ss_msp = val;
11080             return;
11081         case 0x89: /* PSP_NS */
11082             if (!env->v7m.secure) {
11083                 return;
11084             }
11085             env->v7m.other_ss_psp = val;
11086             return;
11087         case 0x8a: /* MSPLIM_NS */
11088             if (!env->v7m.secure) {
11089                 return;
11090             }
11091             env->v7m.msplim[M_REG_NS] = val & ~7;
11092             return;
11093         case 0x8b: /* PSPLIM_NS */
11094             if (!env->v7m.secure) {
11095                 return;
11096             }
11097             env->v7m.psplim[M_REG_NS] = val & ~7;
11098             return;
11099         case 0x90: /* PRIMASK_NS */
11100             if (!env->v7m.secure) {
11101                 return;
11102             }
11103             env->v7m.primask[M_REG_NS] = val & 1;
11104             return;
11105         case 0x91: /* BASEPRI_NS */
11106             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
11107                 return;
11108             }
11109             env->v7m.basepri[M_REG_NS] = val & 0xff;
11110             return;
11111         case 0x93: /* FAULTMASK_NS */
11112             if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
11113                 return;
11114             }
11115             env->v7m.faultmask[M_REG_NS] = val & 1;
11116             return;
11117         case 0x94: /* CONTROL_NS */
11118             if (!env->v7m.secure) {
11119                 return;
11120             }
11121             write_v7m_control_spsel_for_secstate(env,
11122                                                  val & R_V7M_CONTROL_SPSEL_MASK,
11123                                                  M_REG_NS);
11124             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
11125                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
11126                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
11127             }
11128             return;
11129         case 0x98: /* SP_NS */
11130         {
11131             /* This gives the non-secure SP selected based on whether we're
11132              * currently in handler mode or not, using the NS CONTROL.SPSEL.
11133              */
11134             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
11135             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
11136             uint32_t limit;
11137 
11138             if (!env->v7m.secure) {
11139                 return;
11140             }
11141 
11142             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
11143 
11144             if (val < limit) {
11145                 CPUState *cs = CPU(arm_env_get_cpu(env));
11146 
11147                 cpu_restore_state(cs, GETPC(), true);
11148                 raise_exception(env, EXCP_STKOF, 0, 1);
11149             }
11150 
11151             if (is_psp) {
11152                 env->v7m.other_ss_psp = val;
11153             } else {
11154                 env->v7m.other_ss_msp = val;
11155             }
11156             return;
11157         }
11158         default:
11159             break;
11160         }
11161     }
11162 
11163     switch (reg) {
11164     case 0 ... 7: /* xPSR sub-fields */
11165         /* only APSR is actually writable */
11166         if (!(reg & 4)) {
11167             uint32_t apsrmask = 0;
11168 
11169             if (mask & 8) {
11170                 apsrmask |= XPSR_NZCV | XPSR_Q;
11171             }
11172             if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
11173                 apsrmask |= XPSR_GE;
11174             }
11175             xpsr_write(env, val, apsrmask);
11176         }
11177         break;
11178     case 8: /* MSP */
11179         if (v7m_using_psp(env)) {
11180             env->v7m.other_sp = val;
11181         } else {
11182             env->regs[13] = val;
11183         }
11184         break;
11185     case 9: /* PSP */
11186         if (v7m_using_psp(env)) {
11187             env->regs[13] = val;
11188         } else {
11189             env->v7m.other_sp = val;
11190         }
11191         break;
11192     case 10: /* MSPLIM */
11193         if (!arm_feature(env, ARM_FEATURE_V8)) {
11194             goto bad_reg;
11195         }
11196         env->v7m.msplim[env->v7m.secure] = val & ~7;
11197         break;
11198     case 11: /* PSPLIM */
11199         if (!arm_feature(env, ARM_FEATURE_V8)) {
11200             goto bad_reg;
11201         }
11202         env->v7m.psplim[env->v7m.secure] = val & ~7;
11203         break;
11204     case 16: /* PRIMASK */
11205         env->v7m.primask[env->v7m.secure] = val & 1;
11206         break;
11207     case 17: /* BASEPRI */
11208         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
11209             goto bad_reg;
11210         }
11211         env->v7m.basepri[env->v7m.secure] = val & 0xff;
11212         break;
11213     case 18: /* BASEPRI_MAX */
11214         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
11215             goto bad_reg;
11216         }
11217         val &= 0xff;
11218         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
11219                          || env->v7m.basepri[env->v7m.secure] == 0)) {
11220             env->v7m.basepri[env->v7m.secure] = val;
11221         }
11222         break;
11223     case 19: /* FAULTMASK */
11224         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
11225             goto bad_reg;
11226         }
11227         env->v7m.faultmask[env->v7m.secure] = val & 1;
11228         break;
11229     case 20: /* CONTROL */
11230         /* Writing to the SPSEL bit only has an effect if we are in
11231          * thread mode; other bits can be updated by any privileged code.
11232          * write_v7m_control_spsel() deals with updating the SPSEL bit in
11233          * env->v7m.control, so we only need update the others.
11234          * For v7M, we must just ignore explicit writes to SPSEL in handler
11235          * mode; for v8M the write is permitted but will have no effect.
11236          */
11237         if (arm_feature(env, ARM_FEATURE_V8) ||
11238             !arm_v7m_is_handler_mode(env)) {
11239             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
11240         }
11241         if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
11242             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
11243             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
11244         }
11245         break;
11246     default:
11247     bad_reg:
11248         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
11249                                        " register %d\n", reg);
11250         return;
11251     }
11252 }
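
/* Illustrative example (editorial addition): for "MSR APSR_nzcvq, r0"
 * the insn carries SYSm == 0 in bits [7:0] and the NZCVQ mask bit in
 * bit 11, so maskreg == 0x800; the code above extracts mask == 8,
 * reg == 0 and updates only XPSR_NZCV | XPSR_Q.
 */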
11253 
11254 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
11255 {
11256     /* Implement the TT instruction. op is bits [7:6] of the insn. */
11257     bool forceunpriv = op & 1;
11258     bool alt = op & 2;
11259     V8M_SAttributes sattrs = {};
11260     uint32_t tt_resp;
11261     bool r, rw, nsr, nsrw, mrvalid;
11262     int prot;
11263     ARMMMUFaultInfo fi = {};
11264     MemTxAttrs attrs = {};
11265     hwaddr phys_addr;
11266     ARMMMUIdx mmu_idx;
11267     uint32_t mregion;
11268     bool targetpriv;
11269     bool targetsec = env->v7m.secure;
11270     bool is_subpage;
11271 
11272     /* Work out what the security state and privilege level we're
11273      * interested in is...
11274      */
11275     if (alt) {
11276         targetsec = !targetsec;
11277     }
11278 
11279     if (forceunpriv) {
11280         targetpriv = false;
11281     } else {
11282         targetpriv = arm_v7m_is_handler_mode(env) ||
11283             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
11284     }
11285 
11286     /* ...and then figure out which MMU index this is */
11287     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
11288 
11289     /* We know that the MPU and SAU don't care about the access type
11290      * for our purposes beyond that we don't want to claim to be
11291      * an insn fetch, so we arbitrarily call this a read.
11292      */
11293 
11294     /* MPU region info only available for privileged or if
11295      * inspecting the other MPU state.
11296      */
11297     if (arm_current_el(env) != 0 || alt) {
11298         /* We can ignore the return value as prot is always set */
11299         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
11300                           &phys_addr, &attrs, &prot, &is_subpage,
11301                           &fi, &mregion);
11302         if (mregion == -1) {
11303             mrvalid = false;
11304             mregion = 0;
11305         } else {
11306             mrvalid = true;
11307         }
11308         r = prot & PAGE_READ;
11309         rw = prot & PAGE_WRITE;
11310     } else {
11311         r = false;
11312         rw = false;
11313         mrvalid = false;
11314         mregion = 0;
11315     }
11316 
11317     if (env->v7m.secure) {
11318         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
11319         nsr = sattrs.ns && r;
11320         nsrw = sattrs.ns && rw;
11321     } else {
11322         sattrs.ns = true;
11323         nsr = false;
11324         nsrw = false;
11325     }
11326 
11327     tt_resp = (sattrs.iregion << 24) |
11328         (sattrs.irvalid << 23) |
11329         ((!sattrs.ns) << 22) |
11330         (nsrw << 21) |
11331         (nsr << 20) |
11332         (rw << 19) |
11333         (r << 18) |
11334         (sattrs.srvalid << 17) |
11335         (mrvalid << 16) |
11336         (sattrs.sregion << 8) |
11337         mregion;
11338 
11339     return tt_resp;
11340 }
11341 
11342 #endif
11343 
11344 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
11345 {
11346     /* Implement DC ZVA, which zeroes a fixed-length block of memory.
11347      * Note that we do not implement the (architecturally mandated)
11348      * alignment fault for attempts to use this on Device memory
11349      * (which matches the usual QEMU behaviour of not implementing either
11350      * alignment faults or any memory attribute handling).
11351      */
11352 
11353     ARMCPU *cpu = arm_env_get_cpu(env);
11354     uint64_t blocklen = 4 << cpu->dcz_blocksize;
11355     uint64_t vaddr = vaddr_in & ~(blocklen - 1);
11356 
11357 #ifndef CONFIG_USER_ONLY
11358     {
11359         /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
11360          * the block size so we might have to do more than one TLB lookup.
11361          * We know that in fact for any v8 CPU the page size is at least 4K
11362          * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
11363          * 1K as an artefact of legacy v5 subpage support being present in the
11364          * same QEMU executable.
11365          */
11366         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
11367         void *hostaddr[maxidx];
11368         int try, i;
11369         unsigned mmu_idx = cpu_mmu_index(env, false);
11370         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
11371 
11372         for (try = 0; try < 2; try++) {
11373 
11374             for (i = 0; i < maxidx; i++) {
11375                 hostaddr[i] = tlb_vaddr_to_host(env,
11376                                                 vaddr + TARGET_PAGE_SIZE * i,
11377                                                 1, mmu_idx);
11378                 if (!hostaddr[i]) {
11379                     break;
11380                 }
11381             }
11382             if (i == maxidx) {
11383                 /* If it's all in the TLB it's fair game for just writing to;
11384                  * we know we don't need to update dirty status, etc.
11385                  */
11386                 for (i = 0; i < maxidx - 1; i++) {
11387                     memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
11388                 }
11389                 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
11390                 return;
11391             }
11392             /* OK, try a store and see if we can populate the tlb. This
11393              * might cause an exception if the memory isn't writable,
11394              * in which case we will longjmp out of here. We must for
11395              * this purpose use the actual register value passed to us
11396              * so that we get the fault address right.
11397              */
11398             helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
11399             /* Now we can populate the other TLB entries, if any */
11400             for (i = 0; i < maxidx; i++) {
11401                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
11402                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
11403                     helper_ret_stb_mmu(env, va, 0, oi, GETPC());
11404                 }
11405             }
11406         }
11407 
11408         /* Slow path (probably attempt to do this to an I/O device or
11409          * similar, or clearing of a block of code we have translations
11410          * cached for). Just do a series of byte writes as the architecture
11411          * demands. It's not worth trying to use a cpu_physical_memory_map(),
11412          * memset(), unmap() sequence here because:
11413          *  + we'd need to account for the blocksize being larger than a page
11414          *  + the direct-RAM access case is almost always going to be dealt
11415          *    with in the fastpath code above, so there's no speed benefit
11416          *  + we would have to deal with the map returning NULL because the
11417          *    bounce buffer was in use
11418          */
11419         for (i = 0; i < blocklen; i++) {
11420             helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
11421         }
11422     }
11423 #else
11424     memset(g2h(vaddr), 0, blocklen);
11425 #endif
11426 }
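
/* Illustrative example (editorial addition): a CPU with
 * cpu->dcz_blocksize == 4 (DCZID_EL0.BS == 4, i.e. 2^4 words) has
 * blocklen == 4 << 4 == 64 bytes, so a DC ZVA to 0x1234 zeroes the
 * 64-byte block 0x1200..0x123f.
 */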
11427 
11428 /* Note that signed overflow is undefined in C.  The following routines are
11429    careful to use unsigned types where modulo arithmetic is required.
11430    Failure to do so _will_ break on newer gcc.  */
11431 
11432 /* Signed saturating arithmetic.  */
11433 
11434 /* Perform 16-bit signed saturating addition.  */
11435 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
11436 {
11437     uint16_t res;
11438 
11439     res = a + b;
11440     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
11441         if (a & 0x8000)
11442             res = 0x8000;
11443         else
11444             res = 0x7fff;
11445     }
11446     return res;
11447 }
11448 
11449 /* Perform 8-bit signed saturating addition.  */
11450 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
11451 {
11452     uint8_t res;
11453 
11454     res = a + b;
11455     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
11456         if (a & 0x80)
11457             res = 0x80;
11458         else
11459             res = 0x7f;
11460     }
11461     return res;
11462 }
11463 
11464 /* Perform 16-bit signed saturating subtraction.  */
11465 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
11466 {
11467     uint16_t res;
11468 
11469     res = a - b;
11470     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
11471         if (a & 0x8000)
11472             res = 0x8000;
11473         else
11474             res = 0x7fff;
11475     }
11476     return res;
11477 }
11478 
11479 /* Perform 8-bit signed saturating subtraction.  */
11480 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
11481 {
11482     uint8_t res;
11483 
11484     res = a - b;
11485     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
11486         if (a & 0x80)
11487             res = 0x80;
11488         else
11489             res = 0x7f;
11490     }
11491     return res;
11492 }
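
/* Illustrative worked example (editorial addition, not in the original
 * source): adding 0x7000 and 0x2000 as signed 16-bit values overflows
 * (the true sum 0x9000 flips the sign bit while the operands agree in
 * sign), so the result saturates; the all-unsigned arithmetic keeps
 * the intermediate wraparound well-defined in C:
 */
static inline void saturating_arith_example(void)
{
    g_assert(add16_sat(0x7000, 0x2000) == 0x7fff); /* positive overflow */
    g_assert(add16_sat(0x9000, 0x9000) == 0x8000); /* negative overflow */
    g_assert(sub16_sat(0x8000, 0x0001) == 0x8000); /* negative overflow */
}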
11493 
11494 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
11495 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
11496 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
11497 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
11498 #define PFX q
11499 
11500 #include "op_addsub.h"
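
/* Editorial note: op_addsub.h is a template header; each inclusion
 * stamps out packed add/sub helpers named after the PFX defined just
 * above (here the saturating "q" variants, e.g. the qadd16 helper),
 * using the ADD16/SUB16/ADD8/SUB8 macros for the per-lane operation,
 * and then undefines them so the pattern can be repeated below.
 */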
11501 
11502 /* Unsigned saturating arithmetic.  */
11503 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
11504 {
11505     uint16_t res;
11506     res = a + b;
11507     if (res < a)
11508         res = 0xffff;
11509     return res;
11510 }
11511 
11512 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
11513 {
11514     if (a > b)
11515         return a - b;
11516     else
11517         return 0;
11518 }
11519 
11520 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
11521 {
11522     uint8_t res;
11523     res = a + b;
11524     if (res < a)
11525         res = 0xff;
11526     return res;
11527 }
11528 
11529 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
11530 {
11531     if (a > b)
11532         return a - b;
11533     else
11534         return 0;
11535 }
11536 
11537 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
11538 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
11539 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
11540 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
11541 #define PFX uq
11542 
11543 #include "op_addsub.h"
11544 
11545 /* Signed modulo arithmetic.  */
11546 #define SARITH16(a, b, n, op) do { \
11547     int32_t sum; \
11548     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
11549     RESULT(sum, n, 16); \
11550     if (sum >= 0) \
11551         ge |= 3 << (n * 2); \
11552     } while(0)
11553 
11554 #define SARITH8(a, b, n, op) do { \
11555     int32_t sum; \
11556     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
11557     RESULT(sum, n, 8); \
11558     if (sum >= 0) \
11559         ge |= 1 << n; \
11560     } while(0)
11561 
11562 
11563 #define ADD16(a, b, n) SARITH16(a, b, n, +)
11564 #define SUB16(a, b, n) SARITH16(a, b, n, -)
11565 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
11566 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
11567 #define PFX s
11568 #define ARITH_GE
11569 
11570 #include "op_addsub.h"
11571 
11572 /* Unsigned modulo arithmetic.  */
11573 #define ADD16(a, b, n) do { \
11574     uint32_t sum; \
11575     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
11576     RESULT(sum, n, 16); \
11577     if ((sum >> 16) == 1) \
11578         ge |= 3 << (n * 2); \
11579     } while(0)
11580 
11581 #define ADD8(a, b, n) do { \
11582     uint32_t sum; \
11583     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
11584     RESULT(sum, n, 8); \
11585     if ((sum >> 8) == 1) \
11586         ge |= 1 << n; \
11587     } while(0)
11588 
11589 #define SUB16(a, b, n) do { \
11590     uint32_t sum; \
11591     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
11592     RESULT(sum, n, 16); \
11593     if ((sum >> 16) == 0) \
11594         ge |= 3 << (n * 2); \
11595     } while(0)
11596 
11597 #define SUB8(a, b, n) do { \
11598     uint32_t sum; \
11599     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
11600     RESULT(sum, n, 8); \
11601     if ((sum >> 8) == 0) \
11602         ge |= 1 << n; \
11603     } while(0)
11604 
11605 #define PFX u
11606 #define ARITH_GE
11607 
11608 #include "op_addsub.h"
11609 
11610 /* Halved signed arithmetic.  */
11611 #define ADD16(a, b, n) \
11612   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
11613 #define SUB16(a, b, n) \
11614   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
11615 #define ADD8(a, b, n) \
11616   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
11617 #define SUB8(a, b, n) \
11618   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
11619 #define PFX sh
11620 
11621 #include "op_addsub.h"
11622 
11623 /* Halved unsigned arithmetic.  */
11624 #define ADD16(a, b, n) \
11625   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11626 #define SUB16(a, b, n) \
11627   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
11628 #define ADD8(a, b, n) \
11629   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11630 #define SUB8(a, b, n) \
11631   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
11632 #define PFX uh
11633 
11634 #include "op_addsub.h"
11635 
11636 static inline uint8_t do_usad(uint8_t a, uint8_t b)
11637 {
11638     if (a > b)
11639         return a - b;
11640     else
11641         return b - a;
11642 }
11643 
11644 /* Unsigned sum of absolute byte differences.  */
11645 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
11646 {
11647     uint32_t sum;
11648     sum = do_usad(a, b);
11649     sum += do_usad(a >> 8, b >> 8);
11650     sum += do_usad(a >> 16, b >> 16);
11651     sum += do_usad(a >> 24, b >> 24);
11652     return sum;
11653 }
11654 
11655 /* For ARMv6 SEL instruction.  */
11656 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
11657 {
11658     uint32_t mask;
11659 
11660     mask = 0;
11661     if (flags & 1)
11662         mask |= 0xff;
11663     if (flags & 2)
11664         mask |= 0xff00;
11665     if (flags & 4)
11666         mask |= 0xff0000;
11667     if (flags & 8)
11668         mask |= 0xff000000;
11669     return (a & mask) | (b & ~mask);
11670 }
11671 
11672 /* VFP support.  We follow the convention used for VFP instructions:
11673    Single precision routines have a "s" suffix, double precision a
11674    "d" suffix.  */
11675 
11676 /* Convert host exception flags to vfp form.  */
11677 static inline int vfp_exceptbits_from_host(int host_bits)
11678 {
11679     int target_bits = 0;
11680 
11681     if (host_bits & float_flag_invalid)
11682         target_bits |= 1;
11683     if (host_bits & float_flag_divbyzero)
11684         target_bits |= 2;
11685     if (host_bits & float_flag_overflow)
11686         target_bits |= 4;
11687     if (host_bits & (float_flag_underflow | float_flag_output_denormal))
11688         target_bits |= 8;
11689     if (host_bits & float_flag_inexact)
11690         target_bits |= 0x10;
11691     if (host_bits & float_flag_input_denormal)
11692         target_bits |= 0x80;
11693     return target_bits;
11694 }
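
/* Illustrative example (editorial addition): a host status of
 * float_flag_invalid | float_flag_inexact maps to 0x11, i.e. the
 * FPSCR cumulative bits IOC (bit 0) and IXC (bit 4).
 */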
11695 
11696 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
11697 {
11698     int i;
11699     uint32_t fpscr;
11700 
11701     fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
11702             | (env->vfp.vec_len << 16)
11703             | (env->vfp.vec_stride << 20);
11704 
11705     i = get_float_exception_flags(&env->vfp.fp_status);
11706     i |= get_float_exception_flags(&env->vfp.standard_fp_status);
11707     /* FZ16 does not generate an input denormal exception.  */
11708     i |= (get_float_exception_flags(&env->vfp.fp_status_f16)
11709           & ~float_flag_input_denormal);
11710 
11711     fpscr |= vfp_exceptbits_from_host(i);
11712     return fpscr;
11713 }
11714 
11715 uint32_t vfp_get_fpscr(CPUARMState *env)
11716 {
11717     return HELPER(vfp_get_fpscr)(env);
11718 }
11719 
11720 /* Convert vfp exception flags to target form.  */
11721 static inline int vfp_exceptbits_to_host(int target_bits)
11722 {
11723     int host_bits = 0;
11724 
11725     if (target_bits & 1)
11726         host_bits |= float_flag_invalid;
11727     if (target_bits & 2)
11728         host_bits |= float_flag_divbyzero;
11729     if (target_bits & 4)
11730         host_bits |= float_flag_overflow;
11731     if (target_bits & 8)
11732         host_bits |= float_flag_underflow;
11733     if (target_bits & 0x10)
11734         host_bits |= float_flag_inexact;
11735     if (target_bits & 0x80)
11736         host_bits |= float_flag_input_denormal;
11737     return host_bits;
11738 }
11739 
11740 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
11741 {
11742     int i;
11743     uint32_t changed;
11744 
11745     /* When ARMv8.2-FP16 is not supported, FZ16 is RES0.  */
11746     if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) {
11747         val &= ~FPCR_FZ16;
11748     }
11749 
11750     changed = env->vfp.xregs[ARM_VFP_FPSCR];
11751     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
11752     env->vfp.vec_len = (val >> 16) & 7;
11753     env->vfp.vec_stride = (val >> 20) & 3;
11754 
11755     changed ^= val;
11756     if (changed & (3 << 22)) {
11757         i = (val >> 22) & 3;
11758         switch (i) {
11759         case FPROUNDING_TIEEVEN:
11760             i = float_round_nearest_even;
11761             break;
11762         case FPROUNDING_POSINF:
11763             i = float_round_up;
11764             break;
11765         case FPROUNDING_NEGINF:
11766             i = float_round_down;
11767             break;
11768         case FPROUNDING_ZERO:
11769             i = float_round_to_zero;
11770             break;
11771         }
11772         set_float_rounding_mode(i, &env->vfp.fp_status);
11773         set_float_rounding_mode(i, &env->vfp.fp_status_f16);
11774     }
11775     if (changed & FPCR_FZ16) {
11776         bool ftz_enabled = val & FPCR_FZ16;
11777         set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
11778         set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16);
11779     }
11780     if (changed & FPCR_FZ) {
11781         bool ftz_enabled = val & FPCR_FZ;
11782         set_flush_to_zero(ftz_enabled, &env->vfp.fp_status);
11783         set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status);
11784     }
11785     if (changed & FPCR_DN) {
11786         bool dnan_enabled = val & FPCR_DN;
11787         set_default_nan_mode(dnan_enabled, &env->vfp.fp_status);
11788         set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16);
11789     }
11790 
11791     /* The exception flags are ORed together when we read fpscr so we
11792      * only need to preserve the current state in one of our
11793      * float_status values.
11794      */
11795     i = vfp_exceptbits_to_host(val);
11796     set_float_exception_flags(i, &env->vfp.fp_status);
11797     set_float_exception_flags(0, &env->vfp.fp_status_f16);
11798     set_float_exception_flags(0, &env->vfp.standard_fp_status);
11799 }
11800 
11801 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
11802 {
11803     HELPER(vfp_set_fpscr)(env, val);
11804 }
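
/* Illustrative example (editorial addition): writing an FPSCR value
 * whose RMode field (bits [23:22]) is 0b01 (FPROUNDING_POSINF)
 * switches both fp_status and fp_status_f16 to float_round_up.
 */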
11805 
11806 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
11807 
11808 #define VFP_BINOP(name) \
11809 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
11810 { \
11811     float_status *fpst = fpstp; \
11812     return float32_ ## name(a, b, fpst); \
11813 } \
11814 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
11815 { \
11816     float_status *fpst = fpstp; \
11817     return float64_ ## name(a, b, fpst); \
11818 }
11819 VFP_BINOP(add)
11820 VFP_BINOP(sub)
11821 VFP_BINOP(mul)
11822 VFP_BINOP(div)
11823 VFP_BINOP(min)
11824 VFP_BINOP(max)
11825 VFP_BINOP(minnum)
11826 VFP_BINOP(maxnum)
11827 #undef VFP_BINOP
11828 
11829 float32 VFP_HELPER(neg, s)(float32 a)
11830 {
11831     return float32_chs(a);
11832 }
11833 
11834 float64 VFP_HELPER(neg, d)(float64 a)
11835 {
11836     return float64_chs(a);
11837 }
11838 
11839 float32 VFP_HELPER(abs, s)(float32 a)
11840 {
11841     return float32_abs(a);
11842 }
11843 
11844 float64 VFP_HELPER(abs, d)(float64 a)
11845 {
11846     return float64_abs(a);
11847 }
11848 
11849 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
11850 {
11851     return float32_sqrt(a, &env->vfp.fp_status);
11852 }
11853 
11854 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
11855 {
11856     return float64_sqrt(a, &env->vfp.fp_status);
11857 }
11858 
11859 /* XXX: check quiet/signaling case */
11860 #define DO_VFP_cmp(p, type) \
11861 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
11862 { \
11863     uint32_t flags; \
11864     switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
11865     case 0: flags = 0x6; break; \
11866     case -1: flags = 0x8; break; \
11867     case 1: flags = 0x2; break; \
11868     default: case 2: flags = 0x3; break; \
11869     } \
11870     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
11871         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
11872 } \
11873 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
11874 { \
11875     uint32_t flags; \
11876     switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
11877     case 0: flags = 0x6; break; \
11878     case -1: flags = 0x8; break; \
11879     case 1: flags = 0x2; break; \
11880     default: case 2: flags = 0x3; break; \
11881     } \
11882     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
11883         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
11884 }
11885 DO_VFP_cmp(s, float32)
11886 DO_VFP_cmp(d, float64)
11887 #undef DO_VFP_cmp
11888 
11889 /* Integer to float and float to integer conversions */
11890 
11891 #define CONV_ITOF(name, ftype, fsz, sign)                           \
11892 ftype HELPER(name)(uint32_t x, void *fpstp)                         \
11893 {                                                                   \
11894     float_status *fpst = fpstp;                                     \
11895     return sign##int32_to_##float##fsz((sign##int32_t)x, fpst);     \
11896 }
11897 
11898 #define CONV_FTOI(name, ftype, fsz, sign, round)                \
11899 sign##int32_t HELPER(name)(ftype x, void *fpstp)                \
11900 {                                                               \
11901     float_status *fpst = fpstp;                                 \
11902     if (float##fsz##_is_any_nan(x)) {                           \
11903         float_raise(float_flag_invalid, fpst);                  \
11904         return 0;                                               \
11905     }                                                           \
11906     return float##fsz##_to_##sign##int32##round(x, fpst);       \
11907 }
11908 
11909 #define FLOAT_CONVS(name, p, ftype, fsz, sign)            \
11910     CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign)        \
11911     CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, )        \
11912     CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero)
11913 
11914 FLOAT_CONVS(si, h, uint32_t, 16, )
11915 FLOAT_CONVS(si, s, float32, 32, )
11916 FLOAT_CONVS(si, d, float64, 64, )
11917 FLOAT_CONVS(ui, h, uint32_t, 16, u)
11918 FLOAT_CONVS(ui, s, float32, 32, u)
11919 FLOAT_CONVS(ui, d, float64, 64, u)
11920 
11921 #undef CONV_ITOF
11922 #undef CONV_FTOI
11923 #undef FLOAT_CONVS
11924 
11925 /* floating point conversion */
11926 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
11927 {
11928     return float32_to_float64(x, &env->vfp.fp_status);
11929 }
11930 
11931 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
11932 {
11933     return float64_to_float32(x, &env->vfp.fp_status);
11934 }
11935 
11936 /* VFP3 fixed point conversion.  */
11937 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
11938 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
11939                                      void *fpstp) \
11940 { return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); }
11941 
11942 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff)   \
11943 uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \
11944                                             void *fpst)                   \
11945 {                                                                         \
11946     if (unlikely(float##fsz##_is_any_nan(x))) {                           \
11947         float_raise(float_flag_invalid, fpst);                            \
11948         return 0;                                                         \
11949     }                                                                     \
11950     return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst);       \
11951 }
11952 
11953 #define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
11954 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
11955 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
11956                          float_round_to_zero, _round_to_zero)    \
11957 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
11958                          get_float_rounding_mode(fpst), )
11959 
11960 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
11961 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
11962 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype,               \
11963                          get_float_rounding_mode(fpst), )
11964 
11965 VFP_CONV_FIX(sh, d, 64, 64, int16)
11966 VFP_CONV_FIX(sl, d, 64, 64, int32)
11967 VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
11968 VFP_CONV_FIX(uh, d, 64, 64, uint16)
11969 VFP_CONV_FIX(ul, d, 64, 64, uint32)
11970 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
11971 VFP_CONV_FIX(sh, s, 32, 32, int16)
11972 VFP_CONV_FIX(sl, s, 32, 32, int32)
11973 VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
11974 VFP_CONV_FIX(uh, s, 32, 32, uint16)
11975 VFP_CONV_FIX(ul, s, 32, 32, uint32)
11976 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
11977 
11978 #undef VFP_CONV_FIX
11979 #undef VFP_CONV_FIX_FLOAT
11980 #undef VFP_CONV_FLOAT_FIX_ROUND
11981 #undef VFP_CONV_FIX_A64
11982 
11983 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst)
11984 {
11985     return int32_to_float16_scalbn(x, -shift, fpst);
11986 }
11987 
11988 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst)
11989 {
11990     return uint32_to_float16_scalbn(x, -shift, fpst);
11991 }
11992 
11993 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst)
11994 {
11995     return int64_to_float16_scalbn(x, -shift, fpst);
11996 }
11997 
11998 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst)
11999 {
12000     return uint64_to_float16_scalbn(x, -shift, fpst);
12001 }
12002 
12003 uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst)
12004 {
12005     if (unlikely(float16_is_any_nan(x))) {
12006         float_raise(float_flag_invalid, fpst);
12007         return 0;
12008     }
12009     return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst),
12010                                    shift, fpst);
12011 }
12012 
12013 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst)
12014 {
12015     if (unlikely(float16_is_any_nan(x))) {
12016         float_raise(float_flag_invalid, fpst);
12017         return 0;
12018     }
12019     return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst),
12020                                     shift, fpst);
12021 }
12022 
12023 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst)
12024 {
12025     if (unlikely(float16_is_any_nan(x))) {
12026         float_raise(float_flag_invalid, fpst);
12027         return 0;
12028     }
12029     return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst),
12030                                    shift, fpst);
12031 }
12032 
12033 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst)
12034 {
12035     if (unlikely(float16_is_any_nan(x))) {
12036         float_raise(float_flag_invalid, fpst);
12037         return 0;
12038     }
12039     return float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst),
12040                                     shift, fpst);
12041 }
12042 
12043 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst)
12044 {
12045     if (unlikely(float16_is_any_nan(x))) {
12046         float_raise(float_flag_invalid, fpst);
12047         return 0;
12048     }
12049     return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst),
12050                                    shift, fpst);
12051 }
12052 
12053 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst)
12054 {
12055     if (unlikely(float16_is_any_nan(x))) {
12056         float_raise(float_flag_invalid, fpst);
12057         return 0;
12058     }
12059     return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst),
12060                                     shift, fpst);
12061 }
12062 
12063 /* Set the current fp rounding mode and return the old one.
12064  * The argument is a softfloat float_round_ value.
12065  */
12066 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp)
12067 {
12068     float_status *fp_status = fpstp;
12069 
12070     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
12071     set_float_rounding_mode(rmode, fp_status);
12072 
12073     return prev_rmode;
12074 }
12075 
12076 /* Set the current fp rounding mode in the standard fp status and return
12077  * the old one. This is for NEON instructions that need to change the
12078  * rounding mode but wish to use the standard FPSCR values for everything
12079  * else. Always set the rounding mode back to the correct value after
12080  * modifying it.
12081  * The argument is a softfloat float_round_ value.
12082  */
12083 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
12084 {
12085     float_status *fp_status = &env->vfp.standard_fp_status;
12086 
12087     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
12088     set_float_rounding_mode(rmode, fp_status);
12089 
12090     return prev_rmode;
12091 }
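
/* Editorial sketch (not part of the original source) of the intended
 * save/restore pattern around a rounding-sensitive operation, using
 * the C-level names of the helpers above:
 *
 *     uint32_t old = helper_set_rmode(float_round_to_zero, &fpst);
 *     ... perform the operation with fpst ...
 *     helper_set_rmode(old, &fpst);
 *
 * The translator emits equivalent calls around the generated code for
 * such instructions.
 */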
12092 
12093 /* Half precision conversions.  */
12094 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode)
12095 {
12096     /* Squash FZ16 to 0 for the duration of the conversion; otherwise
12097      * it would incorrectly flush input denormals.
12098      */
12099     float_status *fpst = fpstp;
12100     flag save = get_flush_inputs_to_zero(fpst);
12101     set_flush_inputs_to_zero(false, fpst);
12102     float32 r = float16_to_float32(a, !ahp_mode, fpst);
12103     set_flush_inputs_to_zero(save, fpst);
12104     return r;
12105 }
12106 
12107 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode)
12108 {
12109     /* Squash FZ16 to 0 for the duration of the conversion; otherwise
12110      * it would incorrectly flush output denormals.
12111      */
12112     float_status *fpst = fpstp;
12113     flag save = get_flush_to_zero(fpst);
12114     set_flush_to_zero(false, fpst);
12115     float16 r = float32_to_float16(a, !ahp_mode, fpst);
12116     set_flush_to_zero(save, fpst);
12117     return r;
12118 }
12119 
12120 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode)
12121 {
12122     /* Squash FZ16 to 0 for the duration of the conversion; otherwise
12123      * it would incorrectly flush input denormals.
12124      */
12125     float_status *fpst = fpstp;
12126     flag save = get_flush_inputs_to_zero(fpst);
12127     set_flush_inputs_to_zero(false, fpst);
12128     float64 r = float16_to_float64(a, !ahp_mode, fpst);
12129     set_flush_inputs_to_zero(save, fpst);
12130     return r;
12131 }
12132 
12133 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode)
12134 {
12135     /* Squash FZ16 to 0 for the duration of the conversion; otherwise
12136      * it would incorrectly flush output denormals.
12137      */
12138     float_status *fpst = fpstp;
12139     flag save = get_flush_to_zero(fpst);
12140     set_flush_to_zero(false, fpst);
12141     float16 r = float64_to_float16(a, !ahp_mode, fpst);
12142     set_flush_to_zero(save, fpst);
12143     return r;
12144 }
12145 
12146 #define float32_two make_float32(0x40000000)
12147 #define float32_three make_float32(0x40400000)
12148 #define float32_one_point_five make_float32(0x3fc00000)
12149 
12150 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
12151 {
12152     float_status *s = &env->vfp.standard_fp_status;
12153     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
12154         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
12155         if (!(float32_is_zero(a) || float32_is_zero(b))) {
12156             float_raise(float_flag_input_denormal, s);
12157         }
12158         return float32_two;
12159     }
12160     return float32_sub(float32_two, float32_mul(a, b, s), s);
12161 }
12162 
12163 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
12164 {
12165     float_status *s = &env->vfp.standard_fp_status;
12166     float32 product;
12167     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
12168         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
12169         if (!(float32_is_zero(a) || float32_is_zero(b))) {
12170             float_raise(float_flag_input_denormal, s);
12171         }
12172         return float32_one_point_five;
12173     }
12174     product = float32_mul(a, b, s);
12175     return float32_div(float32_sub(float32_three, product, s), float32_two, s);
12176 }
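
/* Editorial note: these are the step functions of the Newton-Raphson
 * iterations the guest is expected to run: VRECPS returns 2 - a*b so
 * that x' = x * (2 - d*x) converges to 1/d, and VRSQRTS returns
 * (3 - a*b) / 2 so that x' = x * (3 - d*x*x) / 2 converges to
 * 1/sqrt(d).
 */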
12177 
12178 /* NEON helpers.  */
12179 
12180 /* Constants 256 and 512 are used in some helpers; we avoid relying on
12181  * int->float conversions at run-time.  */
12182 #define float64_256 make_float64(0x4070000000000000LL)
12183 #define float64_512 make_float64(0x4080000000000000LL)
12184 #define float16_maxnorm make_float16(0x7bff)
12185 #define float32_maxnorm make_float32(0x7f7fffff)
12186 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
12187 
12188 /* Reciprocal functions
12189  *
12190  * The algorithm that must be used to calculate the estimate
12191  * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
12192  */
12193 
12194 /* See RecipEstimate()
12195  *
12196  * input is a 9 bit fixed point number
12197  * input range 256 .. 511, representing a number 0.5 <= x < 1.0
12198  * result range 256 .. 511, representing an estimate of 1/x from 1.0 to 511/256
12199  */
12200 
12201 static int recip_estimate(int input)
12202 {
12203     int a, b, r;
12204     assert(256 <= input && input < 512);
12205     a = (input * 2) + 1;
12206     b = (1 << 19) / a;
12207     r = (b + 1) >> 1;
12208     assert(256 <= r && r < 512);
12209     return r;
12210 }
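
/* Illustrative worked values (not part of the ARM pseudocode): for
 * input 256, encoding x = 256/512 = 0.5, we get a = 513, b = 1022 and
 * r = (1022 + 1) >> 1 = 511, an estimate of 511/256 ~= 1.996 for
 * 1/x = 2.0.  For input 511 (x ~= 0.998), a = 1023, b = 512 and
 * r = 256, i.e. exactly 1.0.  In a hosted test one could check:
 *
 *     assert(recip_estimate(256) == 511);
 *     assert(recip_estimate(511) == 256);
 */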
12211 
12212 /*
12213  * Common wrapper to call recip_estimate
12214  *
12215  * The parameters are exponent and 64 bit fraction (without implicit
12216  * bit) where the binary point is nominally at bit 52. Returns the
12217  * 64 bit fraction of the result, which the caller can then round
12218  * to the appropriate size.
12219  */
12220 
12221 static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
12222 {
12223     uint32_t scaled, estimate;
12224     uint64_t result_frac;
12225     int result_exp;
12226 
12227     /* Handle sub-normals */
12228     if (*exp == 0) {
12229         if (extract64(frac, 51, 1) == 0) {
12230             *exp = -1;
12231             frac <<= 2;
12232         } else {
12233             frac <<= 1;
12234         }
12235     }
12236 
12237     /* scaled = UInt('1':fraction<51:44>) */
12238     scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
12239     estimate = recip_estimate(scaled);
12240 
12241     result_exp = exp_off - *exp;
12242     result_frac = deposit64(0, 44, 8, estimate);
12243     if (result_exp == 0) {
12244         result_frac = deposit64(result_frac >> 1, 51, 1, 1);
12245     } else if (result_exp == -1) {
12246         result_frac = deposit64(result_frac >> 2, 50, 2, 1);
12247         result_exp = 0;
12248     }
12249 
12250     *exp = result_exp;
12251 
12252     return result_frac;
12253 }
12254 
12255 static bool round_to_inf(float_status *fpst, bool sign_bit)
12256 {
12257     switch (fpst->float_rounding_mode) {
12258     case float_round_nearest_even: /* Round to Nearest */
12259         return true;
12260     case float_round_up: /* Round to +Inf */
12261         return !sign_bit;
12262     case float_round_down: /* Round to -Inf */
12263         return sign_bit;
12264     case float_round_to_zero: /* Round to Zero */
12265         return false;
12266     }
12267 
12268     g_assert_not_reached();
12269 }
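
/* For example, under float_round_up a positive overflow saturates to
 * +Inf while a negative overflow saturates to -maxnorm, matching the
 * IEEE 754 rules for overflow under directed rounding.
 */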
12270 
12271 uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
12272 {
12273     float_status *fpst = fpstp;
12274     float16 f16 = float16_squash_input_denormal(input, fpst);
12275     uint32_t f16_val = float16_val(f16);
12276     uint32_t f16_sign = float16_is_neg(f16);
12277     int f16_exp = extract32(f16_val, 10, 5);
12278     uint32_t f16_frac = extract32(f16_val, 0, 10);
12279     uint64_t f64_frac;
12280 
12281     if (float16_is_any_nan(f16)) {
12282         float16 nan = f16;
12283         if (float16_is_signaling_nan(f16, fpst)) {
12284             float_raise(float_flag_invalid, fpst);
12285             nan = float16_silence_nan(f16, fpst);
12286         }
12287         if (fpst->default_nan_mode) {
12288             nan = float16_default_nan(fpst);
12289         }
12290         return nan;
12291     } else if (float16_is_infinity(f16)) {
12292         return float16_set_sign(float16_zero, float16_is_neg(f16));
12293     } else if (float16_is_zero(f16)) {
12294         float_raise(float_flag_divbyzero, fpst);
12295         return float16_set_sign(float16_infinity, float16_is_neg(f16));
12296     } else if (float16_abs(f16) < (1 << 8)) {
12297         /* Abs(value) < 2.0^-16 */
12298         float_raise(float_flag_overflow | float_flag_inexact, fpst);
12299         if (round_to_inf(fpst, f16_sign)) {
12300             return float16_set_sign(float16_infinity, f16_sign);
12301         } else {
12302             return float16_set_sign(float16_maxnorm, f16_sign);
12303         }
12304     } else if (f16_exp >= 29 && fpst->flush_to_zero) {
12305         float_raise(float_flag_underflow, fpst);
12306         return float16_set_sign(float16_zero, float16_is_neg(f16));
12307     }
12308 
12309     f64_frac = call_recip_estimate(&f16_exp, 29,
12310                                    ((uint64_t) f16_frac) << (52 - 10));
12311 
12312     /* result = sign : result_exp<4:0> : fraction<51:42> */
12313     f16_val = deposit32(0, 15, 1, f16_sign);
12314     f16_val = deposit32(f16_val, 10, 5, f16_exp);
12315     f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10));
12316     return make_float16(f16_val);
12317 }
12318 
12319 float32 HELPER(recpe_f32)(float32 input, void *fpstp)
12320 {
12321     float_status *fpst = fpstp;
12322     float32 f32 = float32_squash_input_denormal(input, fpst);
12323     uint32_t f32_val = float32_val(f32);
12324     bool f32_sign = float32_is_neg(f32);
12325     int f32_exp = extract32(f32_val, 23, 8);
12326     uint32_t f32_frac = extract32(f32_val, 0, 23);
12327     uint64_t f64_frac;
12328 
12329     if (float32_is_any_nan(f32)) {
12330         float32 nan = f32;
12331         if (float32_is_signaling_nan(f32, fpst)) {
12332             float_raise(float_flag_invalid, fpst);
12333             nan = float32_silence_nan(f32, fpst);
12334         }
12335         if (fpst->default_nan_mode) {
12336             nan = float32_default_nan(fpst);
12337         }
12338         return nan;
12339     } else if (float32_is_infinity(f32)) {
12340         return float32_set_sign(float32_zero, float32_is_neg(f32));
12341     } else if (float32_is_zero(f32)) {
12342         float_raise(float_flag_divbyzero, fpst);
12343         return float32_set_sign(float32_infinity, float32_is_neg(f32));
12344     } else if (float32_abs(f32) < (1ULL << 21)) {
12345         /* Abs(value) < 2.0^-128 */
12346         float_raise(float_flag_overflow | float_flag_inexact, fpst);
12347         if (round_to_inf(fpst, f32_sign)) {
12348             return float32_set_sign(float32_infinity, f32_sign);
12349         } else {
12350             return float32_set_sign(float32_maxnorm, f32_sign);
12351         }
12352     } else if (f32_exp >= 253 && fpst->flush_to_zero) {
12353         float_raise(float_flag_underflow, fpst);
12354         return float32_set_sign(float32_zero, float32_is_neg(f32));
12355     }
12356 
12357     f64_frac = call_recip_estimate(&f32_exp, 253,
12358                                    ((uint64_t) f32_frac) << (52 - 23));
12359 
12360     /* result = sign : result_exp<7:0> : fraction<51:29> */
12361     f32_val = deposit32(0, 31, 1, f32_sign);
12362     f32_val = deposit32(f32_val, 23, 8, f32_exp);
12363     f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23));
12364     return make_float32(f32_val);
12365 }
12366 
12367 float64 HELPER(recpe_f64)(float64 input, void *fpstp)
12368 {
12369     float_status *fpst = fpstp;
12370     float64 f64 = float64_squash_input_denormal(input, fpst);
12371     uint64_t f64_val = float64_val(f64);
12372     bool f64_sign = float64_is_neg(f64);
12373     int f64_exp = extract64(f64_val, 52, 11);
12374     uint64_t f64_frac = extract64(f64_val, 0, 52);
12375 
12376     /* Deal with any special cases */
12377     if (float64_is_any_nan(f64)) {
12378         float64 nan = f64;
12379         if (float64_is_signaling_nan(f64, fpst)) {
12380             float_raise(float_flag_invalid, fpst);
12381             nan = float64_silence_nan(f64, fpst);
12382         }
12383         if (fpst->default_nan_mode) {
12384             nan = float64_default_nan(fpst);
12385         }
12386         return nan;
12387     } else if (float64_is_infinity(f64)) {
12388         return float64_set_sign(float64_zero, float64_is_neg(f64));
12389     } else if (float64_is_zero(f64)) {
12390         float_raise(float_flag_divbyzero, fpst);
12391         return float64_set_sign(float64_infinity, float64_is_neg(f64));
12392     } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
12393         /* Abs(value) < 2.0^-1024 */
12394         float_raise(float_flag_overflow | float_flag_inexact, fpst);
12395         if (round_to_inf(fpst, f64_sign)) {
12396             return float64_set_sign(float64_infinity, f64_sign);
12397         } else {
12398             return float64_set_sign(float64_maxnorm, f64_sign);
12399         }
12400     } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
12401         float_raise(float_flag_underflow, fpst);
12402         return float64_set_sign(float64_zero, float64_is_neg(f64));
12403     }
12404 
12405     f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac);
12406 
12407     /* result = sign : result_exp<10:0> : fraction<51:0>; */
12408     f64_val = deposit64(0, 63, 1, f64_sign);
12409     f64_val = deposit64(f64_val, 52, 11, f64_exp);
12410     f64_val = deposit64(f64_val, 0, 52, f64_frac);
12411     return make_float64(f64_val);
12412 }
12413 
12414 /* The algorithm that must be used to calculate the estimate
12415  * is specified by the ARM ARM.
12416  */
12417 
12418 static int do_recip_sqrt_estimate(int a)
12419 {
12420     int b, estimate;
12421 
12422     assert(128 <= a && a < 512);
12423     if (a < 256) {
12424         a = a * 2 + 1;
12425     } else {
12426         a = (a >> 1) << 1;
12427         a = (a + 1) * 2;
12428     }
12429     b = 512;
12430     while (a * (b + 1) * (b + 1) < (1 << 28)) {
12431         b += 1;
12432     }
12433     estimate = (b + 1) / 2;
12434     assert(256 <= estimate && estimate < 512);
12435 
12436     return estimate;
12437 }
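
/* Illustrative values computed from the loop above: an input of 128
 * (x = 0.25) yields 511, i.e. 511/256 ~= 1.996 for 1/sqrt(0.25) = 2.0;
 * an input of 256 (x = 0.5) yields 361, i.e. 361/256 ~= 1.410 for
 * 1/sqrt(0.5) ~= 1.414.
 */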
12438
12440 static uint64_t recip_sqrt_estimate(int *exp, int exp_off, uint64_t frac)
12441 {
12442     int estimate;
12443     uint32_t scaled;
12444 
12445     if (*exp == 0) {
12446         while (extract64(frac, 51, 1) == 0) {
12447             frac = frac << 1;
12448             *exp -= 1;
12449         }
12450         frac = extract64(frac, 0, 51) << 1;
12451     }
12452 
12453     if (*exp & 1) {
12454         /* scaled = UInt('01':fraction<51:45>) */
12455         scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7));
12456     } else {
12457         /* scaled = UInt('1':fraction<51:44>) */
12458         scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
12459     }
12460     estimate = do_recip_sqrt_estimate(scaled);
12461 
12462     *exp = (exp_off - *exp) / 2;
12463     return extract64(estimate, 0, 8) << 44;
12464 }
12465 
12466 uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp)
12467 {
12468     float_status *s = fpstp;
12469     float16 f16 = float16_squash_input_denormal(input, s);
12470     uint16_t val = float16_val(f16);
12471     bool f16_sign = float16_is_neg(f16);
12472     int f16_exp = extract32(val, 10, 5);
12473     uint16_t f16_frac = extract32(val, 0, 10);
12474     uint64_t f64_frac;
12475 
12476     if (float16_is_any_nan(f16)) {
12477         float16 nan = f16;
12478         if (float16_is_signaling_nan(f16, s)) {
12479             float_raise(float_flag_invalid, s);
12480             nan = float16_silence_nan(f16, s);
12481         }
12482         if (s->default_nan_mode) {
12483             nan = float16_default_nan(s);
12484         }
12485         return nan;
12486     } else if (float16_is_zero(f16)) {
12487         float_raise(float_flag_divbyzero, s);
12488         return float16_set_sign(float16_infinity, f16_sign);
12489     } else if (f16_sign) {
12490         float_raise(float_flag_invalid, s);
12491         return float16_default_nan(s);
12492     } else if (float16_is_infinity(f16)) {
12493         return float16_zero;
12494     }
12495 
12496     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
12497      * preserving the parity of the exponent.  */
12498 
12499     f64_frac = ((uint64_t) f16_frac) << (52 - 10);
12500 
12501     f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);
12502 
12503     /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
12504     val = deposit32(0, 15, 1, f16_sign);
12505     val = deposit32(val, 10, 5, f16_exp);
12506     val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
12507     return make_float16(val);
12508 }
12509 
12510 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
12511 {
12512     float_status *s = fpstp;
12513     float32 f32 = float32_squash_input_denormal(input, s);
12514     uint32_t val = float32_val(f32);
12515     uint32_t f32_sign = float32_is_neg(f32);
12516     int f32_exp = extract32(val, 23, 8);
12517     uint32_t f32_frac = extract32(val, 0, 23);
12518     uint64_t f64_frac;
12519 
12520     if (float32_is_any_nan(f32)) {
12521         float32 nan = f32;
12522         if (float32_is_signaling_nan(f32, s)) {
12523             float_raise(float_flag_invalid, s);
12524             nan = float32_silence_nan(f32, s);
12525         }
12526         if (s->default_nan_mode) {
12527             nan = float32_default_nan(s);
12528         }
12529         return nan;
12530     } else if (float32_is_zero(f32)) {
12531         float_raise(float_flag_divbyzero, s);
12532         return float32_set_sign(float32_infinity, float32_is_neg(f32));
12533     } else if (float32_is_neg(f32)) {
12534         float_raise(float_flag_invalid, s);
12535         return float32_default_nan(s);
12536     } else if (float32_is_infinity(f32)) {
12537         return float32_zero;
12538     }
12539 
12540     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
12541      * preserving the parity of the exponent.  */
12542 
12543     f64_frac = ((uint64_t) f32_frac) << 29;
12544 
12545     f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);
12546 
12547     /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
12548     val = deposit32(0, 31, 1, f32_sign);
12549     val = deposit32(val, 23, 8, f32_exp);
12550     val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
12551     return make_float32(val);
12552 }
12553 
12554 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
12555 {
12556     float_status *s = fpstp;
12557     float64 f64 = float64_squash_input_denormal(input, s);
12558     uint64_t val = float64_val(f64);
12559     bool f64_sign = float64_is_neg(f64);
12560     int f64_exp = extract64(val, 52, 11);
12561     uint64_t f64_frac = extract64(val, 0, 52);
12562 
12563     if (float64_is_any_nan(f64)) {
12564         float64 nan = f64;
12565         if (float64_is_signaling_nan(f64, s)) {
12566             float_raise(float_flag_invalid, s);
12567             nan = float64_silence_nan(f64, s);
12568         }
12569         if (s->default_nan_mode) {
12570             nan = float64_default_nan(s);
12571         }
12572         return nan;
12573     } else if (float64_is_zero(f64)) {
12574         float_raise(float_flag_divbyzero, s);
12575         return float64_set_sign(float64_infinity, float64_is_neg(f64));
12576     } else if (float64_is_neg(f64)) {
12577         float_raise(float_flag_invalid, s);
12578         return float64_default_nan(s);
12579     } else if (float64_is_infinity(f64)) {
12580         return float64_zero;
12581     }
12582 
12583     f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);
12584 
12585     /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
12586     val = deposit64(0, 63, 1, f64_sign);
12587     val = deposit64(val, 52, 11, f64_exp);
12588     val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
12589     return make_float64(val);
12590 }
12591 
12592 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
12593 {
12594     /* float_status *s = fpstp; */
12595     int input, estimate;
12596 
12597     if ((a & 0x80000000) == 0) {
12598         return 0xffffffff;
12599     }
12600 
12601     input = extract32(a, 23, 9);
12602     estimate = recip_estimate(input);
12603 
12604     return deposit32(0, (32 - 9), 9, estimate);
12605 }
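
/* Illustrative example: for a = 0xc0000000 (0.75 as an unsigned 0.32
 * fixed point value), bits <31:23> give input = 384 and
 * recip_estimate(384) = 341 = 0x155, so the result is 341 << 23 =
 * 0xaa800000, i.e. ~1.333 read as a 1.31 fixed point value.
 */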
12606 
12607 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
12608 {
12609     int estimate;
12610 
12611     if ((a & 0xc0000000) == 0) {
12612         return 0xffffffff;
12613     }
12614 
12615     estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));
12616 
12617     return deposit32(0, 23, 9, estimate);
12618 }
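
/* Illustrative example: for a = 0x80000000 (0.5 as an unsigned 0.32
 * fixed point value), bits <31:23> give 256 and
 * do_recip_sqrt_estimate(256) = 361 = 0x169, so the result is
 * 361 << 23 = 0xb4800000, i.e. ~1.410 read as a 1.31 fixed point
 * value (1/sqrt(0.5) ~= 1.414).
 */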
12619 
12620 /* VFPv4 fused multiply-accumulate */
12621 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
12622 {
12623     float_status *fpst = fpstp;
12624     return float32_muladd(a, b, c, 0, fpst);
12625 }
12626 
12627 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
12628 {
12629     float_status *fpst = fpstp;
12630     return float64_muladd(a, b, c, 0, fpst);
12631 }
12632 
12633 /* ARMv8 round to integral */
12634 float32 HELPER(rints_exact)(float32 x, void *fp_status)
12635 {
12636     return float32_round_to_int(x, fp_status);
12637 }
12638 
12639 float64 HELPER(rintd_exact)(float64 x, void *fp_status)
12640 {
12641     return float64_round_to_int(x, fp_status);
12642 }
12643 
12644 float32 HELPER(rints)(float32 x, void *fp_status)
12645 {
12646     int old_flags = get_float_exception_flags(fp_status), new_flags;
12647     float32 ret;
12648 
12649     ret = float32_round_to_int(x, fp_status);
12650 
12651     /* Suppress any inexact exceptions the conversion produced */
12652     if (!(old_flags & float_flag_inexact)) {
12653         new_flags = get_float_exception_flags(fp_status);
12654         set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
12655     }
12656 
12657     return ret;
12658 }
12659 
12660 float64 HELPER(rintd)(float64 x, void *fp_status)
12661 {
12662     int old_flags = get_float_exception_flags(fp_status), new_flags;
12663     float64 ret;
12664 
12665     ret = float64_round_to_int(x, fp_status);
12666 
12669     /* Suppress any inexact exceptions the conversion produced */
12670     if (!(old_flags & float_flag_inexact)) {
12671         new_flags = get_float_exception_flags(fp_status);
12672         set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
12673     }
12674 
12675     return ret;
12676 }
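
/* The *_exact variants above deliberately leave the inexact flag set
 * (as FRINTX requires); the variants here clear it again unless it was
 * already pending, since the other round-to-integral instructions must
 * not report inexact.
 */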
12677 
12678 /* Convert ARM rounding mode to softfloat */
12679 int arm_rmode_to_sf(int rmode)
12680 {
12681     switch (rmode) {
12682     case FPROUNDING_TIEAWAY:
12683         rmode = float_round_ties_away;
12684         break;
12685     case FPROUNDING_ODD:
12686         /* FIXME: add support for ODD (TIEAWAY is handled above) */
12687         qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
12688                       rmode);
12689         /* fall through for now */
12690     case FPROUNDING_TIEEVEN:
12691     default:
12692         rmode = float_round_nearest_even;
12693         break;
12694     case FPROUNDING_POSINF:
12695         rmode = float_round_up;
12696         break;
12697     case FPROUNDING_NEGINF:
12698         rmode = float_round_down;
12699         break;
12700     case FPROUNDING_ZERO:
12701         rmode = float_round_to_zero;
12702         break;
12703     }
12704     return rmode;
12705 }
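
/* A typical call site (a sketch only, using the generic softfloat
 * accessors) saves and restores the rounding mode around an operation
 * that needs a specific FPROUNDING_* mode:
 *
 *     int old = get_float_rounding_mode(fpst);
 *     set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_ZERO), fpst);
 *     ... perform the operation ...
 *     set_float_rounding_mode(old, fpst);
 */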
12706 
12707 /* CRC helpers.
12708  * The upper bytes of val (above the number specified by 'bytes') must have
12709  * been zeroed out by the caller.
12710  */
12711 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12712 {
12713     uint8_t buf[4];
12714 
12715     stl_le_p(buf, val);
12716 
12717     /* zlib crc32 inverts the accumulator and the result; undo both here.  */
12718     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12719 }
12720 
12721 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12722 {
12723     uint8_t buf[4];
12724 
12725     stl_le_p(buf, val);
12726 
12727     /* Linux crc32c converts the output to one's complement.  */
12728     return crc32c(acc, buf, bytes) ^ 0xffffffff;
12729 }
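
/* In both helpers the final XOR with 0xffffffff undoes the library's
 * output inversion (and, for zlib, the input XOR undoes its input
 * inversion), leaving the raw CRC update that the ARM CRC32/CRC32C
 * instructions expect.
 */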
12730 
12731 /* Return the exception level to which FP-disabled exceptions should
12732  * be taken, or 0 if FP is enabled.
12733  */
12734 int fp_exception_el(CPUARMState *env, int cur_el)
12735 {
12736 #ifndef CONFIG_USER_ONLY
12737     int fpen;
12738 
12739     /* CPACR and the CPTR registers don't exist before v6, so FP is
12740      * always accessible
12741      */
12742     if (!arm_feature(env, ARM_FEATURE_V6)) {
12743         return 0;
12744     }
12745 
12746     /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12747      * 0, 2 : trap EL0 and EL1/PL1 accesses
12748      * 1    : trap only EL0 accesses
12749      * 3    : trap no accesses
12750      */
12751     fpen = extract32(env->cp15.cpacr_el1, 20, 2);
12752     switch (fpen) {
12753     case 0:
12754     case 2:
12755         if (cur_el == 0 || cur_el == 1) {
12756             /* Trap to PL1, which might be EL1 or EL3 */
12757             if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
12758                 return 3;
12759             }
12760             return 1;
12761         }
12762         if (cur_el == 3 && !is_a64(env)) {
12763             /* Secure PL1 running at EL3 */
12764             return 3;
12765         }
12766         break;
12767     case 1:
12768         if (cur_el == 0) {
12769             return 1;
12770         }
12771         break;
12772     case 3:
12773         break;
12774     }
12775 
12776     /* For the CPTR registers we don't need to guard with an ARM_FEATURE
12777      * check because zero bits in the registers mean "don't trap".
12778      */
12779 
12780     /* CPTR_EL2 : present in v7VE or v8 */
12781     if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
12782         && !arm_is_secure_below_el3(env)) {
12783         /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
12784         return 2;
12785     }
12786 
12787     /* CPTR_EL3 : present in v8 */
12788     if (extract32(env->cp15.cptr_el[3], 10, 1)) {
12789         /* Trap all FP ops to EL3 */
12790         return 3;
12791     }
12792 #endif
12793     return 0;
12794 }
12795 
12796 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
12797                           target_ulong *cs_base, uint32_t *pflags)
12798 {
12799     ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
12800     int current_el = arm_current_el(env);
12801     int fp_el = fp_exception_el(env, current_el);
12802     uint32_t flags;
12803 
12804     if (is_a64(env)) {
12805         ARMCPU *cpu = arm_env_get_cpu(env);
12806 
12807         *pc = env->pc;
12808         flags = ARM_TBFLAG_AARCH64_STATE_MASK;
12809         /* Get control bits for tagged addresses */
12810         flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
12811         flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
12812 
12813         if (cpu_isar_feature(aa64_sve, cpu)) {
12814             int sve_el = sve_exception_el(env, current_el);
12815             uint32_t zcr_len;
12816 
12817             /* If SVE is disabled, but FP is enabled,
12818              * then the effective len is 0.
12819              */
12820             if (sve_el != 0 && fp_el == 0) {
12821                 zcr_len = 0;
12822             } else {
12823                 zcr_len = sve_zcr_len_for_el(env, current_el);
12824             }
12825             flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;
12826             flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
12827         }
12828     } else {
12829         *pc = env->regs[15];
12830         flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
12831             | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
12832             | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
12833             | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
12834             | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
12835         if (!(access_secure_reg(env))) {
12836             flags |= ARM_TBFLAG_NS_MASK;
12837         }
12838         if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
12839             || arm_el_is_aa64(env, 1)) {
12840             flags |= ARM_TBFLAG_VFPEN_MASK;
12841         }
12842         flags |= (extract32(env->cp15.c15_cpar, 0, 2)
12843                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
12844     }
12845 
12846     flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
12847 
12848     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12849      * states defined in the ARM ARM for software singlestep:
12850      *  SS_ACTIVE   PSTATE.SS   State
12851      *     0            x       Inactive (the TB flag for SS is always 0)
12852      *     1            0       Active-pending
12853      *     1            1       Active-not-pending
12854      */
12855     if (arm_singlestep_active(env)) {
12856         flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
12857         if (is_a64(env)) {
12858             if (env->pstate & PSTATE_SS) {
12859                 flags |= ARM_TBFLAG_PSTATE_SS_MASK;
12860             }
12861         } else {
12862             if (env->uncached_cpsr & PSTATE_SS) {
12863                 flags |= ARM_TBFLAG_PSTATE_SS_MASK;
12864             }
12865         }
12866     }
12867     if (arm_cpu_data_is_big_endian(env)) {
12868         flags |= ARM_TBFLAG_BE_DATA_MASK;
12869     }
12870     flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;
12871 
12872     if (arm_v7m_is_handler_mode(env)) {
12873         flags |= ARM_TBFLAG_HANDLER_MASK;
12874     }
12875 
12876     /* v8M always applies stack limit checks, unless CCR.STKOFHFNMIGN is set
12877      * and the requested execution priority is negative, which suppresses them.
12878      */
12879     if (arm_feature(env, ARM_FEATURE_V8) &&
12880         arm_feature(env, ARM_FEATURE_M) &&
12881         !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
12882           (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
12883         flags |= ARM_TBFLAG_STACKCHECK_MASK;
12884     }
12885 
12886     *pflags = flags;
12887     *cs_base = 0;
12888 }
12889 
12890 #ifdef TARGET_AARCH64
12891 /*
12892  * The manual says that when SVE is enabled and VQ is widened the
12893  * implementation is allowed to zero the previously inaccessible
12894  * portion of the registers.  The corollary to that is that when
12895  * SVE is enabled and VQ is narrowed we are also allowed to zero
12896  * the now inaccessible portion of the registers.
12897  *
12898  * The intent of this is that no predicate bit beyond VQ is ever set,
12899  * which means that some operations on predicate registers themselves
12900  * may operate on full uint64_t or even unrolled across the maximum
12901  * uint64_t[4].  Performing 4 words of host arithmetic unconditionally
12902  * may well be cheaper than conditionals to restrict the operation
12903  * to the relevant portion of a uint16_t[16].
12904  */
12905 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12906 {
12907     int i, j;
12908     uint64_t pmask;
12909 
12910     assert(vq >= 1 && vq <= ARM_MAX_VQ);
12911     assert(vq <= arm_env_get_cpu(env)->sve_max_vq);
12912 
12913     /* Zap the high bits of the zregs.  */
12914     for (i = 0; i < 32; i++) {
12915         memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12916     }
12917 
12918     /* Zap the high bits of the pregs and ffr.  */
12919     pmask = 0;
12920     if (vq & 3) {
12921         pmask = ~(-1ULL << (16 * (vq & 3)));
12922     }
12923     for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12924         for (i = 0; i < 17; ++i) {
12925             env->vfp.pregs[i].p[j] &= pmask;
12926         }
12927         pmask = 0;
12928     }
12929 }
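
/* Worked example (illustrative, with ARM_MAX_VQ = 16): narrowing to
 * vq = 5 zeroes each zreg from byte offset 80 up; for the predicates,
 * vq & 3 == 1 gives pmask = 0xffff, so word j = 1 of each preg and the
 * ffr keeps only its low 16 bits and words j = 2 and j = 3 are cleared
 * entirely.
 */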
12930 
12931 /*
12932  * Notice a change in SVE vector size when changing EL.
12933  */
12934 void aarch64_sve_change_el(CPUARMState *env, int old_el,
12935                            int new_el, bool el0_a64)
12936 {
12937     ARMCPU *cpu = arm_env_get_cpu(env);
12938     int old_len, new_len;
12939     bool old_a64, new_a64;
12940 
12941     /* Nothing to do if no SVE.  */
12942     if (!cpu_isar_feature(aa64_sve, cpu)) {
12943         return;
12944     }
12945 
12946     /* Nothing to do if FP is disabled in either EL.  */
12947     if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12948         return;
12949     }
12950 
12951     /*
12952      * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
12953      * at ELx, or not available because the EL is in AArch32 state, then
12954      * for all purposes other than a direct read, the ZCR_ELx.LEN field
12955      * has an effective value of 0".
12956      *
12957      * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
12958      * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
12959      * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
12960      * we already have the correct register contents when encountering the
12961      * vq0->vq0 transition between EL0->EL1.
12962      */
12963     old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12964     old_len = (old_a64 && !sve_exception_el(env, old_el)
12965                ? sve_zcr_len_for_el(env, old_el) : 0);
12966     new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12967     new_len = (new_a64 && !sve_exception_el(env, new_el)
12968                ? sve_zcr_len_for_el(env, new_el) : 0);
12969 
12970     /* When changing vector length, clear inaccessible state.  */
12971     if (new_len < old_len) {
12972         aarch64_sve_narrow_vq(env, new_len + 1);
12973     }
12974 }
12975 #endif
12976