xref: /openbmc/qemu/target/arm/helper.c (revision 3f53bc61)
1 #include "qemu/osdep.h"
2 #include "trace.h"
3 #include "cpu.h"
4 #include "internals.h"
5 #include "exec/gdbstub.h"
6 #include "exec/helper-proto.h"
7 #include "qemu/host-utils.h"
8 #include "sysemu/arch_init.h"
9 #include "sysemu/sysemu.h"
10 #include "qemu/bitops.h"
11 #include "qemu/crc32c.h"
12 #include "exec/exec-all.h"
13 #include "exec/cpu_ldst.h"
14 #include "arm_ldst.h"
15 #include <zlib.h> /* For crc32 */
16 #include "exec/semihost.h"
17 #include "sysemu/kvm.h"
18 
19 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
20 
21 #ifndef CONFIG_USER_ONLY
22 static bool get_phys_addr(CPUARMState *env, target_ulong address,
23                           int access_type, ARMMMUIdx mmu_idx,
24                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
25                           target_ulong *page_size, uint32_t *fsr,
26                           ARMMMUFaultInfo *fi);
27 
28 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
29                                int access_type, ARMMMUIdx mmu_idx,
30                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
31                                target_ulong *page_size_ptr, uint32_t *fsr,
32                                ARMMMUFaultInfo *fi);
33 
34 /* Definitions for the PMCCNTR and PMCR registers */
35 #define PMCRD   0x8
36 #define PMCRC   0x4
37 #define PMCRE   0x1
38 #endif
39 
40 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
41 {
42     int nregs;
43 
44     /* VFP data registers are always little-endian.  */
45     nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
46     if (reg < nregs) {
47         stfq_le_p(buf, env->vfp.regs[reg]);
48         return 8;
49     }
50     if (arm_feature(env, ARM_FEATURE_NEON)) {
51         /* Aliases for Q regs.  */
52         nregs += 16;
53         if (reg < nregs) {
54             stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
55             stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
56             return 16;
57         }
58     }
59     switch (reg - nregs) {
60     case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
61     case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
62     case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
63     }
64     return 0;
65 }
66 
67 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
68 {
69     int nregs;
70 
71     nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
72     if (reg < nregs) {
73         env->vfp.regs[reg] = ldfq_le_p(buf);
74         return 8;
75     }
76     if (arm_feature(env, ARM_FEATURE_NEON)) {
77         nregs += 16;
78         if (reg < nregs) {
79             env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
80             env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
81             return 16;
82         }
83     }
84     switch (reg - nregs) {
85     case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
86     case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
87     case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
88     }
89     return 0;
90 }
91 
92 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
93 {
94     switch (reg) {
95     case 0 ... 31:
96         /* 128 bit FP register */
97         stfq_le_p(buf, env->vfp.regs[reg * 2]);
98         stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
99         return 16;
100     case 32:
101         /* FPSR */
102         stl_p(buf, vfp_get_fpsr(env));
103         return 4;
104     case 33:
105         /* FPCR */
106         stl_p(buf, vfp_get_fpcr(env));
107         return 4;
108     default:
109         return 0;
110     }
111 }
112 
113 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
114 {
115     switch (reg) {
116     case 0 ... 31:
117         /* 128 bit FP register */
118         env->vfp.regs[reg * 2] = ldfq_le_p(buf);
119         env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
120         return 16;
121     case 32:
122         /* FPSR */
123         vfp_set_fpsr(env, ldl_p(buf));
124         return 4;
125     case 33:
126         /* FPCR */
127         vfp_set_fpcr(env, ldl_p(buf));
128         return 4;
129     default:
130         return 0;
131     }
132 }
133 
134 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
135 {
136     assert(ri->fieldoffset);
137     if (cpreg_field_is_64bit(ri)) {
138         return CPREG_FIELD64(env, ri);
139     } else {
140         return CPREG_FIELD32(env, ri);
141     }
142 }
143 
144 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
145                       uint64_t value)
146 {
147     assert(ri->fieldoffset);
148     if (cpreg_field_is_64bit(ri)) {
149         CPREG_FIELD64(env, ri) = value;
150     } else {
151         CPREG_FIELD32(env, ri) = value;
152     }
153 }
154 
155 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
156 {
157     return (char *)env + ri->fieldoffset;
158 }
159 
160 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
161 {
162     /* Raw read of a coprocessor register (as needed for migration, etc). */
163     if (ri->type & ARM_CP_CONST) {
164         return ri->resetvalue;
165     } else if (ri->raw_readfn) {
166         return ri->raw_readfn(env, ri);
167     } else if (ri->readfn) {
168         return ri->readfn(env, ri);
169     } else {
170         return raw_read(env, ri);
171     }
172 }
173 
174 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
175                              uint64_t v)
176 {
177     /* Raw write of a coprocessor register (as needed for migration, etc).
178      * Note that constant registers are treated as write-ignored; the
179      * caller should check for success by whether a readback gives the
180      * value written.
181      */
182     if (ri->type & ARM_CP_CONST) {
183         return;
184     } else if (ri->raw_writefn) {
185         ri->raw_writefn(env, ri, v);
186     } else if (ri->writefn) {
187         ri->writefn(env, ri, v);
188     } else {
189         raw_write(env, ri, v);
190     }
191 }
192 
193 static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
194 {
195    /* Return true if the regdef would cause an assertion if you called
196     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
197     * program bug for it not to have the NO_RAW flag).
198     * NB that returning false here doesn't necessarily mean that calling
199     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
200     * read/write access functions which are safe for raw use" from "has
201     * read/write access functions which have side effects but has forgotten
202     * to provide raw access functions".
203     * The tests here line up with the conditions in read/write_raw_cp_reg()
204     * and assertions in raw_read()/raw_write().
205     */
206     if ((ri->type & ARM_CP_CONST) ||
207         ri->fieldoffset ||
208         ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
209         return false;
210     }
211     return true;
212 }
213 
214 bool write_cpustate_to_list(ARMCPU *cpu)
215 {
216     /* Write the coprocessor state from cpu->env to the (index,value) list. */
217     int i;
218     bool ok = true;
219 
220     for (i = 0; i < cpu->cpreg_array_len; i++) {
221         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
222         const ARMCPRegInfo *ri;
223 
224         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
225         if (!ri) {
226             ok = false;
227             continue;
228         }
229         if (ri->type & ARM_CP_NO_RAW) {
230             continue;
231         }
232         cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
233     }
234     return ok;
235 }
236 
237 bool write_list_to_cpustate(ARMCPU *cpu)
238 {
239     int i;
240     bool ok = true;
241 
242     for (i = 0; i < cpu->cpreg_array_len; i++) {
243         uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
244         uint64_t v = cpu->cpreg_values[i];
245         const ARMCPRegInfo *ri;
246 
247         ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
248         if (!ri) {
249             ok = false;
250             continue;
251         }
252         if (ri->type & ARM_CP_NO_RAW) {
253             continue;
254         }
255         /* Write value and confirm it reads back as written
256          * (to catch read-only registers and partially read-only
257          * registers where the incoming migration value doesn't match)
258          */
259         write_raw_cp_reg(&cpu->env, ri, v);
260         if (read_raw_cp_reg(&cpu->env, ri) != v) {
261             ok = false;
262         }
263     }
264     return ok;
265 }
266 
267 static void add_cpreg_to_list(gpointer key, gpointer opaque)
268 {
269     ARMCPU *cpu = opaque;
270     uint64_t regidx;
271     const ARMCPRegInfo *ri;
272 
273     regidx = *(uint32_t *)key;
274     ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
275 
276     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
277         cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
278         /* The value array need not be initialized at this point */
279         cpu->cpreg_array_len++;
280     }
281 }
282 
283 static void count_cpreg(gpointer key, gpointer opaque)
284 {
285     ARMCPU *cpu = opaque;
286     uint64_t regidx;
287     const ARMCPRegInfo *ri;
288 
289     regidx = *(uint32_t *)key;
290     ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
291 
292     if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
293         cpu->cpreg_array_len++;
294     }
295 }
296 
297 static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
298 {
299     uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
300     uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
301 
302     if (aidx > bidx) {
303         return 1;
304     }
305     if (aidx < bidx) {
306         return -1;
307     }
308     return 0;
309 }
310 
311 void init_cpreg_list(ARMCPU *cpu)
312 {
313     /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
314      * Note that we require cpreg_tuples[] to be sorted by key ID.
315      */
316     GList *keys;
317     int arraylen;
318 
319     keys = g_hash_table_get_keys(cpu->cp_regs);
320     keys = g_list_sort(keys, cpreg_key_compare);
321 
322     cpu->cpreg_array_len = 0;
323 
324     g_list_foreach(keys, count_cpreg, cpu);
325 
326     arraylen = cpu->cpreg_array_len;
327     cpu->cpreg_indexes = g_new(uint64_t, arraylen);
328     cpu->cpreg_values = g_new(uint64_t, arraylen);
329     cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
330     cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
331     cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
332     cpu->cpreg_array_len = 0;
333 
334     g_list_foreach(keys, add_cpreg_to_list, cpu);
335 
336     assert(cpu->cpreg_array_len == arraylen);
337 
338     g_list_free(keys);
339 }
340 
341 /*
342  * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
343  * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
344  *
345  * access_el3_aa32ns: Used to check AArch32 register views.
346  * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
347  */
348 static CPAccessResult access_el3_aa32ns(CPUARMState *env,
349                                         const ARMCPRegInfo *ri,
350                                         bool isread)
351 {
352     bool secure = arm_is_secure_below_el3(env);
353 
354     assert(!arm_el_is_aa64(env, 3));
355     if (secure) {
356         return CP_ACCESS_TRAP_UNCATEGORIZED;
357     }
358     return CP_ACCESS_OK;
359 }
360 
361 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
362                                                 const ARMCPRegInfo *ri,
363                                                 bool isread)
364 {
365     if (!arm_el_is_aa64(env, 3)) {
366         return access_el3_aa32ns(env, ri, isread);
367     }
368     return CP_ACCESS_OK;
369 }
370 
371 /* Some secure-only AArch32 registers trap to EL3 if used from
372  * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
373  * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
374  * We assume that the .access field is set to PL1_RW.
375  */
376 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
377                                             const ARMCPRegInfo *ri,
378                                             bool isread)
379 {
380     if (arm_current_el(env) == 3) {
381         return CP_ACCESS_OK;
382     }
383     if (arm_is_secure_below_el3(env)) {
384         return CP_ACCESS_TRAP_EL3;
385     }
386     /* This will be EL1 NS and EL2 NS, which just UNDEF */
387     return CP_ACCESS_TRAP_UNCATEGORIZED;
388 }
389 
390 /* Check for traps to "powerdown debug" registers, which are controlled
391  * by MDCR.TDOSA
392  */
393 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
394                                    bool isread)
395 {
396     int el = arm_current_el(env);
397 
398     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
399         && !arm_is_secure_below_el3(env)) {
400         return CP_ACCESS_TRAP_EL2;
401     }
402     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
403         return CP_ACCESS_TRAP_EL3;
404     }
405     return CP_ACCESS_OK;
406 }
407 
408 /* Check for traps to "debug ROM" registers, which are controlled
409  * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
410  */
411 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
412                                   bool isread)
413 {
414     int el = arm_current_el(env);
415 
416     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
417         && !arm_is_secure_below_el3(env)) {
418         return CP_ACCESS_TRAP_EL2;
419     }
420     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
421         return CP_ACCESS_TRAP_EL3;
422     }
423     return CP_ACCESS_OK;
424 }
425 
426 /* Check for traps to general debug registers, which are controlled
427  * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
428  */
429 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
430                                   bool isread)
431 {
432     int el = arm_current_el(env);
433 
434     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
435         && !arm_is_secure_below_el3(env)) {
436         return CP_ACCESS_TRAP_EL2;
437     }
438     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
439         return CP_ACCESS_TRAP_EL3;
440     }
441     return CP_ACCESS_OK;
442 }
443 
444 /* Check for traps to performance monitor registers, which are controlled
445  * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
446  */
447 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
448                                  bool isread)
449 {
450     int el = arm_current_el(env);
451 
452     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
453         && !arm_is_secure_below_el3(env)) {
454         return CP_ACCESS_TRAP_EL2;
455     }
456     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
457         return CP_ACCESS_TRAP_EL3;
458     }
459     return CP_ACCESS_OK;
460 }
461 
462 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
463 {
464     ARMCPU *cpu = arm_env_get_cpu(env);
465 
466     raw_write(env, ri, value);
467     tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
468 }
469 
470 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
471 {
472     ARMCPU *cpu = arm_env_get_cpu(env);
473 
474     if (raw_read(env, ri) != value) {
475         /* Unlike real hardware the qemu TLB uses virtual addresses,
476          * not modified virtual addresses, so this causes a TLB flush.
477          */
478         tlb_flush(CPU(cpu));
479         raw_write(env, ri, value);
480     }
481 }
482 
483 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
484                              uint64_t value)
485 {
486     ARMCPU *cpu = arm_env_get_cpu(env);
487 
488     if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
489         && !extended_addresses_enabled(env)) {
490         /* For VMSA (when not using the LPAE long descriptor page table
491          * format) this register includes the ASID, so do a TLB flush.
492          * For PMSA it is purely a process ID and no action is needed.
493          */
494         tlb_flush(CPU(cpu));
495     }
496     raw_write(env, ri, value);
497 }
498 
499 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
500                           uint64_t value)
501 {
502     /* Invalidate all (TLBIALL) */
503     ARMCPU *cpu = arm_env_get_cpu(env);
504 
505     tlb_flush(CPU(cpu));
506 }
507 
508 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
509                           uint64_t value)
510 {
511     /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
512     ARMCPU *cpu = arm_env_get_cpu(env);
513 
514     tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
515 }
516 
517 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
518                            uint64_t value)
519 {
520     /* Invalidate by ASID (TLBIASID) */
521     ARMCPU *cpu = arm_env_get_cpu(env);
522 
523     tlb_flush(CPU(cpu));
524 }
525 
526 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
527                            uint64_t value)
528 {
529     /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
530     ARMCPU *cpu = arm_env_get_cpu(env);
531 
532     tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
533 }
534 
535 /* IS variants of TLB operations must affect all cores */
536 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
537                              uint64_t value)
538 {
539     CPUState *cs = ENV_GET_CPU(env);
540 
541     tlb_flush_all_cpus_synced(cs);
542 }
543 
544 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
545                              uint64_t value)
546 {
547     CPUState *cs = ENV_GET_CPU(env);
548 
549     tlb_flush_all_cpus_synced(cs);
550 }
551 
552 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
553                              uint64_t value)
554 {
555     CPUState *cs = ENV_GET_CPU(env);
556 
557     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
558 }
559 
560 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
561                              uint64_t value)
562 {
563     CPUState *cs = ENV_GET_CPU(env);
564 
565     tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
566 }
567 
568 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
569                                uint64_t value)
570 {
571     CPUState *cs = ENV_GET_CPU(env);
572 
573     tlb_flush_by_mmuidx(cs,
574                         (1 << ARMMMUIdx_S12NSE1) |
575                         (1 << ARMMMUIdx_S12NSE0) |
576                         (1 << ARMMMUIdx_S2NS));
577 }
578 
579 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
580                                   uint64_t value)
581 {
582     CPUState *cs = ENV_GET_CPU(env);
583 
584     tlb_flush_by_mmuidx_all_cpus_synced(cs,
585                                         (1 << ARMMMUIdx_S12NSE1) |
586                                         (1 << ARMMMUIdx_S12NSE0) |
587                                         (1 << ARMMMUIdx_S2NS));
588 }
589 
590 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
591                             uint64_t value)
592 {
593     /* Invalidate by IPA. This has to invalidate any structures that
594      * contain only stage 2 translation information, but does not need
595      * to apply to structures that contain combined stage 1 and stage 2
596      * translation information.
597      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
598      */
599     CPUState *cs = ENV_GET_CPU(env);
600     uint64_t pageaddr;
601 
602     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
603         return;
604     }
605 
606     pageaddr = sextract64(value << 12, 0, 40);
607 
608     tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
609 }
610 
611 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
612                                uint64_t value)
613 {
614     CPUState *cs = ENV_GET_CPU(env);
615     uint64_t pageaddr;
616 
617     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
618         return;
619     }
620 
621     pageaddr = sextract64(value << 12, 0, 40);
622 
623     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
624                                              (1 << ARMMMUIdx_S2NS));
625 }
626 
627 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
628                               uint64_t value)
629 {
630     CPUState *cs = ENV_GET_CPU(env);
631 
632     tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
633 }
634 
635 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
636                                  uint64_t value)
637 {
638     CPUState *cs = ENV_GET_CPU(env);
639 
640     tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
641 }
642 
643 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
644                               uint64_t value)
645 {
646     CPUState *cs = ENV_GET_CPU(env);
647     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
648 
649     tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
650 }
651 
652 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
653                                  uint64_t value)
654 {
655     CPUState *cs = ENV_GET_CPU(env);
656     uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
657 
658     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
659                                              (1 << ARMMMUIdx_S1E2));
660 }
661 
662 static const ARMCPRegInfo cp_reginfo[] = {
663     /* Define the secure and non-secure FCSE identifier CP registers
664      * separately because there is no secure bank in V8 (no _EL3).  This allows
665      * the secure register to be properly reset and migrated. There is also no
666      * v8 EL1 version of the register so the non-secure instance stands alone.
667      */
668     { .name = "FCSEIDR(NS)",
669       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
670       .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
671       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
672       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
673     { .name = "FCSEIDR(S)",
674       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
675       .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
676       .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
677       .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
678     /* Define the secure and non-secure context identifier CP registers
679      * separately because there is no secure bank in V8 (no _EL3).  This allows
680      * the secure register to be properly reset and migrated.  In the
681      * non-secure case, the 32-bit register will have reset and migration
682      * disabled during registration as it is handled by the 64-bit instance.
683      */
684     { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
685       .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
686       .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
687       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
688       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
689     { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
690       .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
691       .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
692       .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
693       .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
694     REGINFO_SENTINEL
695 };
696 
697 static const ARMCPRegInfo not_v8_cp_reginfo[] = {
698     /* NB: Some of these registers exist in v8 but with more precise
699      * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
700      */
701     /* MMU Domain access control / MPU write buffer control */
702     { .name = "DACR",
703       .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
704       .access = PL1_RW, .resetvalue = 0,
705       .writefn = dacr_write, .raw_writefn = raw_write,
706       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
707                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
708     /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
709      * For v6 and v5, these mappings are overly broad.
710      */
711     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
712       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
713     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
714       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
715     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
716       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
717     { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
718       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
719     /* Cache maintenance ops; some of this space may be overridden later. */
720     { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
721       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
722       .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
723     REGINFO_SENTINEL
724 };
725 
726 static const ARMCPRegInfo not_v6_cp_reginfo[] = {
727     /* Not all pre-v6 cores implemented this WFI, so this is slightly
728      * over-broad.
729      */
730     { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
731       .access = PL1_W, .type = ARM_CP_WFI },
732     REGINFO_SENTINEL
733 };
734 
735 static const ARMCPRegInfo not_v7_cp_reginfo[] = {
736     /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
737      * is UNPREDICTABLE; we choose to NOP as most implementations do).
738      */
739     { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
740       .access = PL1_W, .type = ARM_CP_WFI },
741     /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
742      * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
743      * OMAPCP will override this space.
744      */
745     { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
746       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
747       .resetvalue = 0 },
748     { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
749       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
750       .resetvalue = 0 },
751     /* v6 doesn't have the cache ID registers but Linux reads them anyway */
752     { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
753       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
754       .resetvalue = 0 },
755     /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
756      * implementing it as RAZ means the "debug architecture version" bits
757      * will read as a reserved value, which should cause Linux to not try
758      * to use the debug hardware.
759      */
760     { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
761       .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
762     /* MMU TLB control. Note that the wildcarding means we cover not just
763      * the unified TLB ops but also the dside/iside/inner-shareable variants.
764      */
765     { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
766       .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
767       .type = ARM_CP_NO_RAW },
768     { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
769       .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
770       .type = ARM_CP_NO_RAW },
771     { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
772       .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
773       .type = ARM_CP_NO_RAW },
774     { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
775       .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
776       .type = ARM_CP_NO_RAW },
777     { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
778       .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
779     { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
780       .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
781     REGINFO_SENTINEL
782 };
783 
784 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
785                         uint64_t value)
786 {
787     uint32_t mask = 0;
788 
789     /* In ARMv8 most bits of CPACR_EL1 are RES0. */
790     if (!arm_feature(env, ARM_FEATURE_V8)) {
791         /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
792          * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
793          * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
794          */
795         if (arm_feature(env, ARM_FEATURE_VFP)) {
796             /* VFP coprocessor: cp10 & cp11 [23:20] */
797             mask |= (1 << 31) | (1 << 30) | (0xf << 20);
798 
799             if (!arm_feature(env, ARM_FEATURE_NEON)) {
800                 /* ASEDIS [31] bit is RAO/WI */
801                 value |= (1 << 31);
802             }
803 
804             /* VFPv3 and upwards with NEON implement 32 double precision
805              * registers (D0-D31).
806              */
807             if (!arm_feature(env, ARM_FEATURE_NEON) ||
808                     !arm_feature(env, ARM_FEATURE_VFP3)) {
809                 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
810                 value |= (1 << 30);
811             }
812         }
813         value &= mask;
814     }
815     env->cp15.cpacr_el1 = value;
816 }
817 
818 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
819                                    bool isread)
820 {
821     if (arm_feature(env, ARM_FEATURE_V8)) {
822         /* Check if CPACR accesses are to be trapped to EL2 */
823         if (arm_current_el(env) == 1 &&
824             (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
825             return CP_ACCESS_TRAP_EL2;
826         /* Check if CPACR accesses are to be trapped to EL3 */
827         } else if (arm_current_el(env) < 3 &&
828                    (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
829             return CP_ACCESS_TRAP_EL3;
830         }
831     }
832 
833     return CP_ACCESS_OK;
834 }
835 
836 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
837                                   bool isread)
838 {
839     /* Check if CPTR accesses are set to trap to EL3 */
840     if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
841         return CP_ACCESS_TRAP_EL3;
842     }
843 
844     return CP_ACCESS_OK;
845 }
846 
847 static const ARMCPRegInfo v6_cp_reginfo[] = {
848     /* prefetch by MVA in v6, NOP in v7 */
849     { .name = "MVA_prefetch",
850       .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
851       .access = PL1_W, .type = ARM_CP_NOP },
852     /* We need to break the TB after ISB to execute self-modifying code
853      * correctly and also to take any pending interrupts immediately.
854      * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
855      */
856     { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
857       .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
858     { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
859       .access = PL0_W, .type = ARM_CP_NOP },
860     { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
861       .access = PL0_W, .type = ARM_CP_NOP },
862     { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
863       .access = PL1_RW,
864       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
865                              offsetof(CPUARMState, cp15.ifar_ns) },
866       .resetvalue = 0, },
867     /* Watchpoint Fault Address Register : should actually only be present
868      * for 1136, 1176, 11MPCore.
869      */
870     { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
871       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
872     { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
873       .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
874       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
875       .resetvalue = 0, .writefn = cpacr_write },
876     REGINFO_SENTINEL
877 };
878 
879 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
880                                    bool isread)
881 {
882     /* Performance monitor registers user accessibility is controlled
883      * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
884      * trapping to EL2 or EL3 for other accesses.
885      */
886     int el = arm_current_el(env);
887 
888     if (el == 0 && !env->cp15.c9_pmuserenr) {
889         return CP_ACCESS_TRAP;
890     }
891     if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
892         && !arm_is_secure_below_el3(env)) {
893         return CP_ACCESS_TRAP_EL2;
894     }
895     if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
896         return CP_ACCESS_TRAP_EL3;
897     }
898 
899     return CP_ACCESS_OK;
900 }
901 
902 #ifndef CONFIG_USER_ONLY
903 
904 static inline bool arm_ccnt_enabled(CPUARMState *env)
905 {
906     /* This does not support checking PMCCFILTR_EL0 register */
907 
908     if (!(env->cp15.c9_pmcr & PMCRE)) {
909         return false;
910     }
911 
912     return true;
913 }
914 
915 void pmccntr_sync(CPUARMState *env)
916 {
917     uint64_t temp_ticks;
918 
919     temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
920                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
921 
922     if (env->cp15.c9_pmcr & PMCRD) {
923         /* Increment once every 64 processor clock cycles */
924         temp_ticks /= 64;
925     }
926 
927     if (arm_ccnt_enabled(env)) {
928         env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
929     }
930 }
931 
932 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
933                        uint64_t value)
934 {
935     pmccntr_sync(env);
936 
937     if (value & PMCRC) {
938         /* The counter has been reset */
939         env->cp15.c15_ccnt = 0;
940     }
941 
942     /* only the DP, X, D and E bits are writable */
943     env->cp15.c9_pmcr &= ~0x39;
944     env->cp15.c9_pmcr |= (value & 0x39);
945 
946     pmccntr_sync(env);
947 }
948 
949 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
950 {
951     uint64_t total_ticks;
952 
953     if (!arm_ccnt_enabled(env)) {
954         /* Counter is disabled, do not change value */
955         return env->cp15.c15_ccnt;
956     }
957 
958     total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
959                            ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
960 
961     if (env->cp15.c9_pmcr & PMCRD) {
962         /* Increment once every 64 processor clock cycles */
963         total_ticks /= 64;
964     }
965     return total_ticks - env->cp15.c15_ccnt;
966 }
967 
968 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
969                          uint64_t value)
970 {
971     /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
972      * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
973      * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
974      * accessed.
975      */
976     env->cp15.c9_pmselr = value & 0x1f;
977 }
978 
979 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
980                         uint64_t value)
981 {
982     uint64_t total_ticks;
983 
984     if (!arm_ccnt_enabled(env)) {
985         /* Counter is disabled, set the absolute value */
986         env->cp15.c15_ccnt = value;
987         return;
988     }
989 
990     total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
991                            ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
992 
993     if (env->cp15.c9_pmcr & PMCRD) {
994         /* Increment once every 64 processor clock cycles */
995         total_ticks /= 64;
996     }
997     env->cp15.c15_ccnt = total_ticks - value;
998 }
999 
1000 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1001                             uint64_t value)
1002 {
1003     uint64_t cur_val = pmccntr_read(env, NULL);
1004 
1005     pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1006 }
1007 
1008 #else /* CONFIG_USER_ONLY */
1009 
1010 void pmccntr_sync(CPUARMState *env)
1011 {
1012 }
1013 
1014 #endif
1015 
1016 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1017                             uint64_t value)
1018 {
1019     pmccntr_sync(env);
1020     env->cp15.pmccfiltr_el0 = value & 0x7E000000;
1021     pmccntr_sync(env);
1022 }
1023 
1024 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1025                             uint64_t value)
1026 {
1027     value &= (1 << 31);
1028     env->cp15.c9_pmcnten |= value;
1029 }
1030 
1031 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1032                              uint64_t value)
1033 {
1034     value &= (1 << 31);
1035     env->cp15.c9_pmcnten &= ~value;
1036 }
1037 
1038 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1039                          uint64_t value)
1040 {
1041     env->cp15.c9_pmovsr &= ~value;
1042 }
1043 
1044 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1045                              uint64_t value)
1046 {
1047     /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1048      * PMSELR value is equal to or greater than the number of implemented
1049      * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1050      */
1051     if (env->cp15.c9_pmselr == 0x1f) {
1052         pmccfiltr_write(env, ri, value);
1053     }
1054 }
1055 
1056 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1057 {
1058     /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1059      * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1060      */
1061     if (env->cp15.c9_pmselr == 0x1f) {
1062         return env->cp15.pmccfiltr_el0;
1063     } else {
1064         return 0;
1065     }
1066 }
1067 
1068 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1069                             uint64_t value)
1070 {
1071     env->cp15.c9_pmuserenr = value & 1;
1072 }
1073 
1074 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1075                              uint64_t value)
1076 {
1077     /* We have no event counters so only the C bit can be changed */
1078     value &= (1 << 31);
1079     env->cp15.c9_pminten |= value;
1080 }
1081 
1082 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1083                              uint64_t value)
1084 {
1085     value &= (1 << 31);
1086     env->cp15.c9_pminten &= ~value;
1087 }
1088 
1089 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1090                        uint64_t value)
1091 {
1092     /* Note that even though the AArch64 view of this register has bits
1093      * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1094      * architectural requirements for bits which are RES0 only in some
1095      * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1096      * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1097      */
1098     raw_write(env, ri, value & ~0x1FULL);
1099 }
1100 
1101 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1102 {
1103     /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1104      * For bits that vary between AArch32/64, code needs to check the
1105      * current execution mode before directly using the feature bit.
1106      */
1107     uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
1108 
1109     if (!arm_feature(env, ARM_FEATURE_EL2)) {
1110         valid_mask &= ~SCR_HCE;
1111 
1112         /* On ARMv7, SMD (or SCD as it is called in v7) is only
1113          * supported if EL2 exists. The bit is UNK/SBZP when
1114          * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1115          * when EL2 is unavailable.
1116          * On ARMv8, this bit is always available.
1117          */
1118         if (arm_feature(env, ARM_FEATURE_V7) &&
1119             !arm_feature(env, ARM_FEATURE_V8)) {
1120             valid_mask &= ~SCR_SMD;
1121         }
1122     }
1123 
1124     /* Clear all-context RES0 bits.  */
1125     value &= valid_mask;
1126     raw_write(env, ri, value);
1127 }
1128 
1129 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1130 {
1131     ARMCPU *cpu = arm_env_get_cpu(env);
1132 
1133     /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1134      * bank
1135      */
1136     uint32_t index = A32_BANKED_REG_GET(env, csselr,
1137                                         ri->secure & ARM_CP_SECSTATE_S);
1138 
1139     return cpu->ccsidr[index];
1140 }
1141 
1142 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1143                          uint64_t value)
1144 {
1145     raw_write(env, ri, value & 0xf);
1146 }
1147 
1148 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1149 {
1150     CPUState *cs = ENV_GET_CPU(env);
1151     uint64_t ret = 0;
1152 
1153     if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1154         ret |= CPSR_I;
1155     }
1156     if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1157         ret |= CPSR_F;
1158     }
1159     /* External aborts are not possible in QEMU so A bit is always clear */
1160     return ret;
1161 }
1162 
1163 static const ARMCPRegInfo v7_cp_reginfo[] = {
1164     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1165     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1166       .access = PL1_W, .type = ARM_CP_NOP },
1167     /* Performance monitors are implementation defined in v7,
1168      * but with an ARM recommended set of registers, which we
1169      * follow (although we don't actually implement any counters)
1170      *
1171      * Performance registers fall into three categories:
1172      *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1173      *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1174      *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1175      * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1176      * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1177      */
1178     { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1179       .access = PL0_RW, .type = ARM_CP_ALIAS,
1180       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1181       .writefn = pmcntenset_write,
1182       .accessfn = pmreg_access,
1183       .raw_writefn = raw_write },
1184     { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1185       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1186       .access = PL0_RW, .accessfn = pmreg_access,
1187       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1188       .writefn = pmcntenset_write, .raw_writefn = raw_write },
1189     { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1190       .access = PL0_RW,
1191       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1192       .accessfn = pmreg_access,
1193       .writefn = pmcntenclr_write,
1194       .type = ARM_CP_ALIAS },
1195     { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1196       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1197       .access = PL0_RW, .accessfn = pmreg_access,
1198       .type = ARM_CP_ALIAS,
1199       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1200       .writefn = pmcntenclr_write },
1201     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1202       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1203       .accessfn = pmreg_access,
1204       .writefn = pmovsr_write,
1205       .raw_writefn = raw_write },
1206     { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1207       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1208       .access = PL0_RW, .accessfn = pmreg_access,
1209       .type = ARM_CP_ALIAS,
1210       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1211       .writefn = pmovsr_write,
1212       .raw_writefn = raw_write },
1213     /* Unimplemented so WI. */
1214     { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1215       .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
1216 #ifndef CONFIG_USER_ONLY
1217     { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1218       .access = PL0_RW, .type = ARM_CP_ALIAS,
1219       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
1220       .accessfn = pmreg_access, .writefn = pmselr_write,
1221       .raw_writefn = raw_write},
1222     { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
1223       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
1224       .access = PL0_RW, .accessfn = pmreg_access,
1225       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
1226       .writefn = pmselr_write, .raw_writefn = raw_write, },
1227     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1228       .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
1229       .readfn = pmccntr_read, .writefn = pmccntr_write32,
1230       .accessfn = pmreg_access },
1231     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1232       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1233       .access = PL0_RW, .accessfn = pmreg_access,
1234       .type = ARM_CP_IO,
1235       .readfn = pmccntr_read, .writefn = pmccntr_write, },
1236 #endif
1237     { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1238       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
1239       .writefn = pmccfiltr_write,
1240       .access = PL0_RW, .accessfn = pmreg_access,
1241       .type = ARM_CP_IO,
1242       .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1243       .resetvalue = 0, },
1244     { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1245       .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
1246       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1247     { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
1248       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
1249       .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
1250       .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
1251     /* Unimplemented, RAZ/WI. */
1252     { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1253       .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
1254       .accessfn = pmreg_access },
1255     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
1256       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
1257       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1258       .resetvalue = 0,
1259       .writefn = pmuserenr_write, .raw_writefn = raw_write },
1260     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
1261       .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
1262       .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1263       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1264       .resetvalue = 0,
1265       .writefn = pmuserenr_write, .raw_writefn = raw_write },
1266     { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
1267       .access = PL1_RW, .accessfn = access_tpm,
1268       .type = ARM_CP_ALIAS,
1269       .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
1270       .resetvalue = 0,
1271       .writefn = pmintenset_write, .raw_writefn = raw_write },
1272     { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
1273       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
1274       .access = PL1_RW, .accessfn = access_tpm,
1275       .type = ARM_CP_IO,
1276       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1277       .writefn = pmintenset_write, .raw_writefn = raw_write,
1278       .resetvalue = 0x0 },
1279     { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
1280       .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1281       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1282       .writefn = pmintenclr_write, },
1283     { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
1284       .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
1285       .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1286       .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1287       .writefn = pmintenclr_write },
1288     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
1289       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
1290       .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
1291     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
1292       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
1293       .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
1294       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
1295                              offsetof(CPUARMState, cp15.csselr_ns) } },
1296     /* Auxiliary ID register: this actually has an IMPDEF value but for now
1297      * just RAZ for all cores:
1298      */
1299     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
1300       .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
1301       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1302     /* Auxiliary fault status registers: these also are IMPDEF, and we
1303      * choose to RAZ/WI for all cores.
1304      */
1305     { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
1306       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
1307       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1308     { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
1309       .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
1310       .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1311     /* MAIR can just read-as-written because we don't implement caches
1312      * and so don't need to care about memory attributes.
1313      */
1314     { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
1315       .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1316       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
1317       .resetvalue = 0 },
1318     { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
1319       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
1320       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
1321       .resetvalue = 0 },
1322     /* For non-long-descriptor page tables these are PRRR and NMRR;
1323      * regardless they still act as reads-as-written for QEMU.
1324      */
1325      /* MAIR0/1 are defined separately from their 64-bit counterpart which
1326       * allows them to assign the correct fieldoffset based on the endianness
1327       * handled in the field definitions.
1328       */
1329     { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1330       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
1331       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1332                              offsetof(CPUARMState, cp15.mair0_ns) },
1333       .resetfn = arm_cp_reset_ignore },
1334     { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1335       .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
1336       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1337                              offsetof(CPUARMState, cp15.mair1_ns) },
1338       .resetfn = arm_cp_reset_ignore },
1339     { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1340       .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1341       .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1342     /* 32 bit ITLB invalidates */
1343     { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
1344       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1345     { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
1346       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1347     { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
1348       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1349     /* 32 bit DTLB invalidates */
1350     { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
1351       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1352     { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
1353       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1354     { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
1355       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1356     /* 32 bit TLB invalidates */
1357     { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1358       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1359     { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1360       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1361     { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1362       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1363     { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1364       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
1365     REGINFO_SENTINEL
1366 };
1367 
1368 static const ARMCPRegInfo v7mp_cp_reginfo[] = {
1369     /* 32 bit TLB invalidates, Inner Shareable */
1370     { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1371       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
1372     { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
1373       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
1374     { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
1375       .type = ARM_CP_NO_RAW, .access = PL1_W,
1376       .writefn = tlbiasid_is_write },
1377     { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
1378       .type = ARM_CP_NO_RAW, .access = PL1_W,
1379       .writefn = tlbimvaa_is_write },
1380     REGINFO_SENTINEL
1381 };
1382 
1383 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1384                         uint64_t value)
1385 {
1386     value &= 1;
1387     env->teecr = value;
1388 }
1389 
1390 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1391                                     bool isread)
1392 {
1393     if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1394         return CP_ACCESS_TRAP;
1395     }
1396     return CP_ACCESS_OK;
1397 }
1398 
1399 static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1400     { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1401       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1402       .resetvalue = 0,
1403       .writefn = teecr_write },
1404     { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1405       .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1406       .accessfn = teehbr_access, .resetvalue = 0 },
1407     REGINFO_SENTINEL
1408 };
1409 
1410 static const ARMCPRegInfo v6k_cp_reginfo[] = {
1411     { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1412       .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1413       .access = PL0_RW,
1414       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1415     { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1416       .access = PL0_RW,
1417       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1418                              offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1419       .resetfn = arm_cp_reset_ignore },
1420     { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1421       .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1422       .access = PL0_R|PL1_W,
1423       .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1424       .resetvalue = 0},
1425     { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1426       .access = PL0_R|PL1_W,
1427       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1428                              offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1429       .resetfn = arm_cp_reset_ignore },
1430     { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1431       .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1432       .access = PL1_RW,
1433       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1434     { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1435       .access = PL1_RW,
1436       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1437                              offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1438       .resetvalue = 0 },
1439     REGINFO_SENTINEL
1440 };
1441 
1442 #ifndef CONFIG_USER_ONLY
1443 
1444 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1445                                        bool isread)
1446 {
1447     /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1448      * Writable only at the highest implemented exception level.
1449      */
1450     int el = arm_current_el(env);
1451 
1452     switch (el) {
1453     case 0:
1454         if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1455             return CP_ACCESS_TRAP;
1456         }
1457         break;
1458     case 1:
1459         if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1460             arm_is_secure_below_el3(env)) {
1461             /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1462             return CP_ACCESS_TRAP_UNCATEGORIZED;
1463         }
1464         break;
1465     case 2:
1466     case 3:
1467         break;
1468     }
1469 
1470     if (!isread && el < arm_highest_el(env)) {
1471         return CP_ACCESS_TRAP_UNCATEGORIZED;
1472     }
1473 
1474     return CP_ACCESS_OK;
1475 }
1476 
1477 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1478                                         bool isread)
1479 {
1480     unsigned int cur_el = arm_current_el(env);
1481     bool secure = arm_is_secure(env);
1482 
1483     /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
1484     if (cur_el == 0 &&
1485         !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1486         return CP_ACCESS_TRAP;
1487     }
1488 
1489     if (arm_feature(env, ARM_FEATURE_EL2) &&
1490         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1491         !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1492         return CP_ACCESS_TRAP_EL2;
1493     }
1494     return CP_ACCESS_OK;
1495 }
1496 
1497 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1498                                       bool isread)
1499 {
1500     unsigned int cur_el = arm_current_el(env);
1501     bool secure = arm_is_secure(env);
1502 
1503     /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1504      * EL0[PV]TEN is zero.
1505      */
1506     if (cur_el == 0 &&
1507         !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1508         return CP_ACCESS_TRAP;
1509     }
1510 
1511     if (arm_feature(env, ARM_FEATURE_EL2) &&
1512         timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1513         !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1514         return CP_ACCESS_TRAP_EL2;
1515     }
1516     return CP_ACCESS_OK;
1517 }
1518 
1519 static CPAccessResult gt_pct_access(CPUARMState *env,
1520                                     const ARMCPRegInfo *ri,
1521                                     bool isread)
1522 {
1523     return gt_counter_access(env, GTIMER_PHYS, isread);
1524 }
1525 
1526 static CPAccessResult gt_vct_access(CPUARMState *env,
1527                                     const ARMCPRegInfo *ri,
1528                                     bool isread)
1529 {
1530     return gt_counter_access(env, GTIMER_VIRT, isread);
1531 }
1532 
1533 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1534                                        bool isread)
1535 {
1536     return gt_timer_access(env, GTIMER_PHYS, isread);
1537 }
1538 
1539 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1540                                        bool isread)
1541 {
1542     return gt_timer_access(env, GTIMER_VIRT, isread);
1543 }
1544 
1545 static CPAccessResult gt_stimer_access(CPUARMState *env,
1546                                        const ARMCPRegInfo *ri,
1547                                        bool isread)
1548 {
1549     /* The AArch64 register view of the secure physical timer is
1550      * always accessible from EL3, and configurably accessible from
1551      * Secure EL1.
1552      */
1553     switch (arm_current_el(env)) {
1554     case 1:
1555         if (!arm_is_secure(env)) {
1556             return CP_ACCESS_TRAP;
1557         }
1558         if (!(env->cp15.scr_el3 & SCR_ST)) {
1559             return CP_ACCESS_TRAP_EL3;
1560         }
1561         return CP_ACCESS_OK;
1562     case 0:
1563     case 2:
1564         return CP_ACCESS_TRAP;
1565     case 3:
1566         return CP_ACCESS_OK;
1567     default:
1568         g_assert_not_reached();
1569     }
1570 }
1571 
1572 static uint64_t gt_get_countervalue(CPUARMState *env)
1573 {
1574     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1575 }
1576 
1577 static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1578 {
1579     ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1580 
1581     if (gt->ctl & 1) {
1582         /* Timer enabled: calculate and set current ISTATUS, irq, and
1583          * reset timer to when ISTATUS next has to change
1584          */
1585         uint64_t offset = timeridx == GTIMER_VIRT ?
1586                                       cpu->env.cp15.cntvoff_el2 : 0;
1587         uint64_t count = gt_get_countervalue(&cpu->env);
1588         /* Note that this must be unsigned 64 bit arithmetic: */
1589         int istatus = count - offset >= gt->cval;
1590         uint64_t nexttick;
1591         int irqstate;
1592 
1593         gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1594 
1595         irqstate = (istatus && !(gt->ctl & 2));
1596         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1597 
1598         if (istatus) {
1599             /* Next transition is when count rolls back over to zero */
1600             nexttick = UINT64_MAX;
1601         } else {
1602             /* Next transition is when we hit cval */
1603             nexttick = gt->cval + offset;
1604         }
1605         /* Note that the desired next expiry time might be beyond the
1606          * signed-64-bit range of a QEMUTimer -- in this case we just
1607          * set the timer for as far in the future as possible. When the
1608          * timer expires we will reset the timer for any remaining period.
1609          */
1610         if (nexttick > INT64_MAX / GTIMER_SCALE) {
1611             nexttick = INT64_MAX / GTIMER_SCALE;
1612         }
1613         timer_mod(cpu->gt_timer[timeridx], nexttick);
1614         trace_arm_gt_recalc(timeridx, irqstate, nexttick);
1615     } else {
1616         /* Timer disabled: ISTATUS and timer output always clear */
1617         gt->ctl &= ~4;
1618         qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1619         timer_del(cpu->gt_timer[timeridx]);
1620         trace_arm_gt_recalc_disabled(timeridx);
1621     }
1622 }
1623 
1624 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1625                            int timeridx)
1626 {
1627     ARMCPU *cpu = arm_env_get_cpu(env);
1628 
1629     timer_del(cpu->gt_timer[timeridx]);
1630 }
1631 
1632 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1633 {
1634     return gt_get_countervalue(env);
1635 }
1636 
1637 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1638 {
1639     return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1640 }
1641 
1642 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1643                           int timeridx,
1644                           uint64_t value)
1645 {
1646     trace_arm_gt_cval_write(timeridx, value);
1647     env->cp15.c14_timer[timeridx].cval = value;
1648     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1649 }
1650 
1651 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1652                              int timeridx)
1653 {
1654     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1655 
1656     return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1657                       (gt_get_countervalue(env) - offset));
1658 }
1659 
1660 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1661                           int timeridx,
1662                           uint64_t value)
1663 {
1664     uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1665 
1666     trace_arm_gt_tval_write(timeridx, value);
1667     env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1668                                          sextract64(value, 0, 32);
1669     gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1670 }
1671 
1672 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1673                          int timeridx,
1674                          uint64_t value)
1675 {
1676     ARMCPU *cpu = arm_env_get_cpu(env);
1677     uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1678 
1679     trace_arm_gt_ctl_write(timeridx, value);
1680     env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1681     if ((oldval ^ value) & 1) {
1682         /* Enable toggled */
1683         gt_recalc_timer(cpu, timeridx);
1684     } else if ((oldval ^ value) & 2) {
1685         /* IMASK toggled: don't need to recalculate,
1686          * just set the interrupt line based on ISTATUS
1687          */
1688         int irqstate = (oldval & 4) && !(value & 2);
1689 
1690         trace_arm_gt_imask_toggle(timeridx, irqstate);
1691         qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1692     }
1693 }
1694 
1695 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1696 {
1697     gt_timer_reset(env, ri, GTIMER_PHYS);
1698 }
1699 
1700 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1701                                uint64_t value)
1702 {
1703     gt_cval_write(env, ri, GTIMER_PHYS, value);
1704 }
1705 
1706 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1707 {
1708     return gt_tval_read(env, ri, GTIMER_PHYS);
1709 }
1710 
1711 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1712                                uint64_t value)
1713 {
1714     gt_tval_write(env, ri, GTIMER_PHYS, value);
1715 }
1716 
1717 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1718                               uint64_t value)
1719 {
1720     gt_ctl_write(env, ri, GTIMER_PHYS, value);
1721 }
1722 
1723 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1724 {
1725     gt_timer_reset(env, ri, GTIMER_VIRT);
1726 }
1727 
1728 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1729                                uint64_t value)
1730 {
1731     gt_cval_write(env, ri, GTIMER_VIRT, value);
1732 }
1733 
1734 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1735 {
1736     return gt_tval_read(env, ri, GTIMER_VIRT);
1737 }
1738 
1739 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1740                                uint64_t value)
1741 {
1742     gt_tval_write(env, ri, GTIMER_VIRT, value);
1743 }
1744 
1745 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1746                               uint64_t value)
1747 {
1748     gt_ctl_write(env, ri, GTIMER_VIRT, value);
1749 }
1750 
1751 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1752                               uint64_t value)
1753 {
1754     ARMCPU *cpu = arm_env_get_cpu(env);
1755 
1756     trace_arm_gt_cntvoff_write(value);
1757     raw_write(env, ri, value);
1758     gt_recalc_timer(cpu, GTIMER_VIRT);
1759 }
1760 
1761 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1762 {
1763     gt_timer_reset(env, ri, GTIMER_HYP);
1764 }
1765 
1766 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1767                               uint64_t value)
1768 {
1769     gt_cval_write(env, ri, GTIMER_HYP, value);
1770 }
1771 
1772 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1773 {
1774     return gt_tval_read(env, ri, GTIMER_HYP);
1775 }
1776 
1777 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1778                               uint64_t value)
1779 {
1780     gt_tval_write(env, ri, GTIMER_HYP, value);
1781 }
1782 
1783 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1784                               uint64_t value)
1785 {
1786     gt_ctl_write(env, ri, GTIMER_HYP, value);
1787 }
1788 
1789 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1790 {
1791     gt_timer_reset(env, ri, GTIMER_SEC);
1792 }
1793 
1794 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1795                               uint64_t value)
1796 {
1797     gt_cval_write(env, ri, GTIMER_SEC, value);
1798 }
1799 
1800 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1801 {
1802     return gt_tval_read(env, ri, GTIMER_SEC);
1803 }
1804 
1805 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1806                               uint64_t value)
1807 {
1808     gt_tval_write(env, ri, GTIMER_SEC, value);
1809 }
1810 
1811 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1812                               uint64_t value)
1813 {
1814     gt_ctl_write(env, ri, GTIMER_SEC, value);
1815 }
1816 
1817 void arm_gt_ptimer_cb(void *opaque)
1818 {
1819     ARMCPU *cpu = opaque;
1820 
1821     gt_recalc_timer(cpu, GTIMER_PHYS);
1822 }
1823 
1824 void arm_gt_vtimer_cb(void *opaque)
1825 {
1826     ARMCPU *cpu = opaque;
1827 
1828     gt_recalc_timer(cpu, GTIMER_VIRT);
1829 }
1830 
1831 void arm_gt_htimer_cb(void *opaque)
1832 {
1833     ARMCPU *cpu = opaque;
1834 
1835     gt_recalc_timer(cpu, GTIMER_HYP);
1836 }
1837 
1838 void arm_gt_stimer_cb(void *opaque)
1839 {
1840     ARMCPU *cpu = opaque;
1841 
1842     gt_recalc_timer(cpu, GTIMER_SEC);
1843 }
1844 
1845 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1846     /* Note that CNTFRQ is purely reads-as-written for the benefit
1847      * of software; writing it doesn't actually change the timer frequency.
1848      * Our reset value matches the fixed frequency we implement the timer at.
1849      */
1850     { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1851       .type = ARM_CP_ALIAS,
1852       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1853       .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1854     },
1855     { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1856       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1857       .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1858       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1859       .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1860     },
1861     /* overall control: mostly access permissions */
1862     { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1863       .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1864       .access = PL1_RW,
1865       .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1866       .resetvalue = 0,
1867     },
1868     /* per-timer control */
1869     { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1870       .secure = ARM_CP_SECSTATE_NS,
1871       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1872       .accessfn = gt_ptimer_access,
1873       .fieldoffset = offsetoflow32(CPUARMState,
1874                                    cp15.c14_timer[GTIMER_PHYS].ctl),
1875       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1876     },
1877     { .name = "CNTP_CTL(S)",
1878       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1879       .secure = ARM_CP_SECSTATE_S,
1880       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1881       .accessfn = gt_ptimer_access,
1882       .fieldoffset = offsetoflow32(CPUARMState,
1883                                    cp15.c14_timer[GTIMER_SEC].ctl),
1884       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1885     },
1886     { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1887       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1888       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1889       .accessfn = gt_ptimer_access,
1890       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1891       .resetvalue = 0,
1892       .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1893     },
1894     { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1895       .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1896       .accessfn = gt_vtimer_access,
1897       .fieldoffset = offsetoflow32(CPUARMState,
1898                                    cp15.c14_timer[GTIMER_VIRT].ctl),
1899       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1900     },
1901     { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1902       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1903       .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1904       .accessfn = gt_vtimer_access,
1905       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1906       .resetvalue = 0,
1907       .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1908     },
1909     /* TimerValue views: a 32 bit downcounting view of the underlying state */
1910     { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1911       .secure = ARM_CP_SECSTATE_NS,
1912       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1913       .accessfn = gt_ptimer_access,
1914       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1915     },
1916     { .name = "CNTP_TVAL(S)",
1917       .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1918       .secure = ARM_CP_SECSTATE_S,
1919       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1920       .accessfn = gt_ptimer_access,
1921       .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1922     },
1923     { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1924       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1925       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1926       .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1927       .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1928     },
1929     { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1930       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1931       .accessfn = gt_vtimer_access,
1932       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1933     },
1934     { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1935       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1936       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1937       .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
1938       .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1939     },
1940     /* The counter itself */
1941     { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1942       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1943       .accessfn = gt_pct_access,
1944       .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1945     },
1946     { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1947       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1948       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1949       .accessfn = gt_pct_access, .readfn = gt_cnt_read,
1950     },
1951     { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1952       .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1953       .accessfn = gt_vct_access,
1954       .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
1955     },
1956     { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1957       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1958       .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1959       .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
1960     },
1961     /* Comparison value, indicating when the timer goes off */
1962     { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1963       .secure = ARM_CP_SECSTATE_NS,
1964       .access = PL1_RW | PL0_R,
1965       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1966       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1967       .accessfn = gt_ptimer_access,
1968       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1969     },
1970     { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
1971       .secure = ARM_CP_SECSTATE_S,
1972       .access = PL1_RW | PL0_R,
1973       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1974       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1975       .accessfn = gt_ptimer_access,
1976       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1977     },
1978     { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1979       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1980       .access = PL1_RW | PL0_R,
1981       .type = ARM_CP_IO,
1982       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1983       .resetvalue = 0, .accessfn = gt_ptimer_access,
1984       .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1985     },
1986     { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1987       .access = PL1_RW | PL0_R,
1988       .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1989       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1990       .accessfn = gt_vtimer_access,
1991       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1992     },
1993     { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1994       .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1995       .access = PL1_RW | PL0_R,
1996       .type = ARM_CP_IO,
1997       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1998       .resetvalue = 0, .accessfn = gt_vtimer_access,
1999       .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2000     },
2001     /* Secure timer -- this is actually restricted to only EL3
2002      * and configurably Secure-EL1 via the accessfn.
2003      */
2004     { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2005       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2006       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2007       .accessfn = gt_stimer_access,
2008       .readfn = gt_sec_tval_read,
2009       .writefn = gt_sec_tval_write,
2010       .resetfn = gt_sec_timer_reset,
2011     },
2012     { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2013       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2014       .type = ARM_CP_IO, .access = PL1_RW,
2015       .accessfn = gt_stimer_access,
2016       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2017       .resetvalue = 0,
2018       .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2019     },
2020     { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2021       .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2022       .type = ARM_CP_IO, .access = PL1_RW,
2023       .accessfn = gt_stimer_access,
2024       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2025       .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2026     },
2027     REGINFO_SENTINEL
2028 };
2029 
2030 #else
2031 /* In user-mode none of the generic timer registers are accessible,
2032  * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2033  * so instead just don't register any of them.
2034  */
2035 static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2036     REGINFO_SENTINEL
2037 };
2038 
2039 #endif
2040 
2041 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2042 {
2043     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2044         raw_write(env, ri, value);
2045     } else if (arm_feature(env, ARM_FEATURE_V7)) {
2046         raw_write(env, ri, value & 0xfffff6ff);
2047     } else {
2048         raw_write(env, ri, value & 0xfffff1ff);
2049     }
2050 }
2051 
2052 #ifndef CONFIG_USER_ONLY
2053 /* get_phys_addr() isn't present for user-mode-only targets */
2054 
2055 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2056                                  bool isread)
2057 {
2058     if (ri->opc2 & 4) {
2059         /* The ATS12NSO* operations must trap to EL3 if executed in
2060          * Secure EL1 (which can only happen if EL3 is AArch64).
2061          * They are simply UNDEF if executed from NS EL1.
2062          * They function normally from EL2 or EL3.
2063          */
2064         if (arm_current_el(env) == 1) {
2065             if (arm_is_secure_below_el3(env)) {
2066                 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2067             }
2068             return CP_ACCESS_TRAP_UNCATEGORIZED;
2069         }
2070     }
2071     return CP_ACCESS_OK;
2072 }
2073 
2074 static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2075                              int access_type, ARMMMUIdx mmu_idx)
2076 {
2077     hwaddr phys_addr;
2078     target_ulong page_size;
2079     int prot;
2080     uint32_t fsr;
2081     bool ret;
2082     uint64_t par64;
2083     MemTxAttrs attrs = {};
2084     ARMMMUFaultInfo fi = {};
2085 
2086     ret = get_phys_addr(env, value, access_type, mmu_idx,
2087                         &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
2088     if (extended_addresses_enabled(env)) {
2089         /* fsr is a DFSR/IFSR value for the long descriptor
2090          * translation table format, but with WnR always clear.
2091          * Convert it to a 64-bit PAR.
2092          */
2093         par64 = (1 << 11); /* LPAE bit always set */
2094         if (!ret) {
2095             par64 |= phys_addr & ~0xfffULL;
2096             if (!attrs.secure) {
2097                 par64 |= (1 << 9); /* NS */
2098             }
2099             /* We don't set the ATTR or SH fields in the PAR. */
2100         } else {
2101             par64 |= 1; /* F */
2102             par64 |= (fsr & 0x3f) << 1; /* FS */
2103             /* Note that S2WLK and FSTAGE are always zero, because we don't
2104              * implement virtualization and therefore there can't be a stage 2
2105              * fault.
2106              */
2107         }
2108     } else {
2109         /* fsr is a DFSR/IFSR value for the short descriptor
2110          * translation table format (with WnR always clear).
2111          * Convert it to a 32-bit PAR.
2112          */
2113         if (!ret) {
2114             /* We do not set any attribute bits in the PAR */
2115             if (page_size == (1 << 24)
2116                 && arm_feature(env, ARM_FEATURE_V7)) {
2117                 par64 = (phys_addr & 0xff000000) | (1 << 1);
2118             } else {
2119                 par64 = phys_addr & 0xfffff000;
2120             }
2121             if (!attrs.secure) {
2122                 par64 |= (1 << 9); /* NS */
2123             }
2124         } else {
2125             par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2126                     ((fsr & 0xf) << 1) | 1;
2127         }
2128     }
2129     return par64;
2130 }
2131 
2132 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2133 {
2134     int access_type = ri->opc2 & 1;
2135     uint64_t par64;
2136     ARMMMUIdx mmu_idx;
2137     int el = arm_current_el(env);
2138     bool secure = arm_is_secure_below_el3(env);
2139 
2140     switch (ri->opc2 & 6) {
2141     case 0:
2142         /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2143         switch (el) {
2144         case 3:
2145             mmu_idx = ARMMMUIdx_S1E3;
2146             break;
2147         case 2:
2148             mmu_idx = ARMMMUIdx_S1NSE1;
2149             break;
2150         case 1:
2151             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2152             break;
2153         default:
2154             g_assert_not_reached();
2155         }
2156         break;
2157     case 2:
2158         /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2159         switch (el) {
2160         case 3:
2161             mmu_idx = ARMMMUIdx_S1SE0;
2162             break;
2163         case 2:
2164             mmu_idx = ARMMMUIdx_S1NSE0;
2165             break;
2166         case 1:
2167             mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2168             break;
2169         default:
2170             g_assert_not_reached();
2171         }
2172         break;
2173     case 4:
2174         /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2175         mmu_idx = ARMMMUIdx_S12NSE1;
2176         break;
2177     case 6:
2178         /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2179         mmu_idx = ARMMMUIdx_S12NSE0;
2180         break;
2181     default:
2182         g_assert_not_reached();
2183     }
2184 
2185     par64 = do_ats_write(env, value, access_type, mmu_idx);
2186 
2187     A32_BANKED_CURRENT_REG_SET(env, par, par64);
2188 }
2189 
2190 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2191                         uint64_t value)
2192 {
2193     int access_type = ri->opc2 & 1;
2194     uint64_t par64;
2195 
2196     par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2197 
2198     A32_BANKED_CURRENT_REG_SET(env, par, par64);
2199 }
2200 
2201 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2202                                      bool isread)
2203 {
2204     if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2205         return CP_ACCESS_TRAP;
2206     }
2207     return CP_ACCESS_OK;
2208 }
2209 
2210 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2211                         uint64_t value)
2212 {
2213     int access_type = ri->opc2 & 1;
2214     ARMMMUIdx mmu_idx;
2215     int secure = arm_is_secure_below_el3(env);
2216 
2217     switch (ri->opc2 & 6) {
2218     case 0:
2219         switch (ri->opc1) {
2220         case 0: /* AT S1E1R, AT S1E1W */
2221             mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2222             break;
2223         case 4: /* AT S1E2R, AT S1E2W */
2224             mmu_idx = ARMMMUIdx_S1E2;
2225             break;
2226         case 6: /* AT S1E3R, AT S1E3W */
2227             mmu_idx = ARMMMUIdx_S1E3;
2228             break;
2229         default:
2230             g_assert_not_reached();
2231         }
2232         break;
2233     case 2: /* AT S1E0R, AT S1E0W */
2234         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2235         break;
2236     case 4: /* AT S12E1R, AT S12E1W */
2237         mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2238         break;
2239     case 6: /* AT S12E0R, AT S12E0W */
2240         mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2241         break;
2242     default:
2243         g_assert_not_reached();
2244     }
2245 
2246     env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2247 }
2248 #endif
2249 
2250 static const ARMCPRegInfo vapa_cp_reginfo[] = {
2251     { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2252       .access = PL1_RW, .resetvalue = 0,
2253       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2254                              offsetoflow32(CPUARMState, cp15.par_ns) },
2255       .writefn = par_write },
2256 #ifndef CONFIG_USER_ONLY
2257     /* This underdecoding is safe because the reginfo is NO_RAW. */
2258     { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2259       .access = PL1_W, .accessfn = ats_access,
2260       .writefn = ats_write, .type = ARM_CP_NO_RAW },
2261 #endif
2262     REGINFO_SENTINEL
2263 };
2264 
2265 /* Return basic MPU access permission bits.  */
2266 static uint32_t simple_mpu_ap_bits(uint32_t val)
2267 {
2268     uint32_t ret;
2269     uint32_t mask;
2270     int i;
2271     ret = 0;
2272     mask = 3;
2273     for (i = 0; i < 16; i += 2) {
2274         ret |= (val >> i) & mask;
2275         mask <<= 2;
2276     }
2277     return ret;
2278 }
2279 
2280 /* Pad basic MPU access permission bits to extended format.  */
2281 static uint32_t extended_mpu_ap_bits(uint32_t val)
2282 {
2283     uint32_t ret;
2284     uint32_t mask;
2285     int i;
2286     ret = 0;
2287     mask = 3;
2288     for (i = 0; i < 16; i += 2) {
2289         ret |= (val & mask) << i;
2290         mask <<= 2;
2291     }
2292     return ret;
2293 }
2294 
2295 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2296                                  uint64_t value)
2297 {
2298     env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2299 }
2300 
2301 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2302 {
2303     return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2304 }
2305 
2306 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2307                                  uint64_t value)
2308 {
2309     env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2310 }
2311 
2312 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2313 {
2314     return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2315 }
2316 
2317 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2318 {
2319     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2320 
2321     if (!u32p) {
2322         return 0;
2323     }
2324 
2325     u32p += env->cp15.c6_rgnr;
2326     return *u32p;
2327 }
2328 
2329 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2330                          uint64_t value)
2331 {
2332     ARMCPU *cpu = arm_env_get_cpu(env);
2333     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2334 
2335     if (!u32p) {
2336         return;
2337     }
2338 
2339     u32p += env->cp15.c6_rgnr;
2340     tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2341     *u32p = value;
2342 }
2343 
2344 static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2345 {
2346     ARMCPU *cpu = arm_env_get_cpu(env);
2347     uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2348 
2349     if (!u32p) {
2350         return;
2351     }
2352 
2353     memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2354 }
2355 
2356 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2357                               uint64_t value)
2358 {
2359     ARMCPU *cpu = arm_env_get_cpu(env);
2360     uint32_t nrgs = cpu->pmsav7_dregion;
2361 
2362     if (value >= nrgs) {
2363         qemu_log_mask(LOG_GUEST_ERROR,
2364                       "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2365                       " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2366         return;
2367     }
2368 
2369     raw_write(env, ri, value);
2370 }
2371 
2372 static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2373     { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2374       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2375       .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2376       .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2377     { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2378       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2379       .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2380       .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2381     { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2382       .access = PL1_RW, .type = ARM_CP_NO_RAW,
2383       .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2384       .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2385     { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2386       .access = PL1_RW,
2387       .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
2388       .writefn = pmsav7_rgnr_write },
2389     REGINFO_SENTINEL
2390 };
2391 
2392 static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2393     { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2394       .access = PL1_RW, .type = ARM_CP_ALIAS,
2395       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2396       .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2397     { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2398       .access = PL1_RW, .type = ARM_CP_ALIAS,
2399       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2400       .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2401     { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2402       .access = PL1_RW,
2403       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2404       .resetvalue = 0, },
2405     { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2406       .access = PL1_RW,
2407       .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2408       .resetvalue = 0, },
2409     { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2410       .access = PL1_RW,
2411       .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2412     { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2413       .access = PL1_RW,
2414       .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2415     /* Protection region base and size registers */
2416     { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2417       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2418       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2419     { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2420       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2421       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2422     { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2423       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2424       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2425     { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2426       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2427       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2428     { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2429       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2430       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2431     { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2432       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2433       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2434     { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2435       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2436       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2437     { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2438       .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2439       .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2440     REGINFO_SENTINEL
2441 };
2442 
2443 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2444                                  uint64_t value)
2445 {
2446     TCR *tcr = raw_ptr(env, ri);
2447     int maskshift = extract32(value, 0, 3);
2448 
2449     if (!arm_feature(env, ARM_FEATURE_V8)) {
2450         if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2451             /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2452              * using Long-desciptor translation table format */
2453             value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2454         } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2455             /* In an implementation that includes the Security Extensions
2456              * TTBCR has additional fields PD0 [4] and PD1 [5] for
2457              * Short-descriptor translation table format.
2458              */
2459             value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2460         } else {
2461             value &= TTBCR_N;
2462         }
2463     }
2464 
2465     /* Update the masks corresponding to the TCR bank being written
2466      * Note that we always calculate mask and base_mask, but
2467      * they are only used for short-descriptor tables (ie if EAE is 0);
2468      * for long-descriptor tables the TCR fields are used differently
2469      * and the mask and base_mask values are meaningless.
2470      */
2471     tcr->raw_tcr = value;
2472     tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2473     tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2474 }
2475 
2476 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2477                              uint64_t value)
2478 {
2479     ARMCPU *cpu = arm_env_get_cpu(env);
2480 
2481     if (arm_feature(env, ARM_FEATURE_LPAE)) {
2482         /* With LPAE the TTBCR could result in a change of ASID
2483          * via the TTBCR.A1 bit, so do a TLB flush.
2484          */
2485         tlb_flush(CPU(cpu));
2486     }
2487     vmsa_ttbcr_raw_write(env, ri, value);
2488 }
2489 
2490 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2491 {
2492     TCR *tcr = raw_ptr(env, ri);
2493 
2494     /* Reset both the TCR as well as the masks corresponding to the bank of
2495      * the TCR being reset.
2496      */
2497     tcr->raw_tcr = 0;
2498     tcr->mask = 0;
2499     tcr->base_mask = 0xffffc000u;
2500 }
2501 
2502 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2503                                uint64_t value)
2504 {
2505     ARMCPU *cpu = arm_env_get_cpu(env);
2506     TCR *tcr = raw_ptr(env, ri);
2507 
2508     /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2509     tlb_flush(CPU(cpu));
2510     tcr->raw_tcr = value;
2511 }
2512 
2513 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2514                             uint64_t value)
2515 {
2516     /* 64 bit accesses to the TTBRs can change the ASID and so we
2517      * must flush the TLB.
2518      */
2519     if (cpreg_field_is_64bit(ri)) {
2520         ARMCPU *cpu = arm_env_get_cpu(env);
2521 
2522         tlb_flush(CPU(cpu));
2523     }
2524     raw_write(env, ri, value);
2525 }
2526 
2527 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2528                         uint64_t value)
2529 {
2530     ARMCPU *cpu = arm_env_get_cpu(env);
2531     CPUState *cs = CPU(cpu);
2532 
2533     /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2534     if (raw_read(env, ri) != value) {
2535         tlb_flush_by_mmuidx(cs,
2536                             (1 << ARMMMUIdx_S12NSE1) |
2537                             (1 << ARMMMUIdx_S12NSE0) |
2538                             (1 << ARMMMUIdx_S2NS));
2539         raw_write(env, ri, value);
2540     }
2541 }
2542 
2543 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2544     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2545       .access = PL1_RW, .type = ARM_CP_ALIAS,
2546       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2547                              offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2548     { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2549       .access = PL1_RW, .resetvalue = 0,
2550       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2551                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2552     { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2553       .access = PL1_RW, .resetvalue = 0,
2554       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2555                              offsetof(CPUARMState, cp15.dfar_ns) } },
2556     { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2557       .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2558       .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2559       .resetvalue = 0, },
2560     REGINFO_SENTINEL
2561 };
2562 
2563 static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2564     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2565       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2566       .access = PL1_RW,
2567       .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2568     { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2569       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2570       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2571       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2572                              offsetof(CPUARMState, cp15.ttbr0_ns) } },
2573     { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2574       .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2575       .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2576       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2577                              offsetof(CPUARMState, cp15.ttbr1_ns) } },
2578     { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2579       .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2580       .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2581       .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2582       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2583     { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2584       .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2585       .raw_writefn = vmsa_ttbcr_raw_write,
2586       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2587                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2588     REGINFO_SENTINEL
2589 };
2590 
2591 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2592                                 uint64_t value)
2593 {
2594     env->cp15.c15_ticonfig = value & 0xe7;
2595     /* The OS_TYPE bit in this register changes the reported CPUID! */
2596     env->cp15.c0_cpuid = (value & (1 << 5)) ?
2597         ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2598 }
2599 
2600 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2601                                 uint64_t value)
2602 {
2603     env->cp15.c15_threadid = value & 0xffff;
2604 }
2605 
2606 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2607                            uint64_t value)
2608 {
2609     /* Wait-for-interrupt (deprecated) */
2610     cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2611 }
2612 
2613 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2614                                   uint64_t value)
2615 {
2616     /* On OMAP there are registers indicating the max/min index of dcache lines
2617      * containing a dirty line; cache flush operations have to reset these.
2618      */
2619     env->cp15.c15_i_max = 0x000;
2620     env->cp15.c15_i_min = 0xff0;
2621 }
2622 
2623 static const ARMCPRegInfo omap_cp_reginfo[] = {
2624     { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2625       .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2626       .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2627       .resetvalue = 0, },
2628     { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2629       .access = PL1_RW, .type = ARM_CP_NOP },
2630     { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2631       .access = PL1_RW,
2632       .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2633       .writefn = omap_ticonfig_write },
2634     { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2635       .access = PL1_RW,
2636       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2637     { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2638       .access = PL1_RW, .resetvalue = 0xff0,
2639       .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2640     { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2641       .access = PL1_RW,
2642       .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2643       .writefn = omap_threadid_write },
2644     { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2645       .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2646       .type = ARM_CP_NO_RAW,
2647       .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2648     /* TODO: Peripheral port remap register:
2649      * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2650      * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2651      * when MMU is off.
2652      */
2653     { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2654       .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2655       .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2656       .writefn = omap_cachemaint_write },
2657     { .name = "C9", .cp = 15, .crn = 9,
2658       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2659       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2660     REGINFO_SENTINEL
2661 };
2662 
2663 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2664                               uint64_t value)
2665 {
2666     env->cp15.c15_cpar = value & 0x3fff;
2667 }
2668 
2669 static const ARMCPRegInfo xscale_cp_reginfo[] = {
2670     { .name = "XSCALE_CPAR",
2671       .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2672       .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2673       .writefn = xscale_cpar_write, },
2674     { .name = "XSCALE_AUXCR",
2675       .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2676       .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2677       .resetvalue = 0, },
2678     /* XScale specific cache-lockdown: since we have no cache we NOP these
2679      * and hope the guest does not really rely on cache behaviour.
2680      */
2681     { .name = "XSCALE_LOCK_ICACHE_LINE",
2682       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2683       .access = PL1_W, .type = ARM_CP_NOP },
2684     { .name = "XSCALE_UNLOCK_ICACHE",
2685       .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2686       .access = PL1_W, .type = ARM_CP_NOP },
2687     { .name = "XSCALE_DCACHE_LOCK",
2688       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2689       .access = PL1_RW, .type = ARM_CP_NOP },
2690     { .name = "XSCALE_UNLOCK_DCACHE",
2691       .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2692       .access = PL1_W, .type = ARM_CP_NOP },
2693     REGINFO_SENTINEL
2694 };
2695 
2696 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2697     /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2698      * implementation of this implementation-defined space.
2699      * Ideally this should eventually disappear in favour of actually
2700      * implementing the correct behaviour for all cores.
2701      */
2702     { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2703       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2704       .access = PL1_RW,
2705       .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2706       .resetvalue = 0 },
2707     REGINFO_SENTINEL
2708 };
2709 
2710 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2711     /* Cache status: RAZ because we have no cache so it's always clean */
2712     { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2713       .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2714       .resetvalue = 0 },
2715     REGINFO_SENTINEL
2716 };
2717 
2718 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2719     /* We never have a a block transfer operation in progress */
2720     { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2721       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2722       .resetvalue = 0 },
2723     /* The cache ops themselves: these all NOP for QEMU */
2724     { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2725       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2726     { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2727       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2728     { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2729       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2730     { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2731       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2732     { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2733       .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2734     { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2735       .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2736     REGINFO_SENTINEL
2737 };
2738 
2739 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2740     /* The cache test-and-clean instructions always return (1 << 30)
2741      * to indicate that there are no dirty cache lines.
2742      */
2743     { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2744       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2745       .resetvalue = (1 << 30) },
2746     { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2747       .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2748       .resetvalue = (1 << 30) },
2749     REGINFO_SENTINEL
2750 };
2751 
2752 static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2753     /* Ignore ReadBuffer accesses */
2754     { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2755       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2756       .access = PL1_RW, .resetvalue = 0,
2757       .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2758     REGINFO_SENTINEL
2759 };
2760 
2761 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2762 {
2763     ARMCPU *cpu = arm_env_get_cpu(env);
2764     unsigned int cur_el = arm_current_el(env);
2765     bool secure = arm_is_secure(env);
2766 
2767     if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2768         return env->cp15.vpidr_el2;
2769     }
2770     return raw_read(env, ri);
2771 }
2772 
2773 static uint64_t mpidr_read_val(CPUARMState *env)
2774 {
2775     ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2776     uint64_t mpidr = cpu->mp_affinity;
2777 
2778     if (arm_feature(env, ARM_FEATURE_V7MP)) {
2779         mpidr |= (1U << 31);
2780         /* Cores which are uniprocessor (non-coherent)
2781          * but still implement the MP extensions set
2782          * bit 30. (For instance, Cortex-R5).
2783          */
2784         if (cpu->mp_is_up) {
2785             mpidr |= (1u << 30);
2786         }
2787     }
2788     return mpidr;
2789 }
2790 
2791 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2792 {
2793     unsigned int cur_el = arm_current_el(env);
2794     bool secure = arm_is_secure(env);
2795 
2796     if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2797         return env->cp15.vmpidr_el2;
2798     }
2799     return mpidr_read_val(env);
2800 }
2801 
2802 static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2803     { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2804       .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2805       .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2806     REGINFO_SENTINEL
2807 };
2808 
2809 static const ARMCPRegInfo lpae_cp_reginfo[] = {
2810     /* NOP AMAIR0/1 */
2811     { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2812       .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2813       .access = PL1_RW, .type = ARM_CP_CONST,
2814       .resetvalue = 0 },
2815     /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2816     { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2817       .access = PL1_RW, .type = ARM_CP_CONST,
2818       .resetvalue = 0 },
2819     { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2820       .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2821       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2822                              offsetof(CPUARMState, cp15.par_ns)} },
2823     { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2824       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2825       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2826                              offsetof(CPUARMState, cp15.ttbr0_ns) },
2827       .writefn = vmsa_ttbr_write, },
2828     { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2829       .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2830       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2831                              offsetof(CPUARMState, cp15.ttbr1_ns) },
2832       .writefn = vmsa_ttbr_write, },
2833     REGINFO_SENTINEL
2834 };
2835 
2836 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2837 {
2838     return vfp_get_fpcr(env);
2839 }
2840 
2841 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2842                             uint64_t value)
2843 {
2844     vfp_set_fpcr(env, value);
2845 }
2846 
2847 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2848 {
2849     return vfp_get_fpsr(env);
2850 }
2851 
2852 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2853                             uint64_t value)
2854 {
2855     vfp_set_fpsr(env, value);
2856 }
2857 
2858 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2859                                        bool isread)
2860 {
2861     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2862         return CP_ACCESS_TRAP;
2863     }
2864     return CP_ACCESS_OK;
2865 }
2866 
2867 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2868                             uint64_t value)
2869 {
2870     env->daif = value & PSTATE_DAIF;
2871 }
2872 
2873 static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2874                                           const ARMCPRegInfo *ri,
2875                                           bool isread)
2876 {
2877     /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2878      * SCTLR_EL1.UCI is set.
2879      */
2880     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2881         return CP_ACCESS_TRAP;
2882     }
2883     return CP_ACCESS_OK;
2884 }
2885 
2886 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2887  * Page D4-1736 (DDI0487A.b)
2888  */
2889 
2890 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2891                                     uint64_t value)
2892 {
2893     CPUState *cs = ENV_GET_CPU(env);
2894 
2895     if (arm_is_secure_below_el3(env)) {
2896         tlb_flush_by_mmuidx(cs,
2897                             (1 << ARMMMUIdx_S1SE1) |
2898                             (1 << ARMMMUIdx_S1SE0));
2899     } else {
2900         tlb_flush_by_mmuidx(cs,
2901                             (1 << ARMMMUIdx_S12NSE1) |
2902                             (1 << ARMMMUIdx_S12NSE0));
2903     }
2904 }
2905 
2906 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2907                                       uint64_t value)
2908 {
2909     CPUState *cs = ENV_GET_CPU(env);
2910     bool sec = arm_is_secure_below_el3(env);
2911 
2912     if (sec) {
2913         tlb_flush_by_mmuidx_all_cpus_synced(cs,
2914                                             (1 << ARMMMUIdx_S1SE1) |
2915                                             (1 << ARMMMUIdx_S1SE0));
2916     } else {
2917         tlb_flush_by_mmuidx_all_cpus_synced(cs,
2918                                             (1 << ARMMMUIdx_S12NSE1) |
2919                                             (1 << ARMMMUIdx_S12NSE0));
2920     }
2921 }
2922 
2923 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2924                                   uint64_t value)
2925 {
2926     /* Note that the 'ALL' scope must invalidate both stage 1 and
2927      * stage 2 translations, whereas most other scopes only invalidate
2928      * stage 1 translations.
2929      */
2930     ARMCPU *cpu = arm_env_get_cpu(env);
2931     CPUState *cs = CPU(cpu);
2932 
2933     if (arm_is_secure_below_el3(env)) {
2934         tlb_flush_by_mmuidx(cs,
2935                             (1 << ARMMMUIdx_S1SE1) |
2936                             (1 << ARMMMUIdx_S1SE0));
2937     } else {
2938         if (arm_feature(env, ARM_FEATURE_EL2)) {
2939             tlb_flush_by_mmuidx(cs,
2940                                 (1 << ARMMMUIdx_S12NSE1) |
2941                                 (1 << ARMMMUIdx_S12NSE0) |
2942                                 (1 << ARMMMUIdx_S2NS));
2943         } else {
2944             tlb_flush_by_mmuidx(cs,
2945                                 (1 << ARMMMUIdx_S12NSE1) |
2946                                 (1 << ARMMMUIdx_S12NSE0));
2947         }
2948     }
2949 }
2950 
2951 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2952                                   uint64_t value)
2953 {
2954     ARMCPU *cpu = arm_env_get_cpu(env);
2955     CPUState *cs = CPU(cpu);
2956 
2957     tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2));
2958 }
2959 
2960 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2961                                   uint64_t value)
2962 {
2963     ARMCPU *cpu = arm_env_get_cpu(env);
2964     CPUState *cs = CPU(cpu);
2965 
2966     tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3));
2967 }
2968 
2969 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2970                                     uint64_t value)
2971 {
2972     /* Note that the 'ALL' scope must invalidate both stage 1 and
2973      * stage 2 translations, whereas most other scopes only invalidate
2974      * stage 1 translations.
2975      */
2976     CPUState *cs = ENV_GET_CPU(env);
2977     bool sec = arm_is_secure_below_el3(env);
2978     bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2979 
2980     if (sec) {
2981         tlb_flush_by_mmuidx_all_cpus_synced(cs,
2982                                             (1 << ARMMMUIdx_S1SE1) |
2983                                             (1 << ARMMMUIdx_S1SE0));
2984     } else if (has_el2) {
2985         tlb_flush_by_mmuidx_all_cpus_synced(cs,
2986                                             (1 << ARMMMUIdx_S12NSE1) |
2987                                             (1 << ARMMMUIdx_S12NSE0) |
2988                                             (1 << ARMMMUIdx_S2NS));
2989     } else {
2990           tlb_flush_by_mmuidx_all_cpus_synced(cs,
2991                                               (1 << ARMMMUIdx_S12NSE1) |
2992                                               (1 << ARMMMUIdx_S12NSE0));
2993     }
2994 }
2995 
2996 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2997                                     uint64_t value)
2998 {
2999     CPUState *cs = ENV_GET_CPU(env);
3000 
3001     tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2));
3002 }
3003 
3004 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3005                                     uint64_t value)
3006 {
3007     CPUState *cs = ENV_GET_CPU(env);
3008 
3009     tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3));
3010 }
3011 
3012 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3013                                  uint64_t value)
3014 {
3015     /* Invalidate by VA, EL1&0 (AArch64 version).
3016      * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3017      * since we don't support flush-for-specific-ASID-only or
3018      * flush-last-level-only.
3019      */
3020     ARMCPU *cpu = arm_env_get_cpu(env);
3021     CPUState *cs = CPU(cpu);
3022     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3023 
3024     if (arm_is_secure_below_el3(env)) {
3025         tlb_flush_page_by_mmuidx(cs, pageaddr,
3026                                  (1 << ARMMMUIdx_S1SE1) |
3027                                  (1 << ARMMMUIdx_S1SE0));
3028     } else {
3029         tlb_flush_page_by_mmuidx(cs, pageaddr,
3030                                  (1 << ARMMMUIdx_S12NSE1) |
3031                                  (1 << ARMMMUIdx_S12NSE0));
3032     }
3033 }
3034 
3035 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3036                                  uint64_t value)
3037 {
3038     /* Invalidate by VA, EL2
3039      * Currently handles both VAE2 and VALE2, since we don't support
3040      * flush-last-level-only.
3041      */
3042     ARMCPU *cpu = arm_env_get_cpu(env);
3043     CPUState *cs = CPU(cpu);
3044     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3045 
3046     tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2));
3047 }
3048 
3049 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3050                                  uint64_t value)
3051 {
3052     /* Invalidate by VA, EL3
3053      * Currently handles both VAE3 and VALE3, since we don't support
3054      * flush-last-level-only.
3055      */
3056     ARMCPU *cpu = arm_env_get_cpu(env);
3057     CPUState *cs = CPU(cpu);
3058     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3059 
3060     tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3));
3061 }
3062 
3063 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3064                                    uint64_t value)
3065 {
3066     ARMCPU *cpu = arm_env_get_cpu(env);
3067     CPUState *cs = CPU(cpu);
3068     bool sec = arm_is_secure_below_el3(env);
3069     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3070 
3071     if (sec) {
3072         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3073                                                  (1 << ARMMMUIdx_S1SE1) |
3074                                                  (1 << ARMMMUIdx_S1SE0));
3075     } else {
3076         tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3077                                                  (1 << ARMMMUIdx_S12NSE1) |
3078                                                  (1 << ARMMMUIdx_S12NSE0));
3079     }
3080 }
3081 
3082 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3083                                    uint64_t value)
3084 {
3085     CPUState *cs = ENV_GET_CPU(env);
3086     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3087 
3088     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3089                                              (1 << ARMMMUIdx_S1E2));
3090 }
3091 
3092 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3093                                    uint64_t value)
3094 {
3095     CPUState *cs = ENV_GET_CPU(env);
3096     uint64_t pageaddr = sextract64(value << 12, 0, 56);
3097 
3098     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3099                                              (1 << ARMMMUIdx_S1E3));
3100 }
3101 
3102 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3103                                     uint64_t value)
3104 {
3105     /* Invalidate by IPA. This has to invalidate any structures that
3106      * contain only stage 2 translation information, but does not need
3107      * to apply to structures that contain combined stage 1 and stage 2
3108      * translation information.
3109      * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3110      */
3111     ARMCPU *cpu = arm_env_get_cpu(env);
3112     CPUState *cs = CPU(cpu);
3113     uint64_t pageaddr;
3114 
3115     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3116         return;
3117     }
3118 
3119     pageaddr = sextract64(value << 12, 0, 48);
3120 
3121     tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS));
3122 }
3123 
3124 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3125                                       uint64_t value)
3126 {
3127     CPUState *cs = ENV_GET_CPU(env);
3128     uint64_t pageaddr;
3129 
3130     if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3131         return;
3132     }
3133 
3134     pageaddr = sextract64(value << 12, 0, 48);
3135 
3136     tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3137                                              (1 << ARMMMUIdx_S2NS));
3138 }
3139 
3140 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3141                                       bool isread)
3142 {
3143     /* We don't implement EL2, so the only control on DC ZVA is the
3144      * bit in the SCTLR which can prohibit access for EL0.
3145      */
3146     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3147         return CP_ACCESS_TRAP;
3148     }
3149     return CP_ACCESS_OK;
3150 }
3151 
3152 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3153 {
3154     ARMCPU *cpu = arm_env_get_cpu(env);
3155     int dzp_bit = 1 << 4;
3156 
3157     /* DZP indicates whether DC ZVA access is allowed */
3158     if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3159         dzp_bit = 0;
3160     }
3161     return cpu->dcz_blocksize | dzp_bit;
3162 }
3163 
3164 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3165                                     bool isread)
3166 {
3167     if (!(env->pstate & PSTATE_SP)) {
3168         /* Access to SP_EL0 is undefined if it's being used as
3169          * the stack pointer.
3170          */
3171         return CP_ACCESS_TRAP_UNCATEGORIZED;
3172     }
3173     return CP_ACCESS_OK;
3174 }
3175 
3176 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3177 {
3178     return env->pstate & PSTATE_SP;
3179 }
3180 
3181 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3182 {
3183     update_spsel(env, val);
3184 }
3185 
3186 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3187                         uint64_t value)
3188 {
3189     ARMCPU *cpu = arm_env_get_cpu(env);
3190 
3191     if (raw_read(env, ri) == value) {
3192         /* Skip the TLB flush if nothing actually changed; Linux likes
3193          * to do a lot of pointless SCTLR writes.
3194          */
3195         return;
3196     }
3197 
3198     raw_write(env, ri, value);
3199     /* ??? Lots of these bits are not implemented.  */
3200     /* This may enable/disable the MMU, so do a TLB flush.  */
3201     tlb_flush(CPU(cpu));
3202 }
3203 
3204 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3205                                      bool isread)
3206 {
3207     if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3208         return CP_ACCESS_TRAP_FP_EL2;
3209     }
3210     if (env->cp15.cptr_el[3] & CPTR_TFP) {
3211         return CP_ACCESS_TRAP_FP_EL3;
3212     }
3213     return CP_ACCESS_OK;
3214 }
3215 
3216 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3217                        uint64_t value)
3218 {
3219     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3220 }
3221 
3222 static const ARMCPRegInfo v8_cp_reginfo[] = {
3223     /* Minimal set of EL0-visible registers. This will need to be expanded
3224      * significantly for system emulation of AArch64 CPUs.
3225      */
3226     { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3227       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3228       .access = PL0_RW, .type = ARM_CP_NZCV },
3229     { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3230       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3231       .type = ARM_CP_NO_RAW,
3232       .access = PL0_RW, .accessfn = aa64_daif_access,
3233       .fieldoffset = offsetof(CPUARMState, daif),
3234       .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3235     { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3236       .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3237       .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3238     { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3239       .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3240       .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3241     { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3242       .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3243       .access = PL0_R, .type = ARM_CP_NO_RAW,
3244       .readfn = aa64_dczid_read },
3245     { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3246       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3247       .access = PL0_W, .type = ARM_CP_DC_ZVA,
3248 #ifndef CONFIG_USER_ONLY
3249       /* Avoid overhead of an access check that always passes in user-mode */
3250       .accessfn = aa64_zva_access,
3251 #endif
3252     },
3253     { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3254       .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3255       .access = PL1_R, .type = ARM_CP_CURRENTEL },
3256     /* Cache ops: all NOPs since we don't emulate caches */
3257     { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3258       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3259       .access = PL1_W, .type = ARM_CP_NOP },
3260     { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3261       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3262       .access = PL1_W, .type = ARM_CP_NOP },
3263     { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3264       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3265       .access = PL0_W, .type = ARM_CP_NOP,
3266       .accessfn = aa64_cacheop_access },
3267     { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3268       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3269       .access = PL1_W, .type = ARM_CP_NOP },
3270     { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3271       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3272       .access = PL1_W, .type = ARM_CP_NOP },
3273     { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3274       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3275       .access = PL0_W, .type = ARM_CP_NOP,
3276       .accessfn = aa64_cacheop_access },
3277     { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3278       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3279       .access = PL1_W, .type = ARM_CP_NOP },
3280     { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3281       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3282       .access = PL0_W, .type = ARM_CP_NOP,
3283       .accessfn = aa64_cacheop_access },
3284     { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3285       .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3286       .access = PL0_W, .type = ARM_CP_NOP,
3287       .accessfn = aa64_cacheop_access },
3288     { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3289       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3290       .access = PL1_W, .type = ARM_CP_NOP },
3291     /* TLBI operations */
3292     { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3293       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3294       .access = PL1_W, .type = ARM_CP_NO_RAW,
3295       .writefn = tlbi_aa64_vmalle1is_write },
3296     { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3297       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3298       .access = PL1_W, .type = ARM_CP_NO_RAW,
3299       .writefn = tlbi_aa64_vae1is_write },
3300     { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3301       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3302       .access = PL1_W, .type = ARM_CP_NO_RAW,
3303       .writefn = tlbi_aa64_vmalle1is_write },
3304     { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3305       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3306       .access = PL1_W, .type = ARM_CP_NO_RAW,
3307       .writefn = tlbi_aa64_vae1is_write },
3308     { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3309       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3310       .access = PL1_W, .type = ARM_CP_NO_RAW,
3311       .writefn = tlbi_aa64_vae1is_write },
3312     { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3313       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3314       .access = PL1_W, .type = ARM_CP_NO_RAW,
3315       .writefn = tlbi_aa64_vae1is_write },
3316     { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3317       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3318       .access = PL1_W, .type = ARM_CP_NO_RAW,
3319       .writefn = tlbi_aa64_vmalle1_write },
3320     { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3321       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3322       .access = PL1_W, .type = ARM_CP_NO_RAW,
3323       .writefn = tlbi_aa64_vae1_write },
3324     { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3325       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3326       .access = PL1_W, .type = ARM_CP_NO_RAW,
3327       .writefn = tlbi_aa64_vmalle1_write },
3328     { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3329       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3330       .access = PL1_W, .type = ARM_CP_NO_RAW,
3331       .writefn = tlbi_aa64_vae1_write },
3332     { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3333       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3334       .access = PL1_W, .type = ARM_CP_NO_RAW,
3335       .writefn = tlbi_aa64_vae1_write },
3336     { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3337       .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3338       .access = PL1_W, .type = ARM_CP_NO_RAW,
3339       .writefn = tlbi_aa64_vae1_write },
3340     { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3341       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3342       .access = PL2_W, .type = ARM_CP_NO_RAW,
3343       .writefn = tlbi_aa64_ipas2e1is_write },
3344     { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3345       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3346       .access = PL2_W, .type = ARM_CP_NO_RAW,
3347       .writefn = tlbi_aa64_ipas2e1is_write },
3348     { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3349       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3350       .access = PL2_W, .type = ARM_CP_NO_RAW,
3351       .writefn = tlbi_aa64_alle1is_write },
3352     { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3353       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3354       .access = PL2_W, .type = ARM_CP_NO_RAW,
3355       .writefn = tlbi_aa64_alle1is_write },
3356     { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3357       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3358       .access = PL2_W, .type = ARM_CP_NO_RAW,
3359       .writefn = tlbi_aa64_ipas2e1_write },
3360     { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3361       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3362       .access = PL2_W, .type = ARM_CP_NO_RAW,
3363       .writefn = tlbi_aa64_ipas2e1_write },
3364     { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3365       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3366       .access = PL2_W, .type = ARM_CP_NO_RAW,
3367       .writefn = tlbi_aa64_alle1_write },
3368     { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3369       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3370       .access = PL2_W, .type = ARM_CP_NO_RAW,
3371       .writefn = tlbi_aa64_alle1is_write },
3372 #ifndef CONFIG_USER_ONLY
3373     /* 64 bit address translation operations */
3374     { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3375       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3376       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3377     { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3378       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3379       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3380     { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3381       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3382       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3383     { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3384       .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3385       .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3386     { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3387       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3388       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3389     { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3390       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3391       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3392     { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3393       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3394       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3395     { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3396       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3397       .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3398     /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3399     { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3400       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3401       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3402     { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3403       .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3404       .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3405     { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3406       .type = ARM_CP_ALIAS,
3407       .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3408       .access = PL1_RW, .resetvalue = 0,
3409       .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3410       .writefn = par_write },
3411 #endif
3412     /* TLB invalidate last level of translation table walk */
3413     { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3414       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3415     { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3416       .type = ARM_CP_NO_RAW, .access = PL1_W,
3417       .writefn = tlbimvaa_is_write },
3418     { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3419       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3420     { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3421       .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3422     { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3423       .type = ARM_CP_NO_RAW, .access = PL2_W,
3424       .writefn = tlbimva_hyp_write },
3425     { .name = "TLBIMVALHIS",
3426       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3427       .type = ARM_CP_NO_RAW, .access = PL2_W,
3428       .writefn = tlbimva_hyp_is_write },
3429     { .name = "TLBIIPAS2",
3430       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3431       .type = ARM_CP_NO_RAW, .access = PL2_W,
3432       .writefn = tlbiipas2_write },
3433     { .name = "TLBIIPAS2IS",
3434       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3435       .type = ARM_CP_NO_RAW, .access = PL2_W,
3436       .writefn = tlbiipas2_is_write },
3437     { .name = "TLBIIPAS2L",
3438       .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3439       .type = ARM_CP_NO_RAW, .access = PL2_W,
3440       .writefn = tlbiipas2_write },
3441     { .name = "TLBIIPAS2LIS",
3442       .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3443       .type = ARM_CP_NO_RAW, .access = PL2_W,
3444       .writefn = tlbiipas2_is_write },
3445     /* 32 bit cache operations */
3446     { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3447       .type = ARM_CP_NOP, .access = PL1_W },
3448     { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3449       .type = ARM_CP_NOP, .access = PL1_W },
3450     { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3451       .type = ARM_CP_NOP, .access = PL1_W },
3452     { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3453       .type = ARM_CP_NOP, .access = PL1_W },
3454     { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3455       .type = ARM_CP_NOP, .access = PL1_W },
3456     { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3457       .type = ARM_CP_NOP, .access = PL1_W },
3458     { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3459       .type = ARM_CP_NOP, .access = PL1_W },
3460     { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3461       .type = ARM_CP_NOP, .access = PL1_W },
3462     { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3463       .type = ARM_CP_NOP, .access = PL1_W },
3464     { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3465       .type = ARM_CP_NOP, .access = PL1_W },
3466     { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3467       .type = ARM_CP_NOP, .access = PL1_W },
3468     { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3469       .type = ARM_CP_NOP, .access = PL1_W },
3470     { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3471       .type = ARM_CP_NOP, .access = PL1_W },
3472     /* MMU Domain access control / MPU write buffer control */
3473     { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3474       .access = PL1_RW, .resetvalue = 0,
3475       .writefn = dacr_write, .raw_writefn = raw_write,
3476       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3477                              offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3478     { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3479       .type = ARM_CP_ALIAS,
3480       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3481       .access = PL1_RW,
3482       .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3483     { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3484       .type = ARM_CP_ALIAS,
3485       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3486       .access = PL1_RW,
3487       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3488     /* We rely on the access checks not allowing the guest to write to the
3489      * state field when SPSel indicates that it's being used as the stack
3490      * pointer.
3491      */
3492     { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3493       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3494       .access = PL1_RW, .accessfn = sp_el0_access,
3495       .type = ARM_CP_ALIAS,
3496       .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3497     { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3498       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3499       .access = PL2_RW, .type = ARM_CP_ALIAS,
3500       .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3501     { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3502       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3503       .type = ARM_CP_NO_RAW,
3504       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3505     { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3506       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3507       .type = ARM_CP_ALIAS,
3508       .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3509       .access = PL2_RW, .accessfn = fpexc32_access },
3510     { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3511       .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3512       .access = PL2_RW, .resetvalue = 0,
3513       .writefn = dacr_write, .raw_writefn = raw_write,
3514       .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3515     { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3516       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3517       .access = PL2_RW, .resetvalue = 0,
3518       .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3519     { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3520       .type = ARM_CP_ALIAS,
3521       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3522       .access = PL2_RW,
3523       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3524     { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3525       .type = ARM_CP_ALIAS,
3526       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3527       .access = PL2_RW,
3528       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3529     { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3530       .type = ARM_CP_ALIAS,
3531       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3532       .access = PL2_RW,
3533       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3534     { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3535       .type = ARM_CP_ALIAS,
3536       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3537       .access = PL2_RW,
3538       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3539     { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3540       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3541       .resetvalue = 0,
3542       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3543     { .name = "SDCR", .type = ARM_CP_ALIAS,
3544       .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3545       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3546       .writefn = sdcr_write,
3547       .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3548     REGINFO_SENTINEL
3549 };
3550 
3551 /* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3552 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3553     { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3554       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3555       .access = PL2_RW,
3556       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3557     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3558       .type = ARM_CP_NO_RAW,
3559       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3560       .access = PL2_RW,
3561       .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3562     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3563       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3564       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3565     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3566       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3567       .access = PL2_RW, .type = ARM_CP_CONST,
3568       .resetvalue = 0 },
3569     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3570       .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3571       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3572     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3573       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3574       .access = PL2_RW, .type = ARM_CP_CONST,
3575       .resetvalue = 0 },
3576     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3577       .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3578       .access = PL2_RW, .type = ARM_CP_CONST,
3579       .resetvalue = 0 },
3580     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3581       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3582       .access = PL2_RW, .type = ARM_CP_CONST,
3583       .resetvalue = 0 },
3584     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3585       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3586       .access = PL2_RW, .type = ARM_CP_CONST,
3587       .resetvalue = 0 },
3588     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3589       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3590       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3591     { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3592       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3593       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3594       .type = ARM_CP_CONST, .resetvalue = 0 },
3595     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3596       .cp = 15, .opc1 = 6, .crm = 2,
3597       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3598       .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3599     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3600       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3601       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3602     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3603       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3604       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3605     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3606       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3607       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3608     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3609       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3610       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3611     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3612       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3613       .resetvalue = 0 },
3614     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3615       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3616       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3617     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3618       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3619       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3620     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3621       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3622       .resetvalue = 0 },
3623     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3624       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3625       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3626     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3627       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3628       .resetvalue = 0 },
3629     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3630       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3631       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3632     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3633       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3634       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3635     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3636       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3637       .access = PL2_RW, .accessfn = access_tda,
3638       .type = ARM_CP_CONST, .resetvalue = 0 },
3639     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3640       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3641       .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3642       .type = ARM_CP_CONST, .resetvalue = 0 },
3643     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3644       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3645       .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3646     REGINFO_SENTINEL
3647 };
3648 
3649 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3650 {
3651     ARMCPU *cpu = arm_env_get_cpu(env);
3652     uint64_t valid_mask = HCR_MASK;
3653 
3654     if (arm_feature(env, ARM_FEATURE_EL3)) {
3655         valid_mask &= ~HCR_HCD;
3656     } else {
3657         valid_mask &= ~HCR_TSC;
3658     }
3659 
3660     /* Clear RES0 bits.  */
3661     value &= valid_mask;
3662 
3663     /* These bits change the MMU setup:
3664      * HCR_VM enables stage 2 translation
3665      * HCR_PTW forbids certain page-table setups
3666      * HCR_DC Disables stage1 and enables stage2 translation
3667      */
3668     if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3669         tlb_flush(CPU(cpu));
3670     }
3671     raw_write(env, ri, value);
3672 }
3673 
3674 static const ARMCPRegInfo el2_cp_reginfo[] = {
3675     { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3676       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3677       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3678       .writefn = hcr_write },
3679     { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3680       .type = ARM_CP_ALIAS,
3681       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3682       .access = PL2_RW,
3683       .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3684     { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3685       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3686       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3687     { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3688       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3689       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3690     { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3691       .type = ARM_CP_ALIAS,
3692       .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3693       .access = PL2_RW,
3694       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3695     { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3696       .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3697       .access = PL2_RW, .writefn = vbar_write,
3698       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3699       .resetvalue = 0 },
3700     { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3701       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3702       .access = PL3_RW, .type = ARM_CP_ALIAS,
3703       .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3704     { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3705       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3706       .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3707       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3708     { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3709       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3710       .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3711       .resetvalue = 0 },
3712     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3713       .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3714       .access = PL2_RW, .type = ARM_CP_ALIAS,
3715       .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3716     { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3717       .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3718       .access = PL2_RW, .type = ARM_CP_CONST,
3719       .resetvalue = 0 },
3720     /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
3721     { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3722       .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3723       .access = PL2_RW, .type = ARM_CP_CONST,
3724       .resetvalue = 0 },
3725     { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3726       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3727       .access = PL2_RW, .type = ARM_CP_CONST,
3728       .resetvalue = 0 },
3729     { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3730       .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3731       .access = PL2_RW, .type = ARM_CP_CONST,
3732       .resetvalue = 0 },
3733     { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3734       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3735       .access = PL2_RW,
3736       /* no .writefn needed as this can't cause an ASID change;
3737        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3738        */
3739       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3740     { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3741       .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3742       .type = ARM_CP_ALIAS,
3743       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3744       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3745     { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3746       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3747       .access = PL2_RW,
3748       /* no .writefn needed as this can't cause an ASID change;
3749        * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3750        */
3751       .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3752     { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3753       .cp = 15, .opc1 = 6, .crm = 2,
3754       .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3755       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3756       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3757       .writefn = vttbr_write },
3758     { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3759       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3760       .access = PL2_RW, .writefn = vttbr_write,
3761       .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3762     { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3763       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3764       .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3765       .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3766     { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3767       .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3768       .access = PL2_RW, .resetvalue = 0,
3769       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3770     { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3771       .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3772       .access = PL2_RW, .resetvalue = 0,
3773       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3774     { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3775       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3776       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3777     { .name = "TLBIALLNSNH",
3778       .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3779       .type = ARM_CP_NO_RAW, .access = PL2_W,
3780       .writefn = tlbiall_nsnh_write },
3781     { .name = "TLBIALLNSNHIS",
3782       .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3783       .type = ARM_CP_NO_RAW, .access = PL2_W,
3784       .writefn = tlbiall_nsnh_is_write },
3785     { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3786       .type = ARM_CP_NO_RAW, .access = PL2_W,
3787       .writefn = tlbiall_hyp_write },
3788     { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3789       .type = ARM_CP_NO_RAW, .access = PL2_W,
3790       .writefn = tlbiall_hyp_is_write },
3791     { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3792       .type = ARM_CP_NO_RAW, .access = PL2_W,
3793       .writefn = tlbimva_hyp_write },
3794     { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3795       .type = ARM_CP_NO_RAW, .access = PL2_W,
3796       .writefn = tlbimva_hyp_is_write },
3797     { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3798       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3799       .type = ARM_CP_NO_RAW, .access = PL2_W,
3800       .writefn = tlbi_aa64_alle2_write },
3801     { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
3802       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3803       .type = ARM_CP_NO_RAW, .access = PL2_W,
3804       .writefn = tlbi_aa64_vae2_write },
3805     { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
3806       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3807       .access = PL2_W, .type = ARM_CP_NO_RAW,
3808       .writefn = tlbi_aa64_vae2_write },
3809     { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
3810       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3811       .access = PL2_W, .type = ARM_CP_NO_RAW,
3812       .writefn = tlbi_aa64_alle2is_write },
3813     { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
3814       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3815       .type = ARM_CP_NO_RAW, .access = PL2_W,
3816       .writefn = tlbi_aa64_vae2is_write },
3817     { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
3818       .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3819       .access = PL2_W, .type = ARM_CP_NO_RAW,
3820       .writefn = tlbi_aa64_vae2is_write },
3821 #ifndef CONFIG_USER_ONLY
3822     /* Unlike the other EL2-related AT operations, these must
3823      * UNDEF from EL3 if EL2 is not implemented, which is why we
3824      * define them here rather than with the rest of the AT ops.
3825      */
3826     { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
3827       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3828       .access = PL2_W, .accessfn = at_s1e2_access,
3829       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3830     { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
3831       .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3832       .access = PL2_W, .accessfn = at_s1e2_access,
3833       .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3834     /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
3835      * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
3836      * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
3837      * to behave as if SCR.NS was 1.
3838      */
3839     { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3840       .access = PL2_W,
3841       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3842     { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3843       .access = PL2_W,
3844       .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3845     { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3846       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3847       /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
3848        * reset values as IMPDEF. We choose to reset to 3 to comply with
3849        * both ARMv7 and ARMv8.
3850        */
3851       .access = PL2_RW, .resetvalue = 3,
3852       .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
3853     { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3854       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3855       .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3856       .writefn = gt_cntvoff_write,
3857       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3858     { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3859       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
3860       .writefn = gt_cntvoff_write,
3861       .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3862     { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3863       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3864       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3865       .type = ARM_CP_IO, .access = PL2_RW,
3866       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3867     { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3868       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3869       .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
3870       .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3871     { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3872       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3873       .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
3874       .resetfn = gt_hyp_timer_reset,
3875       .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
3876     { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3877       .type = ARM_CP_IO,
3878       .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3879       .access = PL2_RW,
3880       .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
3881       .resetvalue = 0,
3882       .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
3883 #endif
3884     /* The only field of MDCR_EL2 that has a defined architectural reset value
3885      * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
3886      * don't impelment any PMU event counters, so using zero as a reset
3887      * value for MDCR_EL2 is okay
3888      */
3889     { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3890       .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3891       .access = PL2_RW, .resetvalue = 0,
3892       .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
3893     { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
3894       .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3895       .access = PL2_RW, .accessfn = access_el3_aa32ns,
3896       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3897     { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
3898       .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3899       .access = PL2_RW,
3900       .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3901     { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3902       .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3903       .access = PL2_RW,
3904       .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
3905     REGINFO_SENTINEL
3906 };
3907 
3908 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
3909                                    bool isread)
3910 {
3911     /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
3912      * At Secure EL1 it traps to EL3.
3913      */
3914     if (arm_current_el(env) == 3) {
3915         return CP_ACCESS_OK;
3916     }
3917     if (arm_is_secure_below_el3(env)) {
3918         return CP_ACCESS_TRAP_EL3;
3919     }
3920     /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
3921     if (isread) {
3922         return CP_ACCESS_OK;
3923     }
3924     return CP_ACCESS_TRAP_UNCATEGORIZED;
3925 }
3926 
3927 static const ARMCPRegInfo el3_cp_reginfo[] = {
3928     { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
3929       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
3930       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
3931       .resetvalue = 0, .writefn = scr_write },
3932     { .name = "SCR",  .type = ARM_CP_ALIAS,
3933       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
3934       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3935       .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
3936       .writefn = scr_write },
3937     { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
3938       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
3939       .access = PL3_RW, .resetvalue = 0,
3940       .fieldoffset = offsetof(CPUARMState, cp15.sder) },
3941     { .name = "SDER",
3942       .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
3943       .access = PL3_RW, .resetvalue = 0,
3944       .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
3945     { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
3946       .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3947       .writefn = vbar_write, .resetvalue = 0,
3948       .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
3949     { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
3950       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
3951       .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3952       .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
3953     { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
3954       .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
3955       .access = PL3_RW,
3956       /* no .writefn needed as this can't cause an ASID change;
3957        * we must provide a .raw_writefn and .resetfn because we handle
3958        * reset and migration for the AArch32 TTBCR(S), which might be
3959        * using mask and base_mask.
3960        */
3961       .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
3962       .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
3963     { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
3964       .type = ARM_CP_ALIAS,
3965       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
3966       .access = PL3_RW,
3967       .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
3968     { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
3969       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
3970       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
3971     { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
3972       .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
3973       .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
3974     { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
3975       .type = ARM_CP_ALIAS,
3976       .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
3977       .access = PL3_RW,
3978       .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
3979     { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
3980       .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
3981       .access = PL3_RW, .writefn = vbar_write,
3982       .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
3983       .resetvalue = 0 },
3984     { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
3985       .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
3986       .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
3987       .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
3988     { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
3989       .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
3990       .access = PL3_RW, .resetvalue = 0,
3991       .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
3992     { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
3993       .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
3994       .access = PL3_RW, .type = ARM_CP_CONST,
3995       .resetvalue = 0 },
3996     { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
3997       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
3998       .access = PL3_RW, .type = ARM_CP_CONST,
3999       .resetvalue = 0 },
4000     { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
4001       .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
4002       .access = PL3_RW, .type = ARM_CP_CONST,
4003       .resetvalue = 0 },
4004     { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
4005       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
4006       .access = PL3_W, .type = ARM_CP_NO_RAW,
4007       .writefn = tlbi_aa64_alle3is_write },
4008     { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
4009       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
4010       .access = PL3_W, .type = ARM_CP_NO_RAW,
4011       .writefn = tlbi_aa64_vae3is_write },
4012     { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
4013       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
4014       .access = PL3_W, .type = ARM_CP_NO_RAW,
4015       .writefn = tlbi_aa64_vae3is_write },
4016     { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
4017       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
4018       .access = PL3_W, .type = ARM_CP_NO_RAW,
4019       .writefn = tlbi_aa64_alle3_write },
4020     { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
4021       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
4022       .access = PL3_W, .type = ARM_CP_NO_RAW,
4023       .writefn = tlbi_aa64_vae3_write },
4024     { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
4025       .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
4026       .access = PL3_W, .type = ARM_CP_NO_RAW,
4027       .writefn = tlbi_aa64_vae3_write },
4028     REGINFO_SENTINEL
4029 };
4030 
4031 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4032                                      bool isread)
4033 {
4034     /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
4035      * but the AArch32 CTR has its own reginfo struct)
4036      */
4037     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4038         return CP_ACCESS_TRAP;
4039     }
4040     return CP_ACCESS_OK;
4041 }
4042 
4043 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4044                         uint64_t value)
4045 {
4046     /* Writes to OSLAR_EL1 may update the OS lock status, which can be
4047      * read via a bit in OSLSR_EL1.
4048      */
4049     int oslock;
4050 
4051     if (ri->state == ARM_CP_STATE_AA32) {
4052         oslock = (value == 0xC5ACCE55);
4053     } else {
4054         oslock = value & 1;
4055     }
4056 
4057     env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4058 }
4059 
4060 static const ARMCPRegInfo debug_cp_reginfo[] = {
4061     /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
4062      * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
4063      * unlike DBGDRAR it is never accessible from EL0.
4064      * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4065      * accessor.
4066      */
4067     { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4068       .access = PL0_R, .accessfn = access_tdra,
4069       .type = ARM_CP_CONST, .resetvalue = 0 },
4070     { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4071       .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4072       .access = PL1_R, .accessfn = access_tdra,
4073       .type = ARM_CP_CONST, .resetvalue = 0 },
4074     { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4075       .access = PL0_R, .accessfn = access_tdra,
4076       .type = ARM_CP_CONST, .resetvalue = 0 },
4077     /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4078     { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4079       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4080       .access = PL1_RW, .accessfn = access_tda,
4081       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4082       .resetvalue = 0 },
4083     /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4084      * We don't implement the configurable EL0 access.
4085      */
4086     { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4087       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4088       .type = ARM_CP_ALIAS,
4089       .access = PL1_R, .accessfn = access_tda,
4090       .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4091     { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4092       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4093       .access = PL1_W, .type = ARM_CP_NO_RAW,
4094       .accessfn = access_tdosa,
4095       .writefn = oslar_write },
4096     { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4097       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4098       .access = PL1_R, .resetvalue = 10,
4099       .accessfn = access_tdosa,
4100       .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4101     /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4102     { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4103       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4104       .access = PL1_RW, .accessfn = access_tdosa,
4105       .type = ARM_CP_NOP },
4106     /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4107      * implement vector catch debug events yet.
4108      */
4109     { .name = "DBGVCR",
4110       .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4111       .access = PL1_RW, .accessfn = access_tda,
4112       .type = ARM_CP_NOP },
4113     /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
4114      * to save and restore a 32-bit guest's DBGVCR)
4115      */
4116     { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
4117       .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
4118       .access = PL2_RW, .accessfn = access_tda,
4119       .type = ARM_CP_NOP },
4120     /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
4121      * Channel but Linux may try to access this register. The 32-bit
4122      * alias is DBGDCCINT.
4123      */
4124     { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
4125       .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4126       .access = PL1_RW, .accessfn = access_tda,
4127       .type = ARM_CP_NOP },
4128     REGINFO_SENTINEL
4129 };
4130 
4131 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4132     /* 64 bit access versions of the (dummy) debug registers */
4133     { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4134       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4135     { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4136       .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4137     REGINFO_SENTINEL
4138 };
4139 
4140 void hw_watchpoint_update(ARMCPU *cpu, int n)
4141 {
4142     CPUARMState *env = &cpu->env;
4143     vaddr len = 0;
4144     vaddr wvr = env->cp15.dbgwvr[n];
4145     uint64_t wcr = env->cp15.dbgwcr[n];
4146     int mask;
4147     int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4148 
4149     if (env->cpu_watchpoint[n]) {
4150         cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4151         env->cpu_watchpoint[n] = NULL;
4152     }
4153 
4154     if (!extract64(wcr, 0, 1)) {
4155         /* E bit clear : watchpoint disabled */
4156         return;
4157     }
4158 
4159     switch (extract64(wcr, 3, 2)) {
4160     case 0:
4161         /* LSC 00 is reserved and must behave as if the wp is disabled */
4162         return;
4163     case 1:
4164         flags |= BP_MEM_READ;
4165         break;
4166     case 2:
4167         flags |= BP_MEM_WRITE;
4168         break;
4169     case 3:
4170         flags |= BP_MEM_ACCESS;
4171         break;
4172     }
4173 
4174     /* Attempts to use both MASK and BAS fields simultaneously are
4175      * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4176      * thus generating a watchpoint for every byte in the masked region.
4177      */
4178     mask = extract64(wcr, 24, 4);
4179     if (mask == 1 || mask == 2) {
4180         /* Reserved values of MASK; we must act as if the mask value was
4181          * some non-reserved value, or as if the watchpoint were disabled.
4182          * We choose the latter.
4183          */
4184         return;
4185     } else if (mask) {
4186         /* Watchpoint covers an aligned area up to 2GB in size */
4187         len = 1ULL << mask;
4188         /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4189          * whether the watchpoint fires when the unmasked bits match; we opt
4190          * to generate the exceptions.
4191          */
4192         wvr &= ~(len - 1);
4193     } else {
4194         /* Watchpoint covers bytes defined by the byte address select bits */
4195         int bas = extract64(wcr, 5, 8);
4196         int basstart;
4197 
4198         if (bas == 0) {
4199             /* This must act as if the watchpoint is disabled */
4200             return;
4201         }
4202 
4203         if (extract64(wvr, 2, 1)) {
4204             /* Deprecated case of an only 4-aligned address. BAS[7:4] are
4205              * ignored, and BAS[3:0] define which bytes to watch.
4206              */
4207             bas &= 0xf;
4208         }
4209         /* The BAS bits are supposed to be programmed to indicate a contiguous
4210          * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4211          * we fire for each byte in the word/doubleword addressed by the WVR.
4212          * We choose to ignore any non-zero bits after the first range of 1s.
4213          */
4214         basstart = ctz32(bas);
4215         len = cto32(bas >> basstart);
4216         wvr += basstart;
4217     }
4218 
4219     cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4220                           &env->cpu_watchpoint[n]);
4221 }
4222 
4223 void hw_watchpoint_update_all(ARMCPU *cpu)
4224 {
4225     int i;
4226     CPUARMState *env = &cpu->env;
4227 
4228     /* Completely clear out existing QEMU watchpoints and our array, to
4229      * avoid possible stale entries following migration load.
4230      */
4231     cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4232     memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4233 
4234     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4235         hw_watchpoint_update(cpu, i);
4236     }
4237 }
4238 
4239 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4240                          uint64_t value)
4241 {
4242     ARMCPU *cpu = arm_env_get_cpu(env);
4243     int i = ri->crm;
4244 
4245     /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4246      * register reads and behaves as if values written are sign extended.
4247      * Bits [1:0] are RES0.
4248      */
4249     value = sextract64(value, 0, 49) & ~3ULL;
4250 
4251     raw_write(env, ri, value);
4252     hw_watchpoint_update(cpu, i);
4253 }
4254 
4255 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4256                          uint64_t value)
4257 {
4258     ARMCPU *cpu = arm_env_get_cpu(env);
4259     int i = ri->crm;
4260 
4261     raw_write(env, ri, value);
4262     hw_watchpoint_update(cpu, i);
4263 }
4264 
4265 void hw_breakpoint_update(ARMCPU *cpu, int n)
4266 {
4267     CPUARMState *env = &cpu->env;
4268     uint64_t bvr = env->cp15.dbgbvr[n];
4269     uint64_t bcr = env->cp15.dbgbcr[n];
4270     vaddr addr;
4271     int bt;
4272     int flags = BP_CPU;
4273 
4274     if (env->cpu_breakpoint[n]) {
4275         cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4276         env->cpu_breakpoint[n] = NULL;
4277     }
4278 
4279     if (!extract64(bcr, 0, 1)) {
4280         /* E bit clear : watchpoint disabled */
4281         return;
4282     }
4283 
4284     bt = extract64(bcr, 20, 4);
4285 
4286     switch (bt) {
4287     case 4: /* unlinked address mismatch (reserved if AArch64) */
4288     case 5: /* linked address mismatch (reserved if AArch64) */
4289         qemu_log_mask(LOG_UNIMP,
4290                       "arm: address mismatch breakpoint types not implemented");
4291         return;
4292     case 0: /* unlinked address match */
4293     case 1: /* linked address match */
4294     {
4295         /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4296          * we behave as if the register was sign extended. Bits [1:0] are
4297          * RES0. The BAS field is used to allow setting breakpoints on 16
4298          * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4299          * a bp will fire if the addresses covered by the bp and the addresses
4300          * covered by the insn overlap but the insn doesn't start at the
4301          * start of the bp address range. We choose to require the insn and
4302          * the bp to have the same address. The constraints on writing to
4303          * BAS enforced in dbgbcr_write mean we have only four cases:
4304          *  0b0000  => no breakpoint
4305          *  0b0011  => breakpoint on addr
4306          *  0b1100  => breakpoint on addr + 2
4307          *  0b1111  => breakpoint on addr
4308          * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4309          */
4310         int bas = extract64(bcr, 5, 4);
4311         addr = sextract64(bvr, 0, 49) & ~3ULL;
4312         if (bas == 0) {
4313             return;
4314         }
4315         if (bas == 0xc) {
4316             addr += 2;
4317         }
4318         break;
4319     }
4320     case 2: /* unlinked context ID match */
4321     case 8: /* unlinked VMID match (reserved if no EL2) */
4322     case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4323         qemu_log_mask(LOG_UNIMP,
4324                       "arm: unlinked context breakpoint types not implemented");
4325         return;
4326     case 9: /* linked VMID match (reserved if no EL2) */
4327     case 11: /* linked context ID and VMID match (reserved if no EL2) */
4328     case 3: /* linked context ID match */
4329     default:
4330         /* We must generate no events for Linked context matches (unless
4331          * they are linked to by some other bp/wp, which is handled in
4332          * updates for the linking bp/wp). We choose to also generate no events
4333          * for reserved values.
4334          */
4335         return;
4336     }
4337 
4338     cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4339 }
4340 
4341 void hw_breakpoint_update_all(ARMCPU *cpu)
4342 {
4343     int i;
4344     CPUARMState *env = &cpu->env;
4345 
4346     /* Completely clear out existing QEMU breakpoints and our array, to
4347      * avoid possible stale entries following migration load.
4348      */
4349     cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4350     memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4351 
4352     for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4353         hw_breakpoint_update(cpu, i);
4354     }
4355 }
4356 
4357 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4358                          uint64_t value)
4359 {
4360     ARMCPU *cpu = arm_env_get_cpu(env);
4361     int i = ri->crm;
4362 
4363     raw_write(env, ri, value);
4364     hw_breakpoint_update(cpu, i);
4365 }
4366 
4367 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4368                          uint64_t value)
4369 {
4370     ARMCPU *cpu = arm_env_get_cpu(env);
4371     int i = ri->crm;
4372 
4373     /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4374      * copy of BAS[0].
4375      */
4376     value = deposit64(value, 6, 1, extract64(value, 5, 1));
4377     value = deposit64(value, 8, 1, extract64(value, 7, 1));
4378 
4379     raw_write(env, ri, value);
4380     hw_breakpoint_update(cpu, i);
4381 }
4382 
4383 static void define_debug_regs(ARMCPU *cpu)
4384 {
4385     /* Define v7 and v8 architectural debug registers.
4386      * These are just dummy implementations for now.
4387      */
4388     int i;
4389     int wrps, brps, ctx_cmps;
4390     ARMCPRegInfo dbgdidr = {
4391         .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4392         .access = PL0_R, .accessfn = access_tda,
4393         .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4394     };
4395 
4396     /* Note that all these register fields hold "number of Xs minus 1". */
4397     brps = extract32(cpu->dbgdidr, 24, 4);
4398     wrps = extract32(cpu->dbgdidr, 28, 4);
4399     ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
4400 
4401     assert(ctx_cmps <= brps);
4402 
4403     /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4404      * of the debug registers such as number of breakpoints;
4405      * check that if they both exist then they agree.
4406      */
4407     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4408         assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4409         assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4410         assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4411     }
4412 
4413     define_one_arm_cp_reg(cpu, &dbgdidr);
4414     define_arm_cp_regs(cpu, debug_cp_reginfo);
4415 
4416     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4417         define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4418     }
4419 
4420     for (i = 0; i < brps + 1; i++) {
4421         ARMCPRegInfo dbgregs[] = {
4422             { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4423               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4424               .access = PL1_RW, .accessfn = access_tda,
4425               .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4426               .writefn = dbgbvr_write, .raw_writefn = raw_write
4427             },
4428             { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4429               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4430               .access = PL1_RW, .accessfn = access_tda,
4431               .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4432               .writefn = dbgbcr_write, .raw_writefn = raw_write
4433             },
4434             REGINFO_SENTINEL
4435         };
4436         define_arm_cp_regs(cpu, dbgregs);
4437     }
4438 
4439     for (i = 0; i < wrps + 1; i++) {
4440         ARMCPRegInfo dbgregs[] = {
4441             { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4442               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4443               .access = PL1_RW, .accessfn = access_tda,
4444               .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4445               .writefn = dbgwvr_write, .raw_writefn = raw_write
4446             },
4447             { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4448               .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4449               .access = PL1_RW, .accessfn = access_tda,
4450               .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4451               .writefn = dbgwcr_write, .raw_writefn = raw_write
4452             },
4453             REGINFO_SENTINEL
4454         };
4455         define_arm_cp_regs(cpu, dbgregs);
4456     }
4457 }
4458 
4459 void register_cp_regs_for_features(ARMCPU *cpu)
4460 {
4461     /* Register all the coprocessor registers based on feature bits */
4462     CPUARMState *env = &cpu->env;
4463     if (arm_feature(env, ARM_FEATURE_M)) {
4464         /* M profile has no coprocessor registers */
4465         return;
4466     }
4467 
4468     define_arm_cp_regs(cpu, cp_reginfo);
4469     if (!arm_feature(env, ARM_FEATURE_V8)) {
4470         /* Must go early as it is full of wildcards that may be
4471          * overridden by later definitions.
4472          */
4473         define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4474     }
4475 
4476     if (arm_feature(env, ARM_FEATURE_V6)) {
4477         /* The ID registers all have impdef reset values */
4478         ARMCPRegInfo v6_idregs[] = {
4479             { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4480               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4481               .access = PL1_R, .type = ARM_CP_CONST,
4482               .resetvalue = cpu->id_pfr0 },
4483             { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4484               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4485               .access = PL1_R, .type = ARM_CP_CONST,
4486               .resetvalue = cpu->id_pfr1 },
4487             { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4488               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4489               .access = PL1_R, .type = ARM_CP_CONST,
4490               .resetvalue = cpu->id_dfr0 },
4491             { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4492               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4493               .access = PL1_R, .type = ARM_CP_CONST,
4494               .resetvalue = cpu->id_afr0 },
4495             { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4496               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4497               .access = PL1_R, .type = ARM_CP_CONST,
4498               .resetvalue = cpu->id_mmfr0 },
4499             { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4500               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4501               .access = PL1_R, .type = ARM_CP_CONST,
4502               .resetvalue = cpu->id_mmfr1 },
4503             { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4504               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4505               .access = PL1_R, .type = ARM_CP_CONST,
4506               .resetvalue = cpu->id_mmfr2 },
4507             { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4508               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4509               .access = PL1_R, .type = ARM_CP_CONST,
4510               .resetvalue = cpu->id_mmfr3 },
4511             { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4512               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4513               .access = PL1_R, .type = ARM_CP_CONST,
4514               .resetvalue = cpu->id_isar0 },
4515             { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4516               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4517               .access = PL1_R, .type = ARM_CP_CONST,
4518               .resetvalue = cpu->id_isar1 },
4519             { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4520               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4521               .access = PL1_R, .type = ARM_CP_CONST,
4522               .resetvalue = cpu->id_isar2 },
4523             { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4524               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4525               .access = PL1_R, .type = ARM_CP_CONST,
4526               .resetvalue = cpu->id_isar3 },
4527             { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4528               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4529               .access = PL1_R, .type = ARM_CP_CONST,
4530               .resetvalue = cpu->id_isar4 },
4531             { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4532               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4533               .access = PL1_R, .type = ARM_CP_CONST,
4534               .resetvalue = cpu->id_isar5 },
4535             { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4536               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4537               .access = PL1_R, .type = ARM_CP_CONST,
4538               .resetvalue = cpu->id_mmfr4 },
4539             /* 7 is as yet unallocated and must RAZ */
4540             { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
4541               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4542               .access = PL1_R, .type = ARM_CP_CONST,
4543               .resetvalue = 0 },
4544             REGINFO_SENTINEL
4545         };
4546         define_arm_cp_regs(cpu, v6_idregs);
4547         define_arm_cp_regs(cpu, v6_cp_reginfo);
4548     } else {
4549         define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4550     }
4551     if (arm_feature(env, ARM_FEATURE_V6K)) {
4552         define_arm_cp_regs(cpu, v6k_cp_reginfo);
4553     }
4554     if (arm_feature(env, ARM_FEATURE_V7MP) &&
4555         !arm_feature(env, ARM_FEATURE_MPU)) {
4556         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4557     }
4558     if (arm_feature(env, ARM_FEATURE_V7)) {
4559         /* v7 performance monitor control register: same implementor
4560          * field as main ID register, and we implement only the cycle
4561          * count register.
4562          */
4563 #ifndef CONFIG_USER_ONLY
4564         ARMCPRegInfo pmcr = {
4565             .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4566             .access = PL0_RW,
4567             .type = ARM_CP_IO | ARM_CP_ALIAS,
4568             .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4569             .accessfn = pmreg_access, .writefn = pmcr_write,
4570             .raw_writefn = raw_write,
4571         };
4572         ARMCPRegInfo pmcr64 = {
4573             .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4574             .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4575             .access = PL0_RW, .accessfn = pmreg_access,
4576             .type = ARM_CP_IO,
4577             .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4578             .resetvalue = cpu->midr & 0xff000000,
4579             .writefn = pmcr_write, .raw_writefn = raw_write,
4580         };
4581         define_one_arm_cp_reg(cpu, &pmcr);
4582         define_one_arm_cp_reg(cpu, &pmcr64);
4583 #endif
4584         ARMCPRegInfo clidr = {
4585             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4586             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4587             .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4588         };
4589         define_one_arm_cp_reg(cpu, &clidr);
4590         define_arm_cp_regs(cpu, v7_cp_reginfo);
4591         define_debug_regs(cpu);
4592     } else {
4593         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4594     }
4595     if (arm_feature(env, ARM_FEATURE_V8)) {
4596         /* AArch64 ID registers, which all have impdef reset values.
4597          * Note that within the ID register ranges the unused slots
4598          * must all RAZ, not UNDEF; future architecture versions may
4599          * define new registers here.
4600          */
4601         ARMCPRegInfo v8_idregs[] = {
4602             { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4603               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4604               .access = PL1_R, .type = ARM_CP_CONST,
4605               .resetvalue = cpu->id_aa64pfr0 },
4606             { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4607               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4608               .access = PL1_R, .type = ARM_CP_CONST,
4609               .resetvalue = cpu->id_aa64pfr1},
4610             { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4611               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4612               .access = PL1_R, .type = ARM_CP_CONST,
4613               .resetvalue = 0 },
4614             { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4615               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4616               .access = PL1_R, .type = ARM_CP_CONST,
4617               .resetvalue = 0 },
4618             { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4619               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4620               .access = PL1_R, .type = ARM_CP_CONST,
4621               .resetvalue = 0 },
4622             { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4623               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4624               .access = PL1_R, .type = ARM_CP_CONST,
4625               .resetvalue = 0 },
4626             { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4627               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4628               .access = PL1_R, .type = ARM_CP_CONST,
4629               .resetvalue = 0 },
4630             { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4631               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4632               .access = PL1_R, .type = ARM_CP_CONST,
4633               .resetvalue = 0 },
4634             { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4635               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4636               .access = PL1_R, .type = ARM_CP_CONST,
4637               .resetvalue = cpu->id_aa64dfr0 },
4638             { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4639               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4640               .access = PL1_R, .type = ARM_CP_CONST,
4641               .resetvalue = cpu->id_aa64dfr1 },
4642             { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4643               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4644               .access = PL1_R, .type = ARM_CP_CONST,
4645               .resetvalue = 0 },
4646             { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4647               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4648               .access = PL1_R, .type = ARM_CP_CONST,
4649               .resetvalue = 0 },
4650             { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4651               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4652               .access = PL1_R, .type = ARM_CP_CONST,
4653               .resetvalue = cpu->id_aa64afr0 },
4654             { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4655               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4656               .access = PL1_R, .type = ARM_CP_CONST,
4657               .resetvalue = cpu->id_aa64afr1 },
4658             { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4659               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
4660               .access = PL1_R, .type = ARM_CP_CONST,
4661               .resetvalue = 0 },
4662             { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4663               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
4664               .access = PL1_R, .type = ARM_CP_CONST,
4665               .resetvalue = 0 },
4666             { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
4667               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
4668               .access = PL1_R, .type = ARM_CP_CONST,
4669               .resetvalue = cpu->id_aa64isar0 },
4670             { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
4671               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
4672               .access = PL1_R, .type = ARM_CP_CONST,
4673               .resetvalue = cpu->id_aa64isar1 },
4674             { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4675               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
4676               .access = PL1_R, .type = ARM_CP_CONST,
4677               .resetvalue = 0 },
4678             { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4679               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
4680               .access = PL1_R, .type = ARM_CP_CONST,
4681               .resetvalue = 0 },
4682             { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4683               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
4684               .access = PL1_R, .type = ARM_CP_CONST,
4685               .resetvalue = 0 },
4686             { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4687               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
4688               .access = PL1_R, .type = ARM_CP_CONST,
4689               .resetvalue = 0 },
4690             { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4691               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
4692               .access = PL1_R, .type = ARM_CP_CONST,
4693               .resetvalue = 0 },
4694             { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4695               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
4696               .access = PL1_R, .type = ARM_CP_CONST,
4697               .resetvalue = 0 },
4698             { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
4699               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4700               .access = PL1_R, .type = ARM_CP_CONST,
4701               .resetvalue = cpu->id_aa64mmfr0 },
4702             { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
4703               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
4704               .access = PL1_R, .type = ARM_CP_CONST,
4705               .resetvalue = cpu->id_aa64mmfr1 },
4706             { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4707               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
4708               .access = PL1_R, .type = ARM_CP_CONST,
4709               .resetvalue = 0 },
4710             { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4711               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
4712               .access = PL1_R, .type = ARM_CP_CONST,
4713               .resetvalue = 0 },
4714             { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4715               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
4716               .access = PL1_R, .type = ARM_CP_CONST,
4717               .resetvalue = 0 },
4718             { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4719               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
4720               .access = PL1_R, .type = ARM_CP_CONST,
4721               .resetvalue = 0 },
4722             { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4723               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
4724               .access = PL1_R, .type = ARM_CP_CONST,
4725               .resetvalue = 0 },
4726             { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4727               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
4728               .access = PL1_R, .type = ARM_CP_CONST,
4729               .resetvalue = 0 },
4730             { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
4731               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
4732               .access = PL1_R, .type = ARM_CP_CONST,
4733               .resetvalue = cpu->mvfr0 },
4734             { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
4735               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
4736               .access = PL1_R, .type = ARM_CP_CONST,
4737               .resetvalue = cpu->mvfr1 },
4738             { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
4739               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
4740               .access = PL1_R, .type = ARM_CP_CONST,
4741               .resetvalue = cpu->mvfr2 },
4742             { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4743               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
4744               .access = PL1_R, .type = ARM_CP_CONST,
4745               .resetvalue = 0 },
4746             { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4747               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
4748               .access = PL1_R, .type = ARM_CP_CONST,
4749               .resetvalue = 0 },
4750             { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4751               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
4752               .access = PL1_R, .type = ARM_CP_CONST,
4753               .resetvalue = 0 },
4754             { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4755               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
4756               .access = PL1_R, .type = ARM_CP_CONST,
4757               .resetvalue = 0 },
4758             { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4759               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
4760               .access = PL1_R, .type = ARM_CP_CONST,
4761               .resetvalue = 0 },
4762             { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
4763               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
4764               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4765               .resetvalue = cpu->pmceid0 },
4766             { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
4767               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
4768               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4769               .resetvalue = cpu->pmceid0 },
4770             { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
4771               .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
4772               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4773               .resetvalue = cpu->pmceid1 },
4774             { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
4775               .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
4776               .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4777               .resetvalue = cpu->pmceid1 },
4778             REGINFO_SENTINEL
4779         };
4780         /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
4781         if (!arm_feature(env, ARM_FEATURE_EL3) &&
4782             !arm_feature(env, ARM_FEATURE_EL2)) {
4783             ARMCPRegInfo rvbar = {
4784                 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
4785                 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4786                 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
4787             };
4788             define_one_arm_cp_reg(cpu, &rvbar);
4789         }
4790         define_arm_cp_regs(cpu, v8_idregs);
4791         define_arm_cp_regs(cpu, v8_cp_reginfo);
4792     }
4793     if (arm_feature(env, ARM_FEATURE_EL2)) {
4794         uint64_t vmpidr_def = mpidr_read_val(env);
4795         ARMCPRegInfo vpidr_regs[] = {
4796             { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
4797               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4798               .access = PL2_RW, .accessfn = access_el3_aa32ns,
4799               .resetvalue = cpu->midr,
4800               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4801             { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
4802               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4803               .access = PL2_RW, .resetvalue = cpu->midr,
4804               .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4805             { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
4806               .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4807               .access = PL2_RW, .accessfn = access_el3_aa32ns,
4808               .resetvalue = vmpidr_def,
4809               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4810             { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
4811               .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4812               .access = PL2_RW,
4813               .resetvalue = vmpidr_def,
4814               .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4815             REGINFO_SENTINEL
4816         };
4817         define_arm_cp_regs(cpu, vpidr_regs);
4818         define_arm_cp_regs(cpu, el2_cp_reginfo);
4819         /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
4820         if (!arm_feature(env, ARM_FEATURE_EL3)) {
4821             ARMCPRegInfo rvbar = {
4822                 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
4823                 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
4824                 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
4825             };
4826             define_one_arm_cp_reg(cpu, &rvbar);
4827         }
4828     } else {
4829         /* If EL2 is missing but higher ELs are enabled, we need to
4830          * register the no_el2 reginfos.
4831          */
4832         if (arm_feature(env, ARM_FEATURE_EL3)) {
4833             /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
4834              * of MIDR_EL1 and MPIDR_EL1.
4835              */
4836             ARMCPRegInfo vpidr_regs[] = {
4837                 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4838                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4839                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4840                   .type = ARM_CP_CONST, .resetvalue = cpu->midr,
4841                   .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4842                 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4843                   .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4844                   .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4845                   .type = ARM_CP_NO_RAW,
4846                   .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
4847                 REGINFO_SENTINEL
4848             };
4849             define_arm_cp_regs(cpu, vpidr_regs);
4850             define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
4851         }
4852     }
4853     if (arm_feature(env, ARM_FEATURE_EL3)) {
4854         define_arm_cp_regs(cpu, el3_cp_reginfo);
4855         ARMCPRegInfo el3_regs[] = {
4856             { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
4857               .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
4858               .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
4859             { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
4860               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
4861               .access = PL3_RW,
4862               .raw_writefn = raw_write, .writefn = sctlr_write,
4863               .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
4864               .resetvalue = cpu->reset_sctlr },
4865             REGINFO_SENTINEL
4866         };
4867 
4868         define_arm_cp_regs(cpu, el3_regs);
4869     }
4870     /* The behaviour of NSACR is sufficiently various that we don't
4871      * try to describe it in a single reginfo:
4872      *  if EL3 is 64 bit, then trap to EL3 from S EL1,
4873      *     reads as constant 0xc00 from NS EL1 and NS EL2
4874      *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
4875      *  if v7 without EL3, register doesn't exist
4876      *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
4877      */
4878     if (arm_feature(env, ARM_FEATURE_EL3)) {
4879         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
4880             ARMCPRegInfo nsacr = {
4881                 .name = "NSACR", .type = ARM_CP_CONST,
4882                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4883                 .access = PL1_RW, .accessfn = nsacr_access,
4884                 .resetvalue = 0xc00
4885             };
4886             define_one_arm_cp_reg(cpu, &nsacr);
4887         } else {
4888             ARMCPRegInfo nsacr = {
4889                 .name = "NSACR",
4890                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4891                 .access = PL3_RW | PL1_R,
4892                 .resetvalue = 0,
4893                 .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
4894             };
4895             define_one_arm_cp_reg(cpu, &nsacr);
4896         }
4897     } else {
4898         if (arm_feature(env, ARM_FEATURE_V8)) {
4899             ARMCPRegInfo nsacr = {
4900                 .name = "NSACR", .type = ARM_CP_CONST,
4901                 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4902                 .access = PL1_R,
4903                 .resetvalue = 0xc00
4904             };
4905             define_one_arm_cp_reg(cpu, &nsacr);
4906         }
4907     }
4908 
4909     if (arm_feature(env, ARM_FEATURE_MPU)) {
4910         if (arm_feature(env, ARM_FEATURE_V6)) {
4911             /* PMSAv6 not implemented */
4912             assert(arm_feature(env, ARM_FEATURE_V7));
4913             define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4914             define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
4915         } else {
4916             define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
4917         }
4918     } else {
4919         define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4920         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
4921     }
4922     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
4923         define_arm_cp_regs(cpu, t2ee_cp_reginfo);
4924     }
4925     if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
4926         define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
4927     }
4928     if (arm_feature(env, ARM_FEATURE_VAPA)) {
4929         define_arm_cp_regs(cpu, vapa_cp_reginfo);
4930     }
4931     if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
4932         define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
4933     }
4934     if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
4935         define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
4936     }
4937     if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
4938         define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
4939     }
4940     if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
4941         define_arm_cp_regs(cpu, omap_cp_reginfo);
4942     }
4943     if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
4944         define_arm_cp_regs(cpu, strongarm_cp_reginfo);
4945     }
4946     if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4947         define_arm_cp_regs(cpu, xscale_cp_reginfo);
4948     }
4949     if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
4950         define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
4951     }
4952     if (arm_feature(env, ARM_FEATURE_LPAE)) {
4953         define_arm_cp_regs(cpu, lpae_cp_reginfo);
4954     }
4955     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
4956      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
4957      * be read-only (ie write causes UNDEF exception).
4958      */
4959     {
4960         ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
4961             /* Pre-v8 MIDR space.
4962              * Note that the MIDR isn't a simple constant register because
4963              * of the TI925 behaviour where writes to another register can
4964              * cause the MIDR value to change.
4965              *
4966              * Unimplemented registers in the c15 0 0 0 space default to
4967              * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
4968              * and friends override accordingly.
4969              */
4970             { .name = "MIDR",
4971               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
4972               .access = PL1_R, .resetvalue = cpu->midr,
4973               .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
4974               .readfn = midr_read,
4975               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
4976               .type = ARM_CP_OVERRIDE },
4977             /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
4978             { .name = "DUMMY",
4979               .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
4980               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4981             { .name = "DUMMY",
4982               .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
4983               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4984             { .name = "DUMMY",
4985               .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
4986               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4987             { .name = "DUMMY",
4988               .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
4989               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4990             { .name = "DUMMY",
4991               .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
4992               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4993             REGINFO_SENTINEL
4994         };
4995         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
4996             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
4997               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
4998               .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
4999               .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5000               .readfn = midr_read },
5001             /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
5002             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5003               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5004               .access = PL1_R, .resetvalue = cpu->midr },
5005             { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5006               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
5007               .access = PL1_R, .resetvalue = cpu->midr },
5008             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
5009               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
5010               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
5011             REGINFO_SENTINEL
5012         };
5013         ARMCPRegInfo id_cp_reginfo[] = {
5014             /* These are common to v8 and pre-v8 */
5015             { .name = "CTR",
5016               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
5017               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5018             { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
5019               .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
5020               .access = PL0_R, .accessfn = ctr_el0_access,
5021               .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5022             /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
5023             { .name = "TCMTR",
5024               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
5025               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5026             REGINFO_SENTINEL
5027         };
5028         /* TLBTR is specific to VMSA */
5029         ARMCPRegInfo id_tlbtr_reginfo = {
5030               .name = "TLBTR",
5031               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
5032               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
5033         };
5034         /* MPUIR is specific to PMSA V6+ */
5035         ARMCPRegInfo id_mpuir_reginfo = {
5036               .name = "MPUIR",
5037               .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5038               .access = PL1_R, .type = ARM_CP_CONST,
5039               .resetvalue = cpu->pmsav7_dregion << 8
5040         };
5041         ARMCPRegInfo crn0_wi_reginfo = {
5042             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5043             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5044             .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5045         };
5046         if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5047             arm_feature(env, ARM_FEATURE_STRONGARM)) {
5048             ARMCPRegInfo *r;
5049             /* Register the blanket "writes ignored" value first to cover the
5050              * whole space. Then update the specific ID registers to allow write
5051              * access, so that they ignore writes rather than causing them to
5052              * UNDEF.
5053              */
5054             define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5055             for (r = id_pre_v8_midr_cp_reginfo;
5056                  r->type != ARM_CP_SENTINEL; r++) {
5057                 r->access = PL1_RW;
5058             }
5059             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5060                 r->access = PL1_RW;
5061             }
5062             id_tlbtr_reginfo.access = PL1_RW;
5063             id_tlbtr_reginfo.access = PL1_RW;
5064         }
5065         if (arm_feature(env, ARM_FEATURE_V8)) {
5066             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5067         } else {
5068             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5069         }
5070         define_arm_cp_regs(cpu, id_cp_reginfo);
5071         if (!arm_feature(env, ARM_FEATURE_MPU)) {
5072             define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5073         } else if (arm_feature(env, ARM_FEATURE_V7)) {
5074             define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5075         }
5076     }
5077 
5078     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5079         define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5080     }
5081 
5082     if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5083         ARMCPRegInfo auxcr_reginfo[] = {
5084             { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5085               .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5086               .access = PL1_RW, .type = ARM_CP_CONST,
5087               .resetvalue = cpu->reset_auxcr },
5088             { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5089               .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5090               .access = PL2_RW, .type = ARM_CP_CONST,
5091               .resetvalue = 0 },
5092             { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5093               .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5094               .access = PL3_RW, .type = ARM_CP_CONST,
5095               .resetvalue = 0 },
5096             REGINFO_SENTINEL
5097         };
5098         define_arm_cp_regs(cpu, auxcr_reginfo);
5099     }
5100 
5101     if (arm_feature(env, ARM_FEATURE_CBAR)) {
5102         if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5103             /* 32 bit view is [31:18] 0...0 [43:32]. */
5104             uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5105                 | extract64(cpu->reset_cbar, 32, 12);
5106             ARMCPRegInfo cbar_reginfo[] = {
5107                 { .name = "CBAR",
5108                   .type = ARM_CP_CONST,
5109                   .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5110                   .access = PL1_R, .resetvalue = cpu->reset_cbar },
5111                 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5112                   .type = ARM_CP_CONST,
5113                   .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5114                   .access = PL1_R, .resetvalue = cbar32 },
5115                 REGINFO_SENTINEL
5116             };
5117             /* We don't implement a r/w 64 bit CBAR currently */
5118             assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5119             define_arm_cp_regs(cpu, cbar_reginfo);
5120         } else {
5121             ARMCPRegInfo cbar = {
5122                 .name = "CBAR",
5123                 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5124                 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5125                 .fieldoffset = offsetof(CPUARMState,
5126                                         cp15.c15_config_base_address)
5127             };
5128             if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5129                 cbar.access = PL1_R;
5130                 cbar.fieldoffset = 0;
5131                 cbar.type = ARM_CP_CONST;
5132             }
5133             define_one_arm_cp_reg(cpu, &cbar);
5134         }
5135     }
5136 
5137     if (arm_feature(env, ARM_FEATURE_VBAR)) {
5138         ARMCPRegInfo vbar_cp_reginfo[] = {
5139             { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5140               .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5141               .access = PL1_RW, .writefn = vbar_write,
5142               .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5143                                      offsetof(CPUARMState, cp15.vbar_ns) },
5144               .resetvalue = 0 },
5145             REGINFO_SENTINEL
5146         };
5147         define_arm_cp_regs(cpu, vbar_cp_reginfo);
5148     }
5149 
5150     /* Generic registers whose values depend on the implementation */
5151     {
5152         ARMCPRegInfo sctlr = {
5153             .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5154             .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5155             .access = PL1_RW,
5156             .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5157                                    offsetof(CPUARMState, cp15.sctlr_ns) },
5158             .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5159             .raw_writefn = raw_write,
5160         };
5161         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5162             /* Normally we would always end the TB on an SCTLR write, but Linux
5163              * arch/arm/mach-pxa/sleep.S expects two instructions following
5164              * an MMU enable to execute from cache.  Imitate this behaviour.
5165              */
5166             sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5167         }
5168         define_one_arm_cp_reg(cpu, &sctlr);
5169     }
5170 }
5171 
5172 ARMCPU *cpu_arm_init(const char *cpu_model)
5173 {
5174     return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
5175 }
5176 
5177 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5178 {
5179     CPUState *cs = CPU(cpu);
5180     CPUARMState *env = &cpu->env;
5181 
5182     if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5183         gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5184                                  aarch64_fpu_gdb_set_reg,
5185                                  34, "aarch64-fpu.xml", 0);
5186     } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5187         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5188                                  51, "arm-neon.xml", 0);
5189     } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5190         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5191                                  35, "arm-vfp3.xml", 0);
5192     } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5193         gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5194                                  19, "arm-vfp.xml", 0);
5195     }
5196 }
5197 
5198 /* Sort alphabetically by type name, except for "any". */
5199 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5200 {
5201     ObjectClass *class_a = (ObjectClass *)a;
5202     ObjectClass *class_b = (ObjectClass *)b;
5203     const char *name_a, *name_b;
5204 
5205     name_a = object_class_get_name(class_a);
5206     name_b = object_class_get_name(class_b);
5207     if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5208         return 1;
5209     } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5210         return -1;
5211     } else {
5212         return strcmp(name_a, name_b);
5213     }
5214 }
5215 
5216 static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5217 {
5218     ObjectClass *oc = data;
5219     CPUListState *s = user_data;
5220     const char *typename;
5221     char *name;
5222 
5223     typename = object_class_get_name(oc);
5224     name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5225     (*s->cpu_fprintf)(s->file, "  %s\n",
5226                       name);
5227     g_free(name);
5228 }
5229 
5230 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5231 {
5232     CPUListState s = {
5233         .file = f,
5234         .cpu_fprintf = cpu_fprintf,
5235     };
5236     GSList *list;
5237 
5238     list = object_class_get_list(TYPE_ARM_CPU, false);
5239     list = g_slist_sort(list, arm_cpu_list_compare);
5240     (*cpu_fprintf)(f, "Available CPUs:\n");
5241     g_slist_foreach(list, arm_cpu_list_entry, &s);
5242     g_slist_free(list);
5243 #ifdef CONFIG_KVM
5244     /* The 'host' CPU type is dynamically registered only if KVM is
5245      * enabled, so we have to special-case it here:
5246      */
5247     (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
5248 #endif
5249 }
5250 
5251 static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5252 {
5253     ObjectClass *oc = data;
5254     CpuDefinitionInfoList **cpu_list = user_data;
5255     CpuDefinitionInfoList *entry;
5256     CpuDefinitionInfo *info;
5257     const char *typename;
5258 
5259     typename = object_class_get_name(oc);
5260     info = g_malloc0(sizeof(*info));
5261     info->name = g_strndup(typename,
5262                            strlen(typename) - strlen("-" TYPE_ARM_CPU));
5263     info->q_typename = g_strdup(typename);
5264 
5265     entry = g_malloc0(sizeof(*entry));
5266     entry->value = info;
5267     entry->next = *cpu_list;
5268     *cpu_list = entry;
5269 }
5270 
5271 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5272 {
5273     CpuDefinitionInfoList *cpu_list = NULL;
5274     GSList *list;
5275 
5276     list = object_class_get_list(TYPE_ARM_CPU, false);
5277     g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5278     g_slist_free(list);
5279 
5280     return cpu_list;
5281 }
5282 
5283 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
5284                                    void *opaque, int state, int secstate,
5285                                    int crm, int opc1, int opc2)
5286 {
5287     /* Private utility function for define_one_arm_cp_reg_with_opaque():
5288      * add a single reginfo struct to the hash table.
5289      */
5290     uint32_t *key = g_new(uint32_t, 1);
5291     ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
5292     int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
5293     int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
5294 
5295     /* Reset the secure state to the specific incoming state.  This is
5296      * necessary as the register may have been defined with both states.
5297      */
5298     r2->secure = secstate;
5299 
5300     if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5301         /* Register is banked (using both entries in array).
5302          * Overwriting fieldoffset as the array is only used to define
5303          * banked registers but later only fieldoffset is used.
5304          */
5305         r2->fieldoffset = r->bank_fieldoffsets[ns];
5306     }
5307 
5308     if (state == ARM_CP_STATE_AA32) {
5309         if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5310             /* If the register is banked then we don't need to migrate or
5311              * reset the 32-bit instance in certain cases:
5312              *
5313              * 1) If the register has both 32-bit and 64-bit instances then we
5314              *    can count on the 64-bit instance taking care of the
5315              *    non-secure bank.
5316              * 2) If ARMv8 is enabled then we can count on a 64-bit version
5317              *    taking care of the secure bank.  This requires that separate
5318              *    32 and 64-bit definitions are provided.
5319              */
5320             if ((r->state == ARM_CP_STATE_BOTH && ns) ||
5321                 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
5322                 r2->type |= ARM_CP_ALIAS;
5323             }
5324         } else if ((secstate != r->secure) && !ns) {
5325             /* The register is not banked so we only want to allow migration of
5326              * the non-secure instance.
5327              */
5328             r2->type |= ARM_CP_ALIAS;
5329         }
5330 
5331         if (r->state == ARM_CP_STATE_BOTH) {
5332             /* We assume it is a cp15 register if the .cp field is left unset.
5333              */
5334             if (r2->cp == 0) {
5335                 r2->cp = 15;
5336             }
5337 
5338 #ifdef HOST_WORDS_BIGENDIAN
5339             if (r2->fieldoffset) {
5340                 r2->fieldoffset += sizeof(uint32_t);
5341             }
5342 #endif
5343         }
5344     }
5345     if (state == ARM_CP_STATE_AA64) {
5346         /* To allow abbreviation of ARMCPRegInfo
5347          * definitions, we treat cp == 0 as equivalent to
5348          * the value for "standard guest-visible sysreg".
5349          * STATE_BOTH definitions are also always "standard
5350          * sysreg" in their AArch64 view (the .cp value may
5351          * be non-zero for the benefit of the AArch32 view).
5352          */
5353         if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
5354             r2->cp = CP_REG_ARM64_SYSREG_CP;
5355         }
5356         *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
5357                                   r2->opc0, opc1, opc2);
5358     } else {
5359         *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
5360     }
5361     if (opaque) {
5362         r2->opaque = opaque;
5363     }
5364     /* reginfo passed to helpers is correct for the actual access,
5365      * and is never ARM_CP_STATE_BOTH:
5366      */
5367     r2->state = state;
5368     /* Make sure reginfo passed to helpers for wildcarded regs
5369      * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
5370      */
5371     r2->crm = crm;
5372     r2->opc1 = opc1;
5373     r2->opc2 = opc2;
5374     /* By convention, for wildcarded registers only the first
5375      * entry is used for migration; the others are marked as
5376      * ALIAS so we don't try to transfer the register
5377      * multiple times. Special registers (ie NOP/WFI) are
5378      * never migratable and not even raw-accessible.
5379      */
5380     if ((r->type & ARM_CP_SPECIAL)) {
5381         r2->type |= ARM_CP_NO_RAW;
5382     }
5383     if (((r->crm == CP_ANY) && crm != 0) ||
5384         ((r->opc1 == CP_ANY) && opc1 != 0) ||
5385         ((r->opc2 == CP_ANY) && opc2 != 0)) {
5386         r2->type |= ARM_CP_ALIAS;
5387     }
5388 
5389     /* Check that raw accesses are either forbidden or handled. Note that
5390      * we can't assert this earlier because the setup of fieldoffset for
5391      * banked registers has to be done first.
5392      */
5393     if (!(r2->type & ARM_CP_NO_RAW)) {
5394         assert(!raw_accessors_invalid(r2));
5395     }
5396 
5397     /* Overriding of an existing definition must be explicitly
5398      * requested.
5399      */
5400     if (!(r->type & ARM_CP_OVERRIDE)) {
5401         ARMCPRegInfo *oldreg;
5402         oldreg = g_hash_table_lookup(cpu->cp_regs, key);
5403         if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
5404             fprintf(stderr, "Register redefined: cp=%d %d bit "
5405                     "crn=%d crm=%d opc1=%d opc2=%d, "
5406                     "was %s, now %s\n", r2->cp, 32 + 32 * is64,
5407                     r2->crn, r2->crm, r2->opc1, r2->opc2,
5408                     oldreg->name, r2->name);
5409             g_assert_not_reached();
5410         }
5411     }
5412     g_hash_table_insert(cpu->cp_regs, key, r2);
5413 }
5414 
5415 
5416 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
5417                                        const ARMCPRegInfo *r, void *opaque)
5418 {
5419     /* Define implementations of coprocessor registers.
5420      * We store these in a hashtable because typically
5421      * there are less than 150 registers in a space which
5422      * is 16*16*16*8*8 = 262144 in size.
5423      * Wildcarding is supported for the crm, opc1 and opc2 fields.
5424      * If a register is defined twice then the second definition is
5425      * used, so this can be used to define some generic registers and
5426      * then override them with implementation specific variations.
5427      * At least one of the original and the second definition should
5428      * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
5429      * against accidental use.
5430      *
5431      * The state field defines whether the register is to be
5432      * visible in the AArch32 or AArch64 execution state. If the
5433      * state is set to ARM_CP_STATE_BOTH then we synthesise a
5434      * reginfo structure for the AArch32 view, which sees the lower
5435      * 32 bits of the 64 bit register.
5436      *
5437      * Only registers visible in AArch64 may set r->opc0; opc0 cannot
5438      * be wildcarded. AArch64 registers are always considered to be 64
5439      * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
5440      * the register, if any.
5441      */
5442     int crm, opc1, opc2, state;
5443     int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
5444     int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
5445     int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
5446     int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
5447     int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
5448     int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
5449     /* 64 bit registers have only CRm and Opc1 fields */
5450     assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
5451     /* op0 only exists in the AArch64 encodings */
5452     assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
5453     /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
5454     assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
5455     /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
5456      * encodes a minimum access level for the register. We roll this
5457      * runtime check into our general permission check code, so check
5458      * here that the reginfo's specified permissions are strict enough
5459      * to encompass the generic architectural permission check.
5460      */
5461     if (r->state != ARM_CP_STATE_AA32) {
5462         int mask = 0;
5463         switch (r->opc1) {
5464         case 0: case 1: case 2:
5465             /* min_EL EL1 */
5466             mask = PL1_RW;
5467             break;
5468         case 3:
5469             /* min_EL EL0 */
5470             mask = PL0_RW;
5471             break;
5472         case 4:
5473             /* min_EL EL2 */
5474             mask = PL2_RW;
5475             break;
5476         case 5:
5477             /* unallocated encoding, so not possible */
5478             assert(false);
5479             break;
5480         case 6:
5481             /* min_EL EL3 */
5482             mask = PL3_RW;
5483             break;
5484         case 7:
5485             /* min_EL EL1, secure mode only (we don't check the latter) */
5486             mask = PL1_RW;
5487             break;
5488         default:
5489             /* broken reginfo with out-of-range opc1 */
5490             assert(false);
5491             break;
5492         }
5493         /* assert our permissions are not too lax (stricter is fine) */
5494         assert((r->access & ~mask) == 0);
5495     }
5496 
5497     /* Check that the register definition has enough info to handle
5498      * reads and writes if they are permitted.
5499      */
5500     if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
5501         if (r->access & PL3_R) {
5502             assert((r->fieldoffset ||
5503                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5504                    r->readfn);
5505         }
5506         if (r->access & PL3_W) {
5507             assert((r->fieldoffset ||
5508                    (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5509                    r->writefn);
5510         }
5511     }
5512     /* Bad type field probably means missing sentinel at end of reg list */
5513     assert(cptype_valid(r->type));
5514     for (crm = crmmin; crm <= crmmax; crm++) {
5515         for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
5516             for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
5517                 for (state = ARM_CP_STATE_AA32;
5518                      state <= ARM_CP_STATE_AA64; state++) {
5519                     if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
5520                         continue;
5521                     }
5522                     if (state == ARM_CP_STATE_AA32) {
5523                         /* Under AArch32 CP registers can be common
5524                          * (same for secure and non-secure world) or banked.
5525                          */
5526                         switch (r->secure) {
5527                         case ARM_CP_SECSTATE_S:
5528                         case ARM_CP_SECSTATE_NS:
5529                             add_cpreg_to_hashtable(cpu, r, opaque, state,
5530                                                    r->secure, crm, opc1, opc2);
5531                             break;
5532                         default:
5533                             add_cpreg_to_hashtable(cpu, r, opaque, state,
5534                                                    ARM_CP_SECSTATE_S,
5535                                                    crm, opc1, opc2);
5536                             add_cpreg_to_hashtable(cpu, r, opaque, state,
5537                                                    ARM_CP_SECSTATE_NS,
5538                                                    crm, opc1, opc2);
5539                             break;
5540                         }
5541                     } else {
5542                         /* AArch64 registers get mapped to non-secure instance
5543                          * of AArch32 */
5544                         add_cpreg_to_hashtable(cpu, r, opaque, state,
5545                                                ARM_CP_SECSTATE_NS,
5546                                                crm, opc1, opc2);
5547                     }
5548                 }
5549             }
5550         }
5551     }
5552 }
5553 
5554 void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
5555                                     const ARMCPRegInfo *regs, void *opaque)
5556 {
5557     /* Define a whole list of registers */
5558     const ARMCPRegInfo *r;
5559     for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
5560         define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
5561     }
5562 }
5563 
5564 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
5565 {
5566     return g_hash_table_lookup(cpregs, &encoded_cp);
5567 }
5568 
5569 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
5570                          uint64_t value)
5571 {
5572     /* Helper coprocessor write function for write-ignore registers */
5573 }
5574 
5575 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
5576 {
5577     /* Helper coprocessor write function for read-as-zero registers */
5578     return 0;
5579 }
5580 
5581 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
5582 {
5583     /* Helper coprocessor reset function for do-nothing-on-reset registers */
5584 }
5585 
5586 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
5587 {
5588     /* Return true if it is not valid for us to switch to
5589      * this CPU mode (ie all the UNPREDICTABLE cases in
5590      * the ARM ARM CPSRWriteByInstr pseudocode).
5591      */
5592 
5593     /* Changes to or from Hyp via MSR and CPS are illegal. */
5594     if (write_type == CPSRWriteByInstr &&
5595         ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
5596          mode == ARM_CPU_MODE_HYP)) {
5597         return 1;
5598     }
5599 
5600     switch (mode) {
5601     case ARM_CPU_MODE_USR:
5602         return 0;
5603     case ARM_CPU_MODE_SYS:
5604     case ARM_CPU_MODE_SVC:
5605     case ARM_CPU_MODE_ABT:
5606     case ARM_CPU_MODE_UND:
5607     case ARM_CPU_MODE_IRQ:
5608     case ARM_CPU_MODE_FIQ:
5609         /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
5610          * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
5611          */
5612         /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
5613          * and CPS are treated as illegal mode changes.
5614          */
5615         if (write_type == CPSRWriteByInstr &&
5616             (env->cp15.hcr_el2 & HCR_TGE) &&
5617             (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
5618             !arm_is_secure_below_el3(env)) {
5619             return 1;
5620         }
5621         return 0;
5622     case ARM_CPU_MODE_HYP:
5623         return !arm_feature(env, ARM_FEATURE_EL2)
5624             || arm_current_el(env) < 2 || arm_is_secure(env);
5625     case ARM_CPU_MODE_MON:
5626         return arm_current_el(env) < 3;
5627     default:
5628         return 1;
5629     }
5630 }
5631 
5632 uint32_t cpsr_read(CPUARMState *env)
5633 {
5634     int ZF;
5635     ZF = (env->ZF == 0);
5636     return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
5637         (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
5638         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
5639         | ((env->condexec_bits & 0xfc) << 8)
5640         | (env->GE << 16) | (env->daif & CPSR_AIF);
5641 }
5642 
5643 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
5644                 CPSRWriteType write_type)
5645 {
5646     uint32_t changed_daif;
5647 
5648     if (mask & CPSR_NZCV) {
5649         env->ZF = (~val) & CPSR_Z;
5650         env->NF = val;
5651         env->CF = (val >> 29) & 1;
5652         env->VF = (val << 3) & 0x80000000;
5653     }
5654     if (mask & CPSR_Q)
5655         env->QF = ((val & CPSR_Q) != 0);
5656     if (mask & CPSR_T)
5657         env->thumb = ((val & CPSR_T) != 0);
5658     if (mask & CPSR_IT_0_1) {
5659         env->condexec_bits &= ~3;
5660         env->condexec_bits |= (val >> 25) & 3;
5661     }
5662     if (mask & CPSR_IT_2_7) {
5663         env->condexec_bits &= 3;
5664         env->condexec_bits |= (val >> 8) & 0xfc;
5665     }
5666     if (mask & CPSR_GE) {
5667         env->GE = (val >> 16) & 0xf;
5668     }
5669 
5670     /* In a V7 implementation that includes the security extensions but does
5671      * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
5672      * whether non-secure software is allowed to change the CPSR_F and CPSR_A
5673      * bits respectively.
5674      *
5675      * In a V8 implementation, it is permitted for privileged software to
5676      * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
5677      */
5678     if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
5679         arm_feature(env, ARM_FEATURE_EL3) &&
5680         !arm_feature(env, ARM_FEATURE_EL2) &&
5681         !arm_is_secure(env)) {
5682 
5683         changed_daif = (env->daif ^ val) & mask;
5684 
5685         if (changed_daif & CPSR_A) {
5686             /* Check to see if we are allowed to change the masking of async
5687              * abort exceptions from a non-secure state.
5688              */
5689             if (!(env->cp15.scr_el3 & SCR_AW)) {
5690                 qemu_log_mask(LOG_GUEST_ERROR,
5691                               "Ignoring attempt to switch CPSR_A flag from "
5692                               "non-secure world with SCR.AW bit clear\n");
5693                 mask &= ~CPSR_A;
5694             }
5695         }
5696 
5697         if (changed_daif & CPSR_F) {
5698             /* Check to see if we are allowed to change the masking of FIQ
5699              * exceptions from a non-secure state.
5700              */
5701             if (!(env->cp15.scr_el3 & SCR_FW)) {
5702                 qemu_log_mask(LOG_GUEST_ERROR,
5703                               "Ignoring attempt to switch CPSR_F flag from "
5704                               "non-secure world with SCR.FW bit clear\n");
5705                 mask &= ~CPSR_F;
5706             }
5707 
5708             /* Check whether non-maskable FIQ (NMFI) support is enabled.
5709              * If this bit is set software is not allowed to mask
5710              * FIQs, but is allowed to set CPSR_F to 0.
5711              */
5712             if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
5713                 (val & CPSR_F)) {
5714                 qemu_log_mask(LOG_GUEST_ERROR,
5715                               "Ignoring attempt to enable CPSR_F flag "
5716                               "(non-maskable FIQ [NMFI] support enabled)\n");
5717                 mask &= ~CPSR_F;
5718             }
5719         }
5720     }
5721 
5722     env->daif &= ~(CPSR_AIF & mask);
5723     env->daif |= val & CPSR_AIF & mask;
5724 
5725     if (write_type != CPSRWriteRaw &&
5726         ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
5727         if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
5728             /* Note that we can only get here in USR mode if this is a
5729              * gdb stub write; for this case we follow the architectural
5730              * behaviour for guest writes in USR mode of ignoring an attempt
5731              * to switch mode. (Those are caught by translate.c for writes
5732              * triggered by guest instructions.)
5733              */
5734             mask &= ~CPSR_M;
5735         } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
5736             /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
5737              * v7, and has defined behaviour in v8:
5738              *  + leave CPSR.M untouched
5739              *  + allow changes to the other CPSR fields
5740              *  + set PSTATE.IL
5741              * For user changes via the GDB stub, we don't set PSTATE.IL,
5742              * as this would be unnecessarily harsh for a user error.
5743              */
5744             mask &= ~CPSR_M;
5745             if (write_type != CPSRWriteByGDBStub &&
5746                 arm_feature(env, ARM_FEATURE_V8)) {
5747                 mask |= CPSR_IL;
5748                 val |= CPSR_IL;
5749             }
5750         } else {
5751             switch_mode(env, val & CPSR_M);
5752         }
5753     }
5754     mask &= ~CACHED_CPSR_BITS;
5755     env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
5756 }
5757 
5758 /* Sign/zero extend */
5759 uint32_t HELPER(sxtb16)(uint32_t x)
5760 {
5761     uint32_t res;
5762     res = (uint16_t)(int8_t)x;
5763     res |= (uint32_t)(int8_t)(x >> 16) << 16;
5764     return res;
5765 }
5766 
5767 uint32_t HELPER(uxtb16)(uint32_t x)
5768 {
5769     uint32_t res;
5770     res = (uint16_t)(uint8_t)x;
5771     res |= (uint32_t)(uint8_t)(x >> 16) << 16;
5772     return res;
5773 }
5774 
5775 int32_t HELPER(sdiv)(int32_t num, int32_t den)
5776 {
5777     if (den == 0)
5778       return 0;
5779     if (num == INT_MIN && den == -1)
5780       return INT_MIN;
5781     return num / den;
5782 }
5783 
5784 uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
5785 {
5786     if (den == 0)
5787       return 0;
5788     return num / den;
5789 }
5790 
5791 uint32_t HELPER(rbit)(uint32_t x)
5792 {
5793     return revbit32(x);
5794 }
5795 
5796 #if defined(CONFIG_USER_ONLY)
5797 
5798 /* These should probably raise undefined insn exceptions.  */
5799 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
5800 {
5801     ARMCPU *cpu = arm_env_get_cpu(env);
5802 
5803     cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
5804 }
5805 
5806 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
5807 {
5808     ARMCPU *cpu = arm_env_get_cpu(env);
5809 
5810     cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
5811     return 0;
5812 }
5813 
5814 void switch_mode(CPUARMState *env, int mode)
5815 {
5816     ARMCPU *cpu = arm_env_get_cpu(env);
5817 
5818     if (mode != ARM_CPU_MODE_USR) {
5819         cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
5820     }
5821 }
5822 
5823 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5824                                  uint32_t cur_el, bool secure)
5825 {
5826     return 1;
5827 }
5828 
5829 void aarch64_sync_64_to_32(CPUARMState *env)
5830 {
5831     g_assert_not_reached();
5832 }
5833 
5834 #else
5835 
5836 void switch_mode(CPUARMState *env, int mode)
5837 {
5838     int old_mode;
5839     int i;
5840 
5841     old_mode = env->uncached_cpsr & CPSR_M;
5842     if (mode == old_mode)
5843         return;
5844 
5845     if (old_mode == ARM_CPU_MODE_FIQ) {
5846         memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
5847         memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
5848     } else if (mode == ARM_CPU_MODE_FIQ) {
5849         memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
5850         memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
5851     }
5852 
5853     i = bank_number(old_mode);
5854     env->banked_r13[i] = env->regs[13];
5855     env->banked_r14[i] = env->regs[14];
5856     env->banked_spsr[i] = env->spsr;
5857 
5858     i = bank_number(mode);
5859     env->regs[13] = env->banked_r13[i];
5860     env->regs[14] = env->banked_r14[i];
5861     env->spsr = env->banked_spsr[i];
5862 }
5863 
5864 /* Physical Interrupt Target EL Lookup Table
5865  *
5866  * [ From ARM ARM section G1.13.4 (Table G1-15) ]
5867  *
5868  * The below multi-dimensional table is used for looking up the target
5869  * exception level given numerous condition criteria.  Specifically, the
5870  * target EL is based on SCR and HCR routing controls as well as the
5871  * currently executing EL and secure state.
5872  *
5873  *    Dimensions:
5874  *    target_el_table[2][2][2][2][2][4]
5875  *                    |  |  |  |  |  +--- Current EL
5876  *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
5877  *                    |  |  |  +--------- HCR mask override
5878  *                    |  |  +------------ SCR exec state control
5879  *                    |  +--------------- SCR mask override
5880  *                    +------------------ 32-bit(0)/64-bit(1) EL3
5881  *
5882  *    The table values are as such:
5883  *    0-3 = EL0-EL3
5884  *     -1 = Cannot occur
5885  *
5886  * The ARM ARM target EL table includes entries indicating that an "exception
5887  * is not taken".  The two cases where this is applicable are:
5888  *    1) An exception is taken from EL3 but the SCR does not have the exception
5889  *    routed to EL3.
5890  *    2) An exception is taken from EL2 but the HCR does not have the exception
5891  *    routed to EL2.
5892  * In these two cases, the below table contain a target of EL1.  This value is
5893  * returned as it is expected that the consumer of the table data will check
5894  * for "target EL >= current EL" to ensure the exception is not taken.
5895  *
5896  *            SCR     HCR
5897  *         64  EA     AMO                 From
5898  *        BIT IRQ     IMO      Non-secure         Secure
5899  *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
5900  */
5901 static const int8_t target_el_table[2][2][2][2][2][4] = {
5902     {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5903        {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
5904       {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5905        {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
5906      {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5907        {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
5908       {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5909        {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
5910     {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
5911        {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
5912       {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
5913        {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
5914      {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5915        {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
5916       {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5917        {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
5918 };
5919 
5920 /*
5921  * Determine the target EL for physical exceptions
5922  */
5923 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5924                                  uint32_t cur_el, bool secure)
5925 {
5926     CPUARMState *env = cs->env_ptr;
5927     int rw;
5928     int scr;
5929     int hcr;
5930     int target_el;
5931     /* Is the highest EL AArch64? */
5932     int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
5933 
5934     if (arm_feature(env, ARM_FEATURE_EL3)) {
5935         rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
5936     } else {
5937         /* Either EL2 is the highest EL (and so the EL2 register width
5938          * is given by is64); or there is no EL2 or EL3, in which case
5939          * the value of 'rw' does not affect the table lookup anyway.
5940          */
5941         rw = is64;
5942     }
5943 
5944     switch (excp_idx) {
5945     case EXCP_IRQ:
5946         scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
5947         hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
5948         break;
5949     case EXCP_FIQ:
5950         scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
5951         hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
5952         break;
5953     default:
5954         scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
5955         hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
5956         break;
5957     };
5958 
5959     /* If HCR.TGE is set then HCR is treated as being 1 */
5960     hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
5961 
5962     /* Perform a table-lookup for the target EL given the current state */
5963     target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
5964 
5965     assert(target_el > 0);
5966 
5967     return target_el;
5968 }
5969 
5970 static void v7m_push(CPUARMState *env, uint32_t val)
5971 {
5972     CPUState *cs = CPU(arm_env_get_cpu(env));
5973 
5974     env->regs[13] -= 4;
5975     stl_phys(cs->as, env->regs[13], val);
5976 }
5977 
5978 static uint32_t v7m_pop(CPUARMState *env)
5979 {
5980     CPUState *cs = CPU(arm_env_get_cpu(env));
5981     uint32_t val;
5982 
5983     val = ldl_phys(cs->as, env->regs[13]);
5984     env->regs[13] += 4;
5985     return val;
5986 }
5987 
5988 /* Switch to V7M main or process stack pointer.  */
5989 static void switch_v7m_sp(CPUARMState *env, bool new_spsel)
5990 {
5991     uint32_t tmp;
5992     bool old_spsel = env->v7m.control & R_V7M_CONTROL_SPSEL_MASK;
5993 
5994     if (old_spsel != new_spsel) {
5995         tmp = env->v7m.other_sp;
5996         env->v7m.other_sp = env->regs[13];
5997         env->regs[13] = tmp;
5998 
5999         env->v7m.control = deposit32(env->v7m.control,
6000                                      R_V7M_CONTROL_SPSEL_SHIFT,
6001                                      R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
6002     }
6003 }
6004 
6005 static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
6006 {
6007     CPUState *cs = CPU(cpu);
6008     CPUARMState *env = &cpu->env;
6009     MemTxResult result;
6010     hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
6011     uint32_t addr;
6012 
6013     addr = address_space_ldl(cs->as, vec,
6014                              MEMTXATTRS_UNSPECIFIED, &result);
6015     if (result != MEMTX_OK) {
6016         /* Architecturally this should cause a HardFault setting HSFR.VECTTBL,
6017          * which would then be immediately followed by our failing to load
6018          * the entry vector for that HardFault, which is a Lockup case.
6019          * Since we don't model Lockup, we just report this guest error
6020          * via cpu_abort().
6021          */
6022         cpu_abort(cs, "Failed to read from exception vector table "
6023                   "entry %08x\n", (unsigned)vec);
6024     }
6025     return addr;
6026 }
6027 
6028 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
6029 {
6030     /* Do the "take the exception" parts of exception entry,
6031      * but not the pushing of state to the stack. This is
6032      * similar to the pseudocode ExceptionTaken() function.
6033      */
6034     CPUARMState *env = &cpu->env;
6035     uint32_t addr;
6036 
6037     armv7m_nvic_acknowledge_irq(env->nvic);
6038     switch_v7m_sp(env, 0);
6039     /* Clear IT bits */
6040     env->condexec_bits = 0;
6041     env->regs[14] = lr;
6042     addr = arm_v7m_load_vector(cpu);
6043     env->regs[15] = addr & 0xfffffffe;
6044     env->thumb = addr & 1;
6045 }
6046 
6047 static void v7m_push_stack(ARMCPU *cpu)
6048 {
6049     /* Do the "set up stack frame" part of exception entry,
6050      * similar to pseudocode PushStack().
6051      */
6052     CPUARMState *env = &cpu->env;
6053     uint32_t xpsr = xpsr_read(env);
6054 
6055     /* Align stack pointer if the guest wants that */
6056     if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
6057         env->regs[13] -= 4;
6058         xpsr |= 0x200;
6059     }
6060     /* Switch to the handler mode.  */
6061     v7m_push(env, xpsr);
6062     v7m_push(env, env->regs[15]);
6063     v7m_push(env, env->regs[14]);
6064     v7m_push(env, env->regs[12]);
6065     v7m_push(env, env->regs[3]);
6066     v7m_push(env, env->regs[2]);
6067     v7m_push(env, env->regs[1]);
6068     v7m_push(env, env->regs[0]);
6069 }
6070 
6071 static void do_v7m_exception_exit(ARMCPU *cpu)
6072 {
6073     CPUARMState *env = &cpu->env;
6074     uint32_t type;
6075     uint32_t xpsr;
6076     bool ufault = false;
6077     bool return_to_sp_process = false;
6078     bool return_to_handler = false;
6079     bool rettobase = false;
6080 
6081     /* We can only get here from an EXCP_EXCEPTION_EXIT, and
6082      * arm_v7m_do_unassigned_access() enforces the architectural rule
6083      * that jumps to magic addresses don't have magic behaviour unless
6084      * we're in Handler mode (compare pseudocode BXWritePC()).
6085      */
6086     assert(env->v7m.exception != 0);
6087 
6088     /* In the spec pseudocode ExceptionReturn() is called directly
6089      * from BXWritePC() and gets the full target PC value including
6090      * bit zero. In QEMU's implementation we treat it as a normal
6091      * jump-to-register (which is then caught later on), and so split
6092      * the target value up between env->regs[15] and env->thumb in
6093      * gen_bx(). Reconstitute it.
6094      */
6095     type = env->regs[15];
6096     if (env->thumb) {
6097         type |= 1;
6098     }
6099 
6100     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
6101                   " previous exception %d\n",
6102                   type, env->v7m.exception);
6103 
6104     if (extract32(type, 5, 23) != extract32(-1, 5, 23)) {
6105         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
6106                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", type);
6107     }
6108 
6109     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
6110         /* Auto-clear FAULTMASK on return from other than NMI */
6111         env->daif &= ~PSTATE_F;
6112     }
6113 
6114     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
6115     case -1:
6116         /* attempt to exit an exception that isn't active */
6117         ufault = true;
6118         break;
6119     case 0:
6120         /* still an irq active now */
6121         break;
6122     case 1:
6123         /* we returned to base exception level, no nesting.
6124          * (In the pseudocode this is written using "NestedActivation != 1"
6125          * where we have 'rettobase == false'.)
6126          */
6127         rettobase = true;
6128         break;
6129     default:
6130         g_assert_not_reached();
6131     }
6132 
6133     switch (type & 0xf) {
6134     case 1: /* Return to Handler */
6135         return_to_handler = true;
6136         break;
6137     case 13: /* Return to Thread using Process stack */
6138         return_to_sp_process = true;
6139         /* fall through */
6140     case 9: /* Return to Thread using Main stack */
6141         if (!rettobase &&
6142             !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) {
6143             ufault = true;
6144         }
6145         break;
6146     default:
6147         ufault = true;
6148     }
6149 
6150     if (ufault) {
6151         /* Bad exception return: instead of popping the exception
6152          * stack, directly take a usage fault on the current stack.
6153          */
6154         env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
6155         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6156         v7m_exception_taken(cpu, type | 0xf0000000);
6157         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
6158                       "stackframe: failed exception return integrity check\n");
6159         return;
6160     }
6161 
6162     /* Switch to the target stack.  */
6163     switch_v7m_sp(env, return_to_sp_process);
6164     /* Pop registers.  */
6165     env->regs[0] = v7m_pop(env);
6166     env->regs[1] = v7m_pop(env);
6167     env->regs[2] = v7m_pop(env);
6168     env->regs[3] = v7m_pop(env);
6169     env->regs[12] = v7m_pop(env);
6170     env->regs[14] = v7m_pop(env);
6171     env->regs[15] = v7m_pop(env);
6172     if (env->regs[15] & 1) {
6173         qemu_log_mask(LOG_GUEST_ERROR,
6174                       "M profile return from interrupt with misaligned "
6175                       "PC is UNPREDICTABLE\n");
6176         /* Actual hardware seems to ignore the lsbit, and there are several
6177          * RTOSes out there which incorrectly assume the r15 in the stack
6178          * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
6179          */
6180         env->regs[15] &= ~1U;
6181     }
6182     xpsr = v7m_pop(env);
6183     xpsr_write(env, xpsr, 0xfffffdff);
6184     /* Undo stack alignment.  */
6185     if (xpsr & 0x200)
6186         env->regs[13] |= 4;
6187 
6188     /* The restored xPSR exception field will be zero if we're
6189      * resuming in Thread mode. If that doesn't match what the
6190      * exception return type specified then this is a UsageFault.
6191      */
6192     if (return_to_handler == (env->v7m.exception == 0)) {
6193         /* Take an INVPC UsageFault by pushing the stack again. */
6194         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6195         env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
6196         v7m_push_stack(cpu);
6197         v7m_exception_taken(cpu, type | 0xf0000000);
6198         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
6199                       "failed exception return integrity check\n");
6200         return;
6201     }
6202 
6203     /* Otherwise, we have a successful exception exit. */
6204     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
6205 }
6206 
6207 static void arm_log_exception(int idx)
6208 {
6209     if (qemu_loglevel_mask(CPU_LOG_INT)) {
6210         const char *exc = NULL;
6211 
6212         if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
6213             exc = excnames[idx];
6214         }
6215         if (!exc) {
6216             exc = "unknown";
6217         }
6218         qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
6219     }
6220 }
6221 
6222 void arm_v7m_cpu_do_interrupt(CPUState *cs)
6223 {
6224     ARMCPU *cpu = ARM_CPU(cs);
6225     CPUARMState *env = &cpu->env;
6226     uint32_t lr;
6227 
6228     arm_log_exception(cs->exception_index);
6229 
6230     lr = 0xfffffff1;
6231     if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
6232         lr |= 4;
6233     }
6234     if (env->v7m.exception == 0)
6235         lr |= 8;
6236 
6237     /* For exceptions we just mark as pending on the NVIC, and let that
6238        handle it.  */
6239     switch (cs->exception_index) {
6240     case EXCP_UDEF:
6241         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6242         env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK;
6243         break;
6244     case EXCP_NOCP:
6245         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6246         env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK;
6247         break;
6248     case EXCP_INVSTATE:
6249         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6250         env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK;
6251         break;
6252     case EXCP_SWI:
6253         /* The PC already points to the next instruction.  */
6254         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
6255         break;
6256     case EXCP_PREFETCH_ABORT:
6257     case EXCP_DATA_ABORT:
6258         /* TODO: if we implemented the MPU registers, this is where we
6259          * should set the MMFAR, etc from exception.fsr and exception.vaddress.
6260          */
6261         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
6262         break;
6263     case EXCP_BKPT:
6264         if (semihosting_enabled()) {
6265             int nr;
6266             nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
6267             if (nr == 0xab) {
6268                 env->regs[15] += 2;
6269                 qemu_log_mask(CPU_LOG_INT,
6270                               "...handling as semihosting call 0x%x\n",
6271                               env->regs[0]);
6272                 env->regs[0] = do_arm_semihosting(env);
6273                 return;
6274             }
6275         }
6276         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
6277         break;
6278     case EXCP_IRQ:
6279         break;
6280     case EXCP_EXCEPTION_EXIT:
6281         do_v7m_exception_exit(cpu);
6282         return;
6283     default:
6284         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6285         return; /* Never happens.  Keep compiler happy.  */
6286     }
6287 
6288     v7m_push_stack(cpu);
6289     v7m_exception_taken(cpu, lr);
6290     qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
6291 }
6292 
6293 /* Function used to synchronize QEMU's AArch64 register set with AArch32
6294  * register set.  This is necessary when switching between AArch32 and AArch64
6295  * execution state.
6296  */
6297 void aarch64_sync_32_to_64(CPUARMState *env)
6298 {
6299     int i;
6300     uint32_t mode = env->uncached_cpsr & CPSR_M;
6301 
6302     /* We can blanket copy R[0:7] to X[0:7] */
6303     for (i = 0; i < 8; i++) {
6304         env->xregs[i] = env->regs[i];
6305     }
6306 
6307     /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
6308      * Otherwise, they come from the banked user regs.
6309      */
6310     if (mode == ARM_CPU_MODE_FIQ) {
6311         for (i = 8; i < 13; i++) {
6312             env->xregs[i] = env->usr_regs[i - 8];
6313         }
6314     } else {
6315         for (i = 8; i < 13; i++) {
6316             env->xregs[i] = env->regs[i];
6317         }
6318     }
6319 
6320     /* Registers x13-x23 are the various mode SP and FP registers. Registers
6321      * r13 and r14 are only copied if we are in that mode, otherwise we copy
6322      * from the mode banked register.
6323      */
6324     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6325         env->xregs[13] = env->regs[13];
6326         env->xregs[14] = env->regs[14];
6327     } else {
6328         env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
6329         /* HYP is an exception in that it is copied from r14 */
6330         if (mode == ARM_CPU_MODE_HYP) {
6331             env->xregs[14] = env->regs[14];
6332         } else {
6333             env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
6334         }
6335     }
6336 
6337     if (mode == ARM_CPU_MODE_HYP) {
6338         env->xregs[15] = env->regs[13];
6339     } else {
6340         env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
6341     }
6342 
6343     if (mode == ARM_CPU_MODE_IRQ) {
6344         env->xregs[16] = env->regs[14];
6345         env->xregs[17] = env->regs[13];
6346     } else {
6347         env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
6348         env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
6349     }
6350 
6351     if (mode == ARM_CPU_MODE_SVC) {
6352         env->xregs[18] = env->regs[14];
6353         env->xregs[19] = env->regs[13];
6354     } else {
6355         env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
6356         env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
6357     }
6358 
6359     if (mode == ARM_CPU_MODE_ABT) {
6360         env->xregs[20] = env->regs[14];
6361         env->xregs[21] = env->regs[13];
6362     } else {
6363         env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
6364         env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
6365     }
6366 
6367     if (mode == ARM_CPU_MODE_UND) {
6368         env->xregs[22] = env->regs[14];
6369         env->xregs[23] = env->regs[13];
6370     } else {
6371         env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
6372         env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
6373     }
6374 
6375     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6376      * mode, then we can copy from r8-r14.  Otherwise, we copy from the
6377      * FIQ bank for r8-r14.
6378      */
6379     if (mode == ARM_CPU_MODE_FIQ) {
6380         for (i = 24; i < 31; i++) {
6381             env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
6382         }
6383     } else {
6384         for (i = 24; i < 29; i++) {
6385             env->xregs[i] = env->fiq_regs[i - 24];
6386         }
6387         env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
6388         env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
6389     }
6390 
6391     env->pc = env->regs[15];
6392 }
6393 
6394 /* Function used to synchronize QEMU's AArch32 register set with AArch64
6395  * register set.  This is necessary when switching between AArch32 and AArch64
6396  * execution state.
6397  */
6398 void aarch64_sync_64_to_32(CPUARMState *env)
6399 {
6400     int i;
6401     uint32_t mode = env->uncached_cpsr & CPSR_M;
6402 
6403     /* We can blanket copy X[0:7] to R[0:7] */
6404     for (i = 0; i < 8; i++) {
6405         env->regs[i] = env->xregs[i];
6406     }
6407 
6408     /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
6409      * Otherwise, we copy x8-x12 into the banked user regs.
6410      */
6411     if (mode == ARM_CPU_MODE_FIQ) {
6412         for (i = 8; i < 13; i++) {
6413             env->usr_regs[i - 8] = env->xregs[i];
6414         }
6415     } else {
6416         for (i = 8; i < 13; i++) {
6417             env->regs[i] = env->xregs[i];
6418         }
6419     }
6420 
6421     /* Registers r13 & r14 depend on the current mode.
6422      * If we are in a given mode, we copy the corresponding x registers to r13
6423      * and r14.  Otherwise, we copy the x register to the banked r13 and r14
6424      * for the mode.
6425      */
6426     if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6427         env->regs[13] = env->xregs[13];
6428         env->regs[14] = env->xregs[14];
6429     } else {
6430         env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
6431 
6432         /* HYP is an exception in that it does not have its own banked r14 but
6433          * shares the USR r14
6434          */
6435         if (mode == ARM_CPU_MODE_HYP) {
6436             env->regs[14] = env->xregs[14];
6437         } else {
6438             env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
6439         }
6440     }
6441 
6442     if (mode == ARM_CPU_MODE_HYP) {
6443         env->regs[13] = env->xregs[15];
6444     } else {
6445         env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
6446     }
6447 
6448     if (mode == ARM_CPU_MODE_IRQ) {
6449         env->regs[14] = env->xregs[16];
6450         env->regs[13] = env->xregs[17];
6451     } else {
6452         env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
6453         env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
6454     }
6455 
6456     if (mode == ARM_CPU_MODE_SVC) {
6457         env->regs[14] = env->xregs[18];
6458         env->regs[13] = env->xregs[19];
6459     } else {
6460         env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
6461         env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
6462     }
6463 
6464     if (mode == ARM_CPU_MODE_ABT) {
6465         env->regs[14] = env->xregs[20];
6466         env->regs[13] = env->xregs[21];
6467     } else {
6468         env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
6469         env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
6470     }
6471 
6472     if (mode == ARM_CPU_MODE_UND) {
6473         env->regs[14] = env->xregs[22];
6474         env->regs[13] = env->xregs[23];
6475     } else {
6476         env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
6477         env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
6478     }
6479 
6480     /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6481      * mode, then we can copy to r8-r14.  Otherwise, we copy to the
6482      * FIQ bank for r8-r14.
6483      */
6484     if (mode == ARM_CPU_MODE_FIQ) {
6485         for (i = 24; i < 31; i++) {
6486             env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
6487         }
6488     } else {
6489         for (i = 24; i < 29; i++) {
6490             env->fiq_regs[i - 24] = env->xregs[i];
6491         }
6492         env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
6493         env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
6494     }
6495 
6496     env->regs[15] = env->pc;
6497 }
6498 
6499 static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
6500 {
6501     ARMCPU *cpu = ARM_CPU(cs);
6502     CPUARMState *env = &cpu->env;
6503     uint32_t addr;
6504     uint32_t mask;
6505     int new_mode;
6506     uint32_t offset;
6507     uint32_t moe;
6508 
6509     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
6510     switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
6511     case EC_BREAKPOINT:
6512     case EC_BREAKPOINT_SAME_EL:
6513         moe = 1;
6514         break;
6515     case EC_WATCHPOINT:
6516     case EC_WATCHPOINT_SAME_EL:
6517         moe = 10;
6518         break;
6519     case EC_AA32_BKPT:
6520         moe = 3;
6521         break;
6522     case EC_VECTORCATCH:
6523         moe = 5;
6524         break;
6525     default:
6526         moe = 0;
6527         break;
6528     }
6529 
6530     if (moe) {
6531         env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
6532     }
6533 
6534     /* TODO: Vectored interrupt controller.  */
6535     switch (cs->exception_index) {
6536     case EXCP_UDEF:
6537         new_mode = ARM_CPU_MODE_UND;
6538         addr = 0x04;
6539         mask = CPSR_I;
6540         if (env->thumb)
6541             offset = 2;
6542         else
6543             offset = 4;
6544         break;
6545     case EXCP_SWI:
6546         new_mode = ARM_CPU_MODE_SVC;
6547         addr = 0x08;
6548         mask = CPSR_I;
6549         /* The PC already points to the next instruction.  */
6550         offset = 0;
6551         break;
6552     case EXCP_BKPT:
6553         env->exception.fsr = 2;
6554         /* Fall through to prefetch abort.  */
6555     case EXCP_PREFETCH_ABORT:
6556         A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
6557         A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
6558         qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
6559                       env->exception.fsr, (uint32_t)env->exception.vaddress);
6560         new_mode = ARM_CPU_MODE_ABT;
6561         addr = 0x0c;
6562         mask = CPSR_A | CPSR_I;
6563         offset = 4;
6564         break;
6565     case EXCP_DATA_ABORT:
6566         A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
6567         A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
6568         qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
6569                       env->exception.fsr,
6570                       (uint32_t)env->exception.vaddress);
6571         new_mode = ARM_CPU_MODE_ABT;
6572         addr = 0x10;
6573         mask = CPSR_A | CPSR_I;
6574         offset = 8;
6575         break;
6576     case EXCP_IRQ:
6577         new_mode = ARM_CPU_MODE_IRQ;
6578         addr = 0x18;
6579         /* Disable IRQ and imprecise data aborts.  */
6580         mask = CPSR_A | CPSR_I;
6581         offset = 4;
6582         if (env->cp15.scr_el3 & SCR_IRQ) {
6583             /* IRQ routed to monitor mode */
6584             new_mode = ARM_CPU_MODE_MON;
6585             mask |= CPSR_F;
6586         }
6587         break;
6588     case EXCP_FIQ:
6589         new_mode = ARM_CPU_MODE_FIQ;
6590         addr = 0x1c;
6591         /* Disable FIQ, IRQ and imprecise data aborts.  */
6592         mask = CPSR_A | CPSR_I | CPSR_F;
6593         if (env->cp15.scr_el3 & SCR_FIQ) {
6594             /* FIQ routed to monitor mode */
6595             new_mode = ARM_CPU_MODE_MON;
6596         }
6597         offset = 4;
6598         break;
6599     case EXCP_VIRQ:
6600         new_mode = ARM_CPU_MODE_IRQ;
6601         addr = 0x18;
6602         /* Disable IRQ and imprecise data aborts.  */
6603         mask = CPSR_A | CPSR_I;
6604         offset = 4;
6605         break;
6606     case EXCP_VFIQ:
6607         new_mode = ARM_CPU_MODE_FIQ;
6608         addr = 0x1c;
6609         /* Disable FIQ, IRQ and imprecise data aborts.  */
6610         mask = CPSR_A | CPSR_I | CPSR_F;
6611         offset = 4;
6612         break;
6613     case EXCP_SMC:
6614         new_mode = ARM_CPU_MODE_MON;
6615         addr = 0x08;
6616         mask = CPSR_A | CPSR_I | CPSR_F;
6617         offset = 0;
6618         break;
6619     default:
6620         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6621         return; /* Never happens.  Keep compiler happy.  */
6622     }
6623 
6624     if (new_mode == ARM_CPU_MODE_MON) {
6625         addr += env->cp15.mvbar;
6626     } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
6627         /* High vectors. When enabled, base address cannot be remapped. */
6628         addr += 0xffff0000;
6629     } else {
6630         /* ARM v7 architectures provide a vector base address register to remap
6631          * the interrupt vector table.
6632          * This register is only followed in non-monitor mode, and is banked.
6633          * Note: only bits 31:5 are valid.
6634          */
6635         addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
6636     }
6637 
6638     if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
6639         env->cp15.scr_el3 &= ~SCR_NS;
6640     }
6641 
6642     switch_mode (env, new_mode);
6643     /* For exceptions taken to AArch32 we must clear the SS bit in both
6644      * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
6645      */
6646     env->uncached_cpsr &= ~PSTATE_SS;
6647     env->spsr = cpsr_read(env);
6648     /* Clear IT bits.  */
6649     env->condexec_bits = 0;
6650     /* Switch to the new mode, and to the correct instruction set.  */
6651     env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
6652     /* Set new mode endianness */
6653     env->uncached_cpsr &= ~CPSR_E;
6654     if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
6655         env->uncached_cpsr |= CPSR_E;
6656     }
6657     env->daif |= mask;
6658     /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
6659      * and we should just guard the thumb mode on V4 */
6660     if (arm_feature(env, ARM_FEATURE_V4T)) {
6661         env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
6662     }
6663     env->regs[14] = env->regs[15] + offset;
6664     env->regs[15] = addr;
6665 }
6666 
6667 /* Handle exception entry to a target EL which is using AArch64 */
6668 static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
6669 {
6670     ARMCPU *cpu = ARM_CPU(cs);
6671     CPUARMState *env = &cpu->env;
6672     unsigned int new_el = env->exception.target_el;
6673     target_ulong addr = env->cp15.vbar_el[new_el];
6674     unsigned int new_mode = aarch64_pstate_mode(new_el, true);
6675 
6676     if (arm_current_el(env) < new_el) {
6677         /* Entry vector offset depends on whether the implemented EL
6678          * immediately lower than the target level is using AArch32 or AArch64
6679          */
6680         bool is_aa64;
6681 
6682         switch (new_el) {
6683         case 3:
6684             is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
6685             break;
6686         case 2:
6687             is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
6688             break;
6689         case 1:
6690             is_aa64 = is_a64(env);
6691             break;
6692         default:
6693             g_assert_not_reached();
6694         }
6695 
6696         if (is_aa64) {
6697             addr += 0x400;
6698         } else {
6699             addr += 0x600;
6700         }
6701     } else if (pstate_read(env) & PSTATE_SP) {
6702         addr += 0x200;
6703     }
6704 
6705     switch (cs->exception_index) {
6706     case EXCP_PREFETCH_ABORT:
6707     case EXCP_DATA_ABORT:
6708         env->cp15.far_el[new_el] = env->exception.vaddress;
6709         qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
6710                       env->cp15.far_el[new_el]);
6711         /* fall through */
6712     case EXCP_BKPT:
6713     case EXCP_UDEF:
6714     case EXCP_SWI:
6715     case EXCP_HVC:
6716     case EXCP_HYP_TRAP:
6717     case EXCP_SMC:
6718         env->cp15.esr_el[new_el] = env->exception.syndrome;
6719         break;
6720     case EXCP_IRQ:
6721     case EXCP_VIRQ:
6722         addr += 0x80;
6723         break;
6724     case EXCP_FIQ:
6725     case EXCP_VFIQ:
6726         addr += 0x100;
6727         break;
6728     case EXCP_SEMIHOST:
6729         qemu_log_mask(CPU_LOG_INT,
6730                       "...handling as semihosting call 0x%" PRIx64 "\n",
6731                       env->xregs[0]);
6732         env->xregs[0] = do_arm_semihosting(env);
6733         return;
6734     default:
6735         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6736     }
6737 
6738     if (is_a64(env)) {
6739         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
6740         aarch64_save_sp(env, arm_current_el(env));
6741         env->elr_el[new_el] = env->pc;
6742     } else {
6743         env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
6744         env->elr_el[new_el] = env->regs[15];
6745 
6746         aarch64_sync_32_to_64(env);
6747 
6748         env->condexec_bits = 0;
6749     }
6750     qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
6751                   env->elr_el[new_el]);
6752 
6753     pstate_write(env, PSTATE_DAIF | new_mode);
6754     env->aarch64 = 1;
6755     aarch64_restore_sp(env, new_el);
6756 
6757     env->pc = addr;
6758 
6759     qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
6760                   new_el, env->pc, pstate_read(env));
6761 }
6762 
6763 static inline bool check_for_semihosting(CPUState *cs)
6764 {
6765     /* Check whether this exception is a semihosting call; if so
6766      * then handle it and return true; otherwise return false.
6767      */
6768     ARMCPU *cpu = ARM_CPU(cs);
6769     CPUARMState *env = &cpu->env;
6770 
6771     if (is_a64(env)) {
6772         if (cs->exception_index == EXCP_SEMIHOST) {
6773             /* This is always the 64-bit semihosting exception.
6774              * The "is this usermode" and "is semihosting enabled"
6775              * checks have been done at translate time.
6776              */
6777             qemu_log_mask(CPU_LOG_INT,
6778                           "...handling as semihosting call 0x%" PRIx64 "\n",
6779                           env->xregs[0]);
6780             env->xregs[0] = do_arm_semihosting(env);
6781             return true;
6782         }
6783         return false;
6784     } else {
6785         uint32_t imm;
6786 
6787         /* Only intercept calls from privileged modes, to provide some
6788          * semblance of security.
6789          */
6790         if (cs->exception_index != EXCP_SEMIHOST &&
6791             (!semihosting_enabled() ||
6792              ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
6793             return false;
6794         }
6795 
6796         switch (cs->exception_index) {
6797         case EXCP_SEMIHOST:
6798             /* This is always a semihosting call; the "is this usermode"
6799              * and "is semihosting enabled" checks have been done at
6800              * translate time.
6801              */
6802             break;
6803         case EXCP_SWI:
6804             /* Check for semihosting interrupt.  */
6805             if (env->thumb) {
6806                 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
6807                     & 0xff;
6808                 if (imm == 0xab) {
6809                     break;
6810                 }
6811             } else {
6812                 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
6813                     & 0xffffff;
6814                 if (imm == 0x123456) {
6815                     break;
6816                 }
6817             }
6818             return false;
6819         case EXCP_BKPT:
6820             /* See if this is a semihosting syscall.  */
6821             if (env->thumb) {
6822                 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
6823                     & 0xff;
6824                 if (imm == 0xab) {
6825                     env->regs[15] += 2;
6826                     break;
6827                 }
6828             }
6829             return false;
6830         default:
6831             return false;
6832         }
6833 
6834         qemu_log_mask(CPU_LOG_INT,
6835                       "...handling as semihosting call 0x%x\n",
6836                       env->regs[0]);
6837         env->regs[0] = do_arm_semihosting(env);
6838         return true;
6839     }
6840 }
6841 
6842 /* Handle a CPU exception for A and R profile CPUs.
6843  * Do any appropriate logging, handle PSCI calls, and then hand off
6844  * to the AArch64-entry or AArch32-entry function depending on the
6845  * target exception level's register width.
6846  */
6847 void arm_cpu_do_interrupt(CPUState *cs)
6848 {
6849     ARMCPU *cpu = ARM_CPU(cs);
6850     CPUARMState *env = &cpu->env;
6851     unsigned int new_el = env->exception.target_el;
6852 
6853     assert(!arm_feature(env, ARM_FEATURE_M));
6854 
6855     arm_log_exception(cs->exception_index);
6856     qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
6857                   new_el);
6858     if (qemu_loglevel_mask(CPU_LOG_INT)
6859         && !excp_is_internal(cs->exception_index)) {
6860         qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
6861                       env->exception.syndrome >> ARM_EL_EC_SHIFT,
6862                       env->exception.syndrome);
6863     }
6864 
6865     if (arm_is_psci_call(cpu, cs->exception_index)) {
6866         arm_handle_psci_call(cpu);
6867         qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
6868         return;
6869     }
6870 
6871     /* Semihosting semantics depend on the register width of the
6872      * code that caused the exception, not the target exception level,
6873      * so must be handled here.
6874      */
6875     if (check_for_semihosting(cs)) {
6876         return;
6877     }
6878 
6879     assert(!excp_is_internal(cs->exception_index));
6880     if (arm_el_is_aa64(env, new_el)) {
6881         arm_cpu_do_interrupt_aarch64(cs);
6882     } else {
6883         arm_cpu_do_interrupt_aarch32(cs);
6884     }
6885 
6886     /* Hooks may change global state so BQL should be held, also the
6887      * BQL needs to be held for any modification of
6888      * cs->interrupt_request.
6889      */
6890     g_assert(qemu_mutex_iothread_locked());
6891 
6892     arm_call_el_change_hook(cpu);
6893 
6894     if (!kvm_enabled()) {
6895         cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
6896     }
6897 }
6898 
6899 /* Return the exception level which controls this address translation regime */
6900 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
6901 {
6902     switch (mmu_idx) {
6903     case ARMMMUIdx_S2NS:
6904     case ARMMMUIdx_S1E2:
6905         return 2;
6906     case ARMMMUIdx_S1E3:
6907         return 3;
6908     case ARMMMUIdx_S1SE0:
6909         return arm_el_is_aa64(env, 3) ? 1 : 3;
6910     case ARMMMUIdx_S1SE1:
6911     case ARMMMUIdx_S1NSE0:
6912     case ARMMMUIdx_S1NSE1:
6913         return 1;
6914     default:
6915         g_assert_not_reached();
6916     }
6917 }
6918 
6919 /* Return true if this address translation regime is secure */
6920 static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
6921 {
6922     switch (mmu_idx) {
6923     case ARMMMUIdx_S12NSE0:
6924     case ARMMMUIdx_S12NSE1:
6925     case ARMMMUIdx_S1NSE0:
6926     case ARMMMUIdx_S1NSE1:
6927     case ARMMMUIdx_S1E2:
6928     case ARMMMUIdx_S2NS:
6929         return false;
6930     case ARMMMUIdx_S1E3:
6931     case ARMMMUIdx_S1SE0:
6932     case ARMMMUIdx_S1SE1:
6933         return true;
6934     default:
6935         g_assert_not_reached();
6936     }
6937 }
6938 
6939 /* Return the SCTLR value which controls this address translation regime */
6940 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
6941 {
6942     return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
6943 }
6944 
6945 /* Return true if the specified stage of address translation is disabled */
6946 static inline bool regime_translation_disabled(CPUARMState *env,
6947                                                ARMMMUIdx mmu_idx)
6948 {
6949     if (mmu_idx == ARMMMUIdx_S2NS) {
6950         return (env->cp15.hcr_el2 & HCR_VM) == 0;
6951     }
6952     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
6953 }
6954 
6955 static inline bool regime_translation_big_endian(CPUARMState *env,
6956                                                  ARMMMUIdx mmu_idx)
6957 {
6958     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
6959 }
6960 
6961 /* Return the TCR controlling this translation regime */
6962 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
6963 {
6964     if (mmu_idx == ARMMMUIdx_S2NS) {
6965         return &env->cp15.vtcr_el2;
6966     }
6967     return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
6968 }
6969 
6970 /* Returns TBI0 value for current regime el */
6971 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
6972 {
6973     TCR *tcr;
6974     uint32_t el;
6975 
6976     /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
6977        * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
6978        */
6979     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
6980         mmu_idx += ARMMMUIdx_S1NSE0;
6981     }
6982 
6983     tcr = regime_tcr(env, mmu_idx);
6984     el = regime_el(env, mmu_idx);
6985 
6986     if (el > 1) {
6987         return extract64(tcr->raw_tcr, 20, 1);
6988     } else {
6989         return extract64(tcr->raw_tcr, 37, 1);
6990     }
6991 }
6992 
6993 /* Returns TBI1 value for current regime el */
6994 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
6995 {
6996     TCR *tcr;
6997     uint32_t el;
6998 
6999     /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
7000        * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
7001        */
7002     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
7003         mmu_idx += ARMMMUIdx_S1NSE0;
7004     }
7005 
7006     tcr = regime_tcr(env, mmu_idx);
7007     el = regime_el(env, mmu_idx);
7008 
7009     if (el > 1) {
7010         return 0;
7011     } else {
7012         return extract64(tcr->raw_tcr, 38, 1);
7013     }
7014 }
7015 
7016 /* Return the TTBR associated with this translation regime */
7017 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
7018                                    int ttbrn)
7019 {
7020     if (mmu_idx == ARMMMUIdx_S2NS) {
7021         return env->cp15.vttbr_el2;
7022     }
7023     if (ttbrn == 0) {
7024         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
7025     } else {
7026         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
7027     }
7028 }
7029 
7030 /* Return true if the translation regime is using LPAE format page tables */
7031 static inline bool regime_using_lpae_format(CPUARMState *env,
7032                                             ARMMMUIdx mmu_idx)
7033 {
7034     int el = regime_el(env, mmu_idx);
7035     if (el == 2 || arm_el_is_aa64(env, el)) {
7036         return true;
7037     }
7038     if (arm_feature(env, ARM_FEATURE_LPAE)
7039         && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
7040         return true;
7041     }
7042     return false;
7043 }
7044 
7045 /* Returns true if the stage 1 translation regime is using LPAE format page
7046  * tables. Used when raising alignment exceptions, whose FSR changes depending
7047  * on whether the long or short descriptor format is in use. */
7048 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
7049 {
7050     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
7051         mmu_idx += ARMMMUIdx_S1NSE0;
7052     }
7053 
7054     return regime_using_lpae_format(env, mmu_idx);
7055 }
7056 
7057 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
7058 {
7059     switch (mmu_idx) {
7060     case ARMMMUIdx_S1SE0:
7061     case ARMMMUIdx_S1NSE0:
7062         return true;
7063     default:
7064         return false;
7065     case ARMMMUIdx_S12NSE0:
7066     case ARMMMUIdx_S12NSE1:
7067         g_assert_not_reached();
7068     }
7069 }
7070 
7071 /* Translate section/page access permissions to page
7072  * R/W protection flags
7073  *
7074  * @env:         CPUARMState
7075  * @mmu_idx:     MMU index indicating required translation regime
7076  * @ap:          The 3-bit access permissions (AP[2:0])
7077  * @domain_prot: The 2-bit domain access permissions
7078  */
7079 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
7080                                 int ap, int domain_prot)
7081 {
7082     bool is_user = regime_is_user(env, mmu_idx);
7083 
7084     if (domain_prot == 3) {
7085         return PAGE_READ | PAGE_WRITE;
7086     }
7087 
7088     switch (ap) {
7089     case 0:
7090         if (arm_feature(env, ARM_FEATURE_V7)) {
7091             return 0;
7092         }
7093         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
7094         case SCTLR_S:
7095             return is_user ? 0 : PAGE_READ;
7096         case SCTLR_R:
7097             return PAGE_READ;
7098         default:
7099             return 0;
7100         }
7101     case 1:
7102         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
7103     case 2:
7104         if (is_user) {
7105             return PAGE_READ;
7106         } else {
7107             return PAGE_READ | PAGE_WRITE;
7108         }
7109     case 3:
7110         return PAGE_READ | PAGE_WRITE;
7111     case 4: /* Reserved.  */
7112         return 0;
7113     case 5:
7114         return is_user ? 0 : PAGE_READ;
7115     case 6:
7116         return PAGE_READ;
7117     case 7:
7118         if (!arm_feature(env, ARM_FEATURE_V6K)) {
7119             return 0;
7120         }
7121         return PAGE_READ;
7122     default:
7123         g_assert_not_reached();
7124     }
7125 }
7126 
7127 /* Translate section/page access permissions to page
7128  * R/W protection flags.
7129  *
7130  * @ap:      The 2-bit simple AP (AP[2:1])
7131  * @is_user: TRUE if accessing from PL0
7132  */
7133 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
7134 {
7135     switch (ap) {
7136     case 0:
7137         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
7138     case 1:
7139         return PAGE_READ | PAGE_WRITE;
7140     case 2:
7141         return is_user ? 0 : PAGE_READ;
7142     case 3:
7143         return PAGE_READ;
7144     default:
7145         g_assert_not_reached();
7146     }
7147 }
7148 
7149 static inline int
7150 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
7151 {
7152     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
7153 }
7154 
7155 /* Translate S2 section/page access permissions to protection flags
7156  *
7157  * @env:     CPUARMState
7158  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
7159  * @xn:      XN (execute-never) bit
7160  */
7161 static int get_S2prot(CPUARMState *env, int s2ap, int xn)
7162 {
7163     int prot = 0;
7164 
7165     if (s2ap & 1) {
7166         prot |= PAGE_READ;
7167     }
7168     if (s2ap & 2) {
7169         prot |= PAGE_WRITE;
7170     }
7171     if (!xn) {
7172         if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
7173             prot |= PAGE_EXEC;
7174         }
7175     }
7176     return prot;
7177 }
7178 
7179 /* Translate section/page access permissions to protection flags
7180  *
7181  * @env:     CPUARMState
7182  * @mmu_idx: MMU index indicating required translation regime
7183  * @is_aa64: TRUE if AArch64
7184  * @ap:      The 2-bit simple AP (AP[2:1])
7185  * @ns:      NS (non-secure) bit
7186  * @xn:      XN (execute-never) bit
7187  * @pxn:     PXN (privileged execute-never) bit
7188  */
7189 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
7190                       int ap, int ns, int xn, int pxn)
7191 {
7192     bool is_user = regime_is_user(env, mmu_idx);
7193     int prot_rw, user_rw;
7194     bool have_wxn;
7195     int wxn = 0;
7196 
7197     assert(mmu_idx != ARMMMUIdx_S2NS);
7198 
7199     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
7200     if (is_user) {
7201         prot_rw = user_rw;
7202     } else {
7203         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
7204     }
7205 
7206     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
7207         return prot_rw;
7208     }
7209 
7210     /* TODO have_wxn should be replaced with
7211      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
7212      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
7213      * compatible processors have EL2, which is required for [U]WXN.
7214      */
7215     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
7216 
7217     if (have_wxn) {
7218         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
7219     }
7220 
7221     if (is_aa64) {
7222         switch (regime_el(env, mmu_idx)) {
7223         case 1:
7224             if (!is_user) {
7225                 xn = pxn || (user_rw & PAGE_WRITE);
7226             }
7227             break;
7228         case 2:
7229         case 3:
7230             break;
7231         }
7232     } else if (arm_feature(env, ARM_FEATURE_V7)) {
7233         switch (regime_el(env, mmu_idx)) {
7234         case 1:
7235         case 3:
7236             if (is_user) {
7237                 xn = xn || !(user_rw & PAGE_READ);
7238             } else {
7239                 int uwxn = 0;
7240                 if (have_wxn) {
7241                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
7242                 }
7243                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
7244                      (uwxn && (user_rw & PAGE_WRITE));
7245             }
7246             break;
7247         case 2:
7248             break;
7249         }
7250     } else {
7251         xn = wxn = 0;
7252     }
7253 
7254     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
7255         return prot_rw;
7256     }
7257     return prot_rw | PAGE_EXEC;
7258 }
7259 
7260 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
7261                                      uint32_t *table, uint32_t address)
7262 {
7263     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
7264     TCR *tcr = regime_tcr(env, mmu_idx);
7265 
7266     if (address & tcr->mask) {
7267         if (tcr->raw_tcr & TTBCR_PD1) {
7268             /* Translation table walk disabled for TTBR1 */
7269             return false;
7270         }
7271         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
7272     } else {
7273         if (tcr->raw_tcr & TTBCR_PD0) {
7274             /* Translation table walk disabled for TTBR0 */
7275             return false;
7276         }
7277         *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
7278     }
7279     *table |= (address >> 18) & 0x3ffc;
7280     return true;
7281 }
7282 
7283 /* Translate a S1 pagetable walk through S2 if needed.  */
7284 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
7285                                hwaddr addr, MemTxAttrs txattrs,
7286                                uint32_t *fsr,
7287                                ARMMMUFaultInfo *fi)
7288 {
7289     if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
7290         !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
7291         target_ulong s2size;
7292         hwaddr s2pa;
7293         int s2prot;
7294         int ret;
7295 
7296         ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
7297                                  &txattrs, &s2prot, &s2size, fsr, fi);
7298         if (ret) {
7299             fi->s2addr = addr;
7300             fi->stage2 = true;
7301             fi->s1ptw = true;
7302             return ~0;
7303         }
7304         addr = s2pa;
7305     }
7306     return addr;
7307 }
7308 
7309 /* All loads done in the course of a page table walk go through here.
7310  * TODO: rather than ignoring errors from physical memory reads (which
7311  * are external aborts in ARM terminology) we should propagate this
7312  * error out so that we can turn it into a Data Abort if this walk
7313  * was being done for a CPU load/store or an address translation instruction
7314  * (but not if it was for a debug access).
7315  */
7316 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
7317                             ARMMMUIdx mmu_idx, uint32_t *fsr,
7318                             ARMMMUFaultInfo *fi)
7319 {
7320     ARMCPU *cpu = ARM_CPU(cs);
7321     CPUARMState *env = &cpu->env;
7322     MemTxAttrs attrs = {};
7323     AddressSpace *as;
7324 
7325     attrs.secure = is_secure;
7326     as = arm_addressspace(cs, attrs);
7327     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
7328     if (fi->s1ptw) {
7329         return 0;
7330     }
7331     if (regime_translation_big_endian(env, mmu_idx)) {
7332         return address_space_ldl_be(as, addr, attrs, NULL);
7333     } else {
7334         return address_space_ldl_le(as, addr, attrs, NULL);
7335     }
7336 }
7337 
7338 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
7339                             ARMMMUIdx mmu_idx, uint32_t *fsr,
7340                             ARMMMUFaultInfo *fi)
7341 {
7342     ARMCPU *cpu = ARM_CPU(cs);
7343     CPUARMState *env = &cpu->env;
7344     MemTxAttrs attrs = {};
7345     AddressSpace *as;
7346 
7347     attrs.secure = is_secure;
7348     as = arm_addressspace(cs, attrs);
7349     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
7350     if (fi->s1ptw) {
7351         return 0;
7352     }
7353     if (regime_translation_big_endian(env, mmu_idx)) {
7354         return address_space_ldq_be(as, addr, attrs, NULL);
7355     } else {
7356         return address_space_ldq_le(as, addr, attrs, NULL);
7357     }
7358 }
7359 
7360 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
7361                              int access_type, ARMMMUIdx mmu_idx,
7362                              hwaddr *phys_ptr, int *prot,
7363                              target_ulong *page_size, uint32_t *fsr,
7364                              ARMMMUFaultInfo *fi)
7365 {
7366     CPUState *cs = CPU(arm_env_get_cpu(env));
7367     int code;
7368     uint32_t table;
7369     uint32_t desc;
7370     int type;
7371     int ap;
7372     int domain = 0;
7373     int domain_prot;
7374     hwaddr phys_addr;
7375     uint32_t dacr;
7376 
7377     /* Pagetable walk.  */
7378     /* Lookup l1 descriptor.  */
7379     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
7380         /* Section translation fault if page walk is disabled by PD0 or PD1 */
7381         code = 5;
7382         goto do_fault;
7383     }
7384     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7385                        mmu_idx, fsr, fi);
7386     type = (desc & 3);
7387     domain = (desc >> 5) & 0x0f;
7388     if (regime_el(env, mmu_idx) == 1) {
7389         dacr = env->cp15.dacr_ns;
7390     } else {
7391         dacr = env->cp15.dacr_s;
7392     }
7393     domain_prot = (dacr >> (domain * 2)) & 3;
7394     if (type == 0) {
7395         /* Section translation fault.  */
7396         code = 5;
7397         goto do_fault;
7398     }
7399     if (domain_prot == 0 || domain_prot == 2) {
7400         if (type == 2)
7401             code = 9; /* Section domain fault.  */
7402         else
7403             code = 11; /* Page domain fault.  */
7404         goto do_fault;
7405     }
7406     if (type == 2) {
7407         /* 1Mb section.  */
7408         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7409         ap = (desc >> 10) & 3;
7410         code = 13;
7411         *page_size = 1024 * 1024;
7412     } else {
7413         /* Lookup l2 entry.  */
7414         if (type == 1) {
7415             /* Coarse pagetable.  */
7416             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
7417         } else {
7418             /* Fine pagetable.  */
7419             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
7420         }
7421         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7422                            mmu_idx, fsr, fi);
7423         switch (desc & 3) {
7424         case 0: /* Page translation fault.  */
7425             code = 7;
7426             goto do_fault;
7427         case 1: /* 64k page.  */
7428             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
7429             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
7430             *page_size = 0x10000;
7431             break;
7432         case 2: /* 4k page.  */
7433             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7434             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
7435             *page_size = 0x1000;
7436             break;
7437         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
7438             if (type == 1) {
7439                 /* ARMv6/XScale extended small page format */
7440                 if (arm_feature(env, ARM_FEATURE_XSCALE)
7441                     || arm_feature(env, ARM_FEATURE_V6)) {
7442                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7443                     *page_size = 0x1000;
7444                 } else {
7445                     /* UNPREDICTABLE in ARMv5; we choose to take a
7446                      * page translation fault.
7447                      */
7448                     code = 7;
7449                     goto do_fault;
7450                 }
7451             } else {
7452                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
7453                 *page_size = 0x400;
7454             }
7455             ap = (desc >> 4) & 3;
7456             break;
7457         default:
7458             /* Never happens, but compiler isn't smart enough to tell.  */
7459             abort();
7460         }
7461         code = 15;
7462     }
7463     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
7464     *prot |= *prot ? PAGE_EXEC : 0;
7465     if (!(*prot & (1 << access_type))) {
7466         /* Access permission fault.  */
7467         goto do_fault;
7468     }
7469     *phys_ptr = phys_addr;
7470     return false;
7471 do_fault:
7472     *fsr = code | (domain << 4);
7473     return true;
7474 }
7475 
7476 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
7477                              int access_type, ARMMMUIdx mmu_idx,
7478                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
7479                              target_ulong *page_size, uint32_t *fsr,
7480                              ARMMMUFaultInfo *fi)
7481 {
7482     CPUState *cs = CPU(arm_env_get_cpu(env));
7483     int code;
7484     uint32_t table;
7485     uint32_t desc;
7486     uint32_t xn;
7487     uint32_t pxn = 0;
7488     int type;
7489     int ap;
7490     int domain = 0;
7491     int domain_prot;
7492     hwaddr phys_addr;
7493     uint32_t dacr;
7494     bool ns;
7495 
7496     /* Pagetable walk.  */
7497     /* Lookup l1 descriptor.  */
7498     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
7499         /* Section translation fault if page walk is disabled by PD0 or PD1 */
7500         code = 5;
7501         goto do_fault;
7502     }
7503     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7504                        mmu_idx, fsr, fi);
7505     type = (desc & 3);
7506     if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
7507         /* Section translation fault, or attempt to use the encoding
7508          * which is Reserved on implementations without PXN.
7509          */
7510         code = 5;
7511         goto do_fault;
7512     }
7513     if ((type == 1) || !(desc & (1 << 18))) {
7514         /* Page or Section.  */
7515         domain = (desc >> 5) & 0x0f;
7516     }
7517     if (regime_el(env, mmu_idx) == 1) {
7518         dacr = env->cp15.dacr_ns;
7519     } else {
7520         dacr = env->cp15.dacr_s;
7521     }
7522     domain_prot = (dacr >> (domain * 2)) & 3;
7523     if (domain_prot == 0 || domain_prot == 2) {
7524         if (type != 1) {
7525             code = 9; /* Section domain fault.  */
7526         } else {
7527             code = 11; /* Page domain fault.  */
7528         }
7529         goto do_fault;
7530     }
7531     if (type != 1) {
7532         if (desc & (1 << 18)) {
7533             /* Supersection.  */
7534             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
7535             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
7536             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
7537             *page_size = 0x1000000;
7538         } else {
7539             /* Section.  */
7540             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7541             *page_size = 0x100000;
7542         }
7543         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
7544         xn = desc & (1 << 4);
7545         pxn = desc & 1;
7546         code = 13;
7547         ns = extract32(desc, 19, 1);
7548     } else {
7549         if (arm_feature(env, ARM_FEATURE_PXN)) {
7550             pxn = (desc >> 2) & 1;
7551         }
7552         ns = extract32(desc, 3, 1);
7553         /* Lookup l2 entry.  */
7554         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
7555         desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7556                            mmu_idx, fsr, fi);
7557         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
7558         switch (desc & 3) {
7559         case 0: /* Page translation fault.  */
7560             code = 7;
7561             goto do_fault;
7562         case 1: /* 64k page.  */
7563             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
7564             xn = desc & (1 << 15);
7565             *page_size = 0x10000;
7566             break;
7567         case 2: case 3: /* 4k page.  */
7568             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7569             xn = desc & 1;
7570             *page_size = 0x1000;
7571             break;
7572         default:
7573             /* Never happens, but compiler isn't smart enough to tell.  */
7574             abort();
7575         }
7576         code = 15;
7577     }
7578     if (domain_prot == 3) {
7579         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
7580     } else {
7581         if (pxn && !regime_is_user(env, mmu_idx)) {
7582             xn = 1;
7583         }
7584         if (xn && access_type == 2)
7585             goto do_fault;
7586 
7587         if (arm_feature(env, ARM_FEATURE_V6K) &&
7588                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
7589             /* The simplified model uses AP[0] as an access control bit.  */
7590             if ((ap & 1) == 0) {
7591                 /* Access flag fault.  */
7592                 code = (code == 15) ? 6 : 3;
7593                 goto do_fault;
7594             }
7595             *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
7596         } else {
7597             *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
7598         }
7599         if (*prot && !xn) {
7600             *prot |= PAGE_EXEC;
7601         }
7602         if (!(*prot & (1 << access_type))) {
7603             /* Access permission fault.  */
7604             goto do_fault;
7605         }
7606     }
7607     if (ns) {
7608         /* The NS bit will (as required by the architecture) have no effect if
7609          * the CPU doesn't support TZ or this is a non-secure translation
7610          * regime, because the attribute will already be non-secure.
7611          */
7612         attrs->secure = false;
7613     }
7614     *phys_ptr = phys_addr;
7615     return false;
7616 do_fault:
7617     *fsr = code | (domain << 4);
7618     return true;
7619 }
7620 
7621 /* Fault type for long-descriptor MMU fault reporting; this corresponds
7622  * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
7623  */
7624 typedef enum {
7625     translation_fault = 1,
7626     access_fault = 2,
7627     permission_fault = 3,
7628 } MMUFaultType;
7629 
7630 /*
7631  * check_s2_mmu_setup
7632  * @cpu:        ARMCPU
7633  * @is_aa64:    True if the translation regime is in AArch64 state
7634  * @startlevel: Suggested starting level
7635  * @inputsize:  Bitsize of IPAs
7636  * @stride:     Page-table stride (See the ARM ARM)
7637  *
7638  * Returns true if the suggested S2 translation parameters are OK and
7639  * false otherwise.
7640  */
7641 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
7642                                int inputsize, int stride)
7643 {
7644     const int grainsize = stride + 3;
7645     int startsizecheck;
7646 
7647     /* Negative levels are never allowed.  */
7648     if (level < 0) {
7649         return false;
7650     }
7651 
7652     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
7653     if (startsizecheck < 1 || startsizecheck > stride + 4) {
7654         return false;
7655     }
7656 
7657     if (is_aa64) {
7658         CPUARMState *env = &cpu->env;
7659         unsigned int pamax = arm_pamax(cpu);
7660 
7661         switch (stride) {
7662         case 13: /* 64KB Pages.  */
7663             if (level == 0 || (level == 1 && pamax <= 42)) {
7664                 return false;
7665             }
7666             break;
7667         case 11: /* 16KB Pages.  */
7668             if (level == 0 || (level == 1 && pamax <= 40)) {
7669                 return false;
7670             }
7671             break;
7672         case 9: /* 4KB Pages.  */
7673             if (level == 0 && pamax <= 42) {
7674                 return false;
7675             }
7676             break;
7677         default:
7678             g_assert_not_reached();
7679         }
7680 
7681         /* Inputsize checks.  */
7682         if (inputsize > pamax &&
7683             (arm_el_is_aa64(env, 1) || inputsize > 40)) {
7684             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
7685             return false;
7686         }
7687     } else {
7688         /* AArch32 only supports 4KB pages. Assert on that.  */
7689         assert(stride == 9);
7690 
7691         if (level == 0) {
7692             return false;
7693         }
7694     }
7695     return true;
7696 }
7697 
7698 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
7699                                int access_type, ARMMMUIdx mmu_idx,
7700                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
7701                                target_ulong *page_size_ptr, uint32_t *fsr,
7702                                ARMMMUFaultInfo *fi)
7703 {
7704     ARMCPU *cpu = arm_env_get_cpu(env);
7705     CPUState *cs = CPU(cpu);
7706     /* Read an LPAE long-descriptor translation table. */
7707     MMUFaultType fault_type = translation_fault;
7708     uint32_t level;
7709     uint32_t epd = 0;
7710     int32_t t0sz, t1sz;
7711     uint32_t tg;
7712     uint64_t ttbr;
7713     int ttbr_select;
7714     hwaddr descaddr, indexmask, indexmask_grainsize;
7715     uint32_t tableattrs;
7716     target_ulong page_size;
7717     uint32_t attrs;
7718     int32_t stride = 9;
7719     int32_t addrsize;
7720     int inputsize;
7721     int32_t tbi = 0;
7722     TCR *tcr = regime_tcr(env, mmu_idx);
7723     int ap, ns, xn, pxn;
7724     uint32_t el = regime_el(env, mmu_idx);
7725     bool ttbr1_valid = true;
7726     uint64_t descaddrmask;
7727     bool aarch64 = arm_el_is_aa64(env, el);
7728 
7729     /* TODO:
7730      * This code does not handle the different format TCR for VTCR_EL2.
7731      * This code also does not support shareability levels.
7732      * Attribute and permission bit handling should also be checked when adding
7733      * support for those page table walks.
7734      */
7735     if (aarch64) {
7736         level = 0;
7737         addrsize = 64;
7738         if (el > 1) {
7739             if (mmu_idx != ARMMMUIdx_S2NS) {
7740                 tbi = extract64(tcr->raw_tcr, 20, 1);
7741             }
7742         } else {
7743             if (extract64(address, 55, 1)) {
7744                 tbi = extract64(tcr->raw_tcr, 38, 1);
7745             } else {
7746                 tbi = extract64(tcr->raw_tcr, 37, 1);
7747             }
7748         }
7749         tbi *= 8;
7750 
7751         /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
7752          * invalid.
7753          */
7754         if (el > 1) {
7755             ttbr1_valid = false;
7756         }
7757     } else {
7758         level = 1;
7759         addrsize = 32;
7760         /* There is no TTBR1 for EL2 */
7761         if (el == 2) {
7762             ttbr1_valid = false;
7763         }
7764     }
7765 
7766     /* Determine whether this address is in the region controlled by
7767      * TTBR0 or TTBR1 (or if it is in neither region and should fault).
7768      * This is a Non-secure PL0/1 stage 1 translation, so controlled by
7769      * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
7770      */
7771     if (aarch64) {
7772         /* AArch64 translation.  */
7773         t0sz = extract32(tcr->raw_tcr, 0, 6);
7774         t0sz = MIN(t0sz, 39);
7775         t0sz = MAX(t0sz, 16);
7776     } else if (mmu_idx != ARMMMUIdx_S2NS) {
7777         /* AArch32 stage 1 translation.  */
7778         t0sz = extract32(tcr->raw_tcr, 0, 3);
7779     } else {
7780         /* AArch32 stage 2 translation.  */
7781         bool sext = extract32(tcr->raw_tcr, 4, 1);
7782         bool sign = extract32(tcr->raw_tcr, 3, 1);
7783         /* Address size is 40-bit for a stage 2 translation,
7784          * and t0sz can be negative (from -8 to 7),
7785          * so we need to adjust it to use the TTBR selecting logic below.
7786          */
7787         addrsize = 40;
7788         t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
7789 
7790         /* If the sign-extend bit is not the same as t0sz[3], the result
7791          * is unpredictable. Flag this as a guest error.  */
7792         if (sign != sext) {
7793             qemu_log_mask(LOG_GUEST_ERROR,
7794                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
7795         }
7796     }
7797     t1sz = extract32(tcr->raw_tcr, 16, 6);
7798     if (aarch64) {
7799         t1sz = MIN(t1sz, 39);
7800         t1sz = MAX(t1sz, 16);
7801     }
7802     if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
7803         /* there is a ttbr0 region and we are in it (high bits all zero) */
7804         ttbr_select = 0;
7805     } else if (ttbr1_valid && t1sz &&
7806                !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
7807         /* there is a ttbr1 region and we are in it (high bits all one) */
7808         ttbr_select = 1;
7809     } else if (!t0sz) {
7810         /* ttbr0 region is "everything not in the ttbr1 region" */
7811         ttbr_select = 0;
7812     } else if (!t1sz && ttbr1_valid) {
7813         /* ttbr1 region is "everything not in the ttbr0 region" */
7814         ttbr_select = 1;
7815     } else {
7816         /* in the gap between the two regions, this is a Translation fault */
7817         fault_type = translation_fault;
7818         goto do_fault;
7819     }
7820 
7821     /* Note that QEMU ignores shareability and cacheability attributes,
7822      * so we don't need to do anything with the SH, ORGN, IRGN fields
7823      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
7824      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
7825      * implement any ASID-like capability so we can ignore it (instead
7826      * we will always flush the TLB any time the ASID is changed).
7827      */
7828     if (ttbr_select == 0) {
7829         ttbr = regime_ttbr(env, mmu_idx, 0);
7830         if (el < 2) {
7831             epd = extract32(tcr->raw_tcr, 7, 1);
7832         }
7833         inputsize = addrsize - t0sz;
7834 
7835         tg = extract32(tcr->raw_tcr, 14, 2);
7836         if (tg == 1) { /* 64KB pages */
7837             stride = 13;
7838         }
7839         if (tg == 2) { /* 16KB pages */
7840             stride = 11;
7841         }
7842     } else {
7843         /* We should only be here if TTBR1 is valid */
7844         assert(ttbr1_valid);
7845 
7846         ttbr = regime_ttbr(env, mmu_idx, 1);
7847         epd = extract32(tcr->raw_tcr, 23, 1);
7848         inputsize = addrsize - t1sz;
7849 
7850         tg = extract32(tcr->raw_tcr, 30, 2);
7851         if (tg == 3)  { /* 64KB pages */
7852             stride = 13;
7853         }
7854         if (tg == 1) { /* 16KB pages */
7855             stride = 11;
7856         }
7857     }
7858 
7859     /* Here we should have set up all the parameters for the translation:
7860      * inputsize, ttbr, epd, stride, tbi
7861      */
7862 
7863     if (epd) {
7864         /* Translation table walk disabled => Translation fault on TLB miss
7865          * Note: This is always 0 on 64-bit EL2 and EL3.
7866          */
7867         goto do_fault;
7868     }
7869 
7870     if (mmu_idx != ARMMMUIdx_S2NS) {
7871         /* The starting level depends on the virtual address size (which can
7872          * be up to 48 bits) and the translation granule size. It indicates
7873          * the number of strides (stride bits at a time) needed to
7874          * consume the bits of the input address. In the pseudocode this is:
7875          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
7876          * where their 'inputsize' is our 'inputsize', 'grainsize' is
7877          * our 'stride + 3' and 'stride' is our 'stride'.
7878          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
7879          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
7880          * = 4 - (inputsize - 4) / stride;
7881          */
7882         level = 4 - (inputsize - 4) / stride;
7883     } else {
7884         /* For stage 2 translations the starting level is specified by the
7885          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
7886          */
7887         uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
7888         uint32_t startlevel;
7889         bool ok;
7890 
7891         if (!aarch64 || stride == 9) {
7892             /* AArch32 or 4KB pages */
7893             startlevel = 2 - sl0;
7894         } else {
7895             /* 16KB or 64KB pages */
7896             startlevel = 3 - sl0;
7897         }
7898 
7899         /* Check that the starting level is valid. */
7900         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
7901                                 inputsize, stride);
7902         if (!ok) {
7903             fault_type = translation_fault;
7904             goto do_fault;
7905         }
7906         level = startlevel;
7907     }
7908 
7909     indexmask_grainsize = (1ULL << (stride + 3)) - 1;
7910     indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
7911 
7912     /* Now we can extract the actual base address from the TTBR */
7913     descaddr = extract64(ttbr, 0, 48);
7914     descaddr &= ~indexmask;
7915 
7916     /* The address field in the descriptor goes up to bit 39 for ARMv7
7917      * but up to bit 47 for ARMv8, but we use the descaddrmask
7918      * up to bit 39 for AArch32, because we don't need other bits in that case
7919      * to construct next descriptor address (anyway they should be all zeroes).
7920      */
7921     descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
7922                    ~indexmask_grainsize;
7923 
7924     /* Secure accesses start with the page table in secure memory and
7925      * can be downgraded to non-secure at any step. Non-secure accesses
7926      * remain non-secure. We implement this by just ORing in the NSTable/NS
7927      * bits at each step.
7928      */
7929     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
7930     for (;;) {
7931         uint64_t descriptor;
7932         bool nstable;
7933 
7934         descaddr |= (address >> (stride * (4 - level))) & indexmask;
7935         descaddr &= ~7ULL;
7936         nstable = extract32(tableattrs, 4, 1);
7937         descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
7938         if (fi->s1ptw) {
7939             goto do_fault;
7940         }
7941 
7942         if (!(descriptor & 1) ||
7943             (!(descriptor & 2) && (level == 3))) {
7944             /* Invalid, or the Reserved level 3 encoding */
7945             goto do_fault;
7946         }
7947         descaddr = descriptor & descaddrmask;
7948 
7949         if ((descriptor & 2) && (level < 3)) {
7950             /* Table entry. The top five bits are attributes which  may
7951              * propagate down through lower levels of the table (and
7952              * which are all arranged so that 0 means "no effect", so
7953              * we can gather them up by ORing in the bits at each level).
7954              */
7955             tableattrs |= extract64(descriptor, 59, 5);
7956             level++;
7957             indexmask = indexmask_grainsize;
7958             continue;
7959         }
7960         /* Block entry at level 1 or 2, or page entry at level 3.
7961          * These are basically the same thing, although the number
7962          * of bits we pull in from the vaddr varies.
7963          */
7964         page_size = (1ULL << ((stride * (4 - level)) + 3));
7965         descaddr |= (address & (page_size - 1));
7966         /* Extract attributes from the descriptor */
7967         attrs = extract64(descriptor, 2, 10)
7968             | (extract64(descriptor, 52, 12) << 10);
7969 
7970         if (mmu_idx == ARMMMUIdx_S2NS) {
7971             /* Stage 2 table descriptors do not include any attribute fields */
7972             break;
7973         }
7974         /* Merge in attributes from table descriptors */
7975         attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
7976         attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
7977         /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
7978          * means "force PL1 access only", which means forcing AP[1] to 0.
7979          */
7980         if (extract32(tableattrs, 2, 1)) {
7981             attrs &= ~(1 << 4);
7982         }
7983         attrs |= nstable << 3; /* NS */
7984         break;
7985     }
7986     /* Here descaddr is the final physical address, and attributes
7987      * are all in attrs.
7988      */
7989     fault_type = access_fault;
7990     if ((attrs & (1 << 8)) == 0) {
7991         /* Access flag */
7992         goto do_fault;
7993     }
7994 
7995     ap = extract32(attrs, 4, 2);
7996     xn = extract32(attrs, 12, 1);
7997 
7998     if (mmu_idx == ARMMMUIdx_S2NS) {
7999         ns = true;
8000         *prot = get_S2prot(env, ap, xn);
8001     } else {
8002         ns = extract32(attrs, 3, 1);
8003         pxn = extract32(attrs, 11, 1);
8004         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
8005     }
8006 
8007     fault_type = permission_fault;
8008     if (!(*prot & (1 << access_type))) {
8009         goto do_fault;
8010     }
8011 
8012     if (ns) {
8013         /* The NS bit will (as required by the architecture) have no effect if
8014          * the CPU doesn't support TZ or this is a non-secure translation
8015          * regime, because the attribute will already be non-secure.
8016          */
8017         txattrs->secure = false;
8018     }
8019     *phys_ptr = descaddr;
8020     *page_size_ptr = page_size;
8021     return false;
8022 
8023 do_fault:
8024     /* Long-descriptor format IFSR/DFSR value */
8025     *fsr = (1 << 9) | (fault_type << 2) | level;
8026     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
8027     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
8028     return true;
8029 }
8030 
8031 static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
8032                                                 ARMMMUIdx mmu_idx,
8033                                                 int32_t address, int *prot)
8034 {
8035     *prot = PAGE_READ | PAGE_WRITE;
8036     switch (address) {
8037     case 0xF0000000 ... 0xFFFFFFFF:
8038         if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs execing is ok */
8039             *prot |= PAGE_EXEC;
8040         }
8041         break;
8042     case 0x00000000 ... 0x7FFFFFFF:
8043         *prot |= PAGE_EXEC;
8044         break;
8045     }
8046 
8047 }
8048 
8049 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
8050                                  int access_type, ARMMMUIdx mmu_idx,
8051                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)
8052 {
8053     ARMCPU *cpu = arm_env_get_cpu(env);
8054     int n;
8055     bool is_user = regime_is_user(env, mmu_idx);
8056 
8057     *phys_ptr = address;
8058     *prot = 0;
8059 
8060     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
8061         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
8062     } else { /* MPU enabled */
8063         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
8064             /* region search */
8065             uint32_t base = env->pmsav7.drbar[n];
8066             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
8067             uint32_t rmask;
8068             bool srdis = false;
8069 
8070             if (!(env->pmsav7.drsr[n] & 0x1)) {
8071                 continue;
8072             }
8073 
8074             if (!rsize) {
8075                 qemu_log_mask(LOG_GUEST_ERROR, "DRSR.Rsize field can not be 0");
8076                 continue;
8077             }
8078             rsize++;
8079             rmask = (1ull << rsize) - 1;
8080 
8081             if (base & rmask) {
8082                 qemu_log_mask(LOG_GUEST_ERROR, "DRBAR %" PRIx32 " misaligned "
8083                               "to DRSR region size, mask = %" PRIx32,
8084                               base, rmask);
8085                 continue;
8086             }
8087 
8088             if (address < base || address > base + rmask) {
8089                 continue;
8090             }
8091 
8092             /* Region matched */
8093 
8094             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
8095                 int i, snd;
8096                 uint32_t srdis_mask;
8097 
8098                 rsize -= 3; /* sub region size (power of 2) */
8099                 snd = ((address - base) >> rsize) & 0x7;
8100                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
8101 
8102                 srdis_mask = srdis ? 0x3 : 0x0;
8103                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
8104                     /* This will check in groups of 2, 4 and then 8, whether
8105                      * the subregion bits are consistent. rsize is incremented
8106                      * back up to give the region size, considering consistent
8107                      * adjacent subregions as one region. Stop testing if rsize
8108                      * is already big enough for an entire QEMU page.
8109                      */
8110                     int snd_rounded = snd & ~(i - 1);
8111                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
8112                                                      snd_rounded + 8, i);
8113                     if (srdis_mask ^ srdis_multi) {
8114                         break;
8115                     }
8116                     srdis_mask = (srdis_mask << i) | srdis_mask;
8117                     rsize++;
8118                 }
8119             }
8120             if (rsize < TARGET_PAGE_BITS) {
8121                 qemu_log_mask(LOG_UNIMP, "No support for MPU (sub)region"
8122                               "alignment of %" PRIu32 " bits. Minimum is %d\n",
8123                               rsize, TARGET_PAGE_BITS);
8124                 continue;
8125             }
8126             if (srdis) {
8127                 continue;
8128             }
8129             break;
8130         }
8131 
8132         if (n == -1) { /* no hits */
8133             if (cpu->pmsav7_dregion &&
8134                 (is_user || !(regime_sctlr(env, mmu_idx) & SCTLR_BR))) {
8135                 /* background fault */
8136                 *fsr = 0;
8137                 return true;
8138             }
8139             get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
8140         } else { /* a MPU hit! */
8141             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
8142 
8143             if (is_user) { /* User mode AP bit decoding */
8144                 switch (ap) {
8145                 case 0:
8146                 case 1:
8147                 case 5:
8148                     break; /* no access */
8149                 case 3:
8150                     *prot |= PAGE_WRITE;
8151                     /* fall through */
8152                 case 2:
8153                 case 6:
8154                     *prot |= PAGE_READ | PAGE_EXEC;
8155                     break;
8156                 default:
8157                     qemu_log_mask(LOG_GUEST_ERROR,
8158                                   "Bad value for AP bits in DRACR %"
8159                                   PRIx32 "\n", ap);
8160                 }
8161             } else { /* Priv. mode AP bits decoding */
8162                 switch (ap) {
8163                 case 0:
8164                     break; /* no access */
8165                 case 1:
8166                 case 2:
8167                 case 3:
8168                     *prot |= PAGE_WRITE;
8169                     /* fall through */
8170                 case 5:
8171                 case 6:
8172                     *prot |= PAGE_READ | PAGE_EXEC;
8173                     break;
8174                 default:
8175                     qemu_log_mask(LOG_GUEST_ERROR,
8176                                   "Bad value for AP bits in DRACR %"
8177                                   PRIx32 "\n", ap);
8178                 }
8179             }
8180 
8181             /* execute never */
8182             if (env->pmsav7.dracr[n] & (1 << 12)) {
8183                 *prot &= ~PAGE_EXEC;
8184             }
8185         }
8186     }
8187 
8188     *fsr = 0x00d; /* Permission fault */
8189     return !(*prot & (1 << access_type));
8190 }
8191 
8192 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
8193                                  int access_type, ARMMMUIdx mmu_idx,
8194                                  hwaddr *phys_ptr, int *prot, uint32_t *fsr)
8195 {
8196     int n;
8197     uint32_t mask;
8198     uint32_t base;
8199     bool is_user = regime_is_user(env, mmu_idx);
8200 
8201     *phys_ptr = address;
8202     for (n = 7; n >= 0; n--) {
8203         base = env->cp15.c6_region[n];
8204         if ((base & 1) == 0) {
8205             continue;
8206         }
8207         mask = 1 << ((base >> 1) & 0x1f);
8208         /* Keep this shift separate from the above to avoid an
8209            (undefined) << 32.  */
8210         mask = (mask << 1) - 1;
8211         if (((base ^ address) & ~mask) == 0) {
8212             break;
8213         }
8214     }
8215     if (n < 0) {
8216         *fsr = 2;
8217         return true;
8218     }
8219 
8220     if (access_type == 2) {
8221         mask = env->cp15.pmsav5_insn_ap;
8222     } else {
8223         mask = env->cp15.pmsav5_data_ap;
8224     }
8225     mask = (mask >> (n * 4)) & 0xf;
8226     switch (mask) {
8227     case 0:
8228         *fsr = 1;
8229         return true;
8230     case 1:
8231         if (is_user) {
8232             *fsr = 1;
8233             return true;
8234         }
8235         *prot = PAGE_READ | PAGE_WRITE;
8236         break;
8237     case 2:
8238         *prot = PAGE_READ;
8239         if (!is_user) {
8240             *prot |= PAGE_WRITE;
8241         }
8242         break;
8243     case 3:
8244         *prot = PAGE_READ | PAGE_WRITE;
8245         break;
8246     case 5:
8247         if (is_user) {
8248             *fsr = 1;
8249             return true;
8250         }
8251         *prot = PAGE_READ;
8252         break;
8253     case 6:
8254         *prot = PAGE_READ;
8255         break;
8256     default:
8257         /* Bad permission.  */
8258         *fsr = 1;
8259         return true;
8260     }
8261     *prot |= PAGE_EXEC;
8262     return false;
8263 }
8264 
8265 /* get_phys_addr - get the physical address for this virtual address
8266  *
8267  * Find the physical address corresponding to the given virtual address,
8268  * by doing a translation table walk on MMU based systems or using the
8269  * MPU state on MPU based systems.
8270  *
8271  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
8272  * prot and page_size may not be filled in, and the populated fsr value provides
8273  * information on why the translation aborted, in the format of a
8274  * DFSR/IFSR fault register, with the following caveats:
8275  *  * we honour the short vs long DFSR format differences.
8276  *  * the WnR bit is never set (the caller must do this).
8277  *  * for PSMAv5 based systems we don't bother to return a full FSR format
8278  *    value.
8279  *
8280  * @env: CPUARMState
8281  * @address: virtual address to get physical address for
8282  * @access_type: 0 for read, 1 for write, 2 for execute
8283  * @mmu_idx: MMU index indicating required translation regime
8284  * @phys_ptr: set to the physical address corresponding to the virtual address
8285  * @attrs: set to the memory transaction attributes to use
8286  * @prot: set to the permissions for the page containing phys_ptr
8287  * @page_size: set to the size of the page containing phys_ptr
8288  * @fsr: set to the DFSR/IFSR value on failure
8289  */
8290 static bool get_phys_addr(CPUARMState *env, target_ulong address,
8291                           int access_type, ARMMMUIdx mmu_idx,
8292                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
8293                           target_ulong *page_size, uint32_t *fsr,
8294                           ARMMMUFaultInfo *fi)
8295 {
8296     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8297         /* Call ourselves recursively to do the stage 1 and then stage 2
8298          * translations.
8299          */
8300         if (arm_feature(env, ARM_FEATURE_EL2)) {
8301             hwaddr ipa;
8302             int s2_prot;
8303             int ret;
8304 
8305             ret = get_phys_addr(env, address, access_type,
8306                                 mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
8307                                 prot, page_size, fsr, fi);
8308 
8309             /* If S1 fails or S2 is disabled, return early.  */
8310             if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
8311                 *phys_ptr = ipa;
8312                 return ret;
8313             }
8314 
8315             /* S1 is done. Now do S2 translation.  */
8316             ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
8317                                      phys_ptr, attrs, &s2_prot,
8318                                      page_size, fsr, fi);
8319             fi->s2addr = ipa;
8320             /* Combine the S1 and S2 perms.  */
8321             *prot &= s2_prot;
8322             return ret;
8323         } else {
8324             /*
8325              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
8326              */
8327             mmu_idx += ARMMMUIdx_S1NSE0;
8328         }
8329     }
8330 
8331     /* The page table entries may downgrade secure to non-secure, but
8332      * cannot upgrade an non-secure translation regime's attributes
8333      * to secure.
8334      */
8335     attrs->secure = regime_is_secure(env, mmu_idx);
8336     attrs->user = regime_is_user(env, mmu_idx);
8337 
8338     /* Fast Context Switch Extension. This doesn't exist at all in v8.
8339      * In v7 and earlier it affects all stage 1 translations.
8340      */
8341     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
8342         && !arm_feature(env, ARM_FEATURE_V8)) {
8343         if (regime_el(env, mmu_idx) == 3) {
8344             address += env->cp15.fcseidr_s;
8345         } else {
8346             address += env->cp15.fcseidr_ns;
8347         }
8348     }
8349 
8350     /* pmsav7 has special handling for when MPU is disabled so call it before
8351      * the common MMU/MPU disabled check below.
8352      */
8353     if (arm_feature(env, ARM_FEATURE_MPU) &&
8354         arm_feature(env, ARM_FEATURE_V7)) {
8355         *page_size = TARGET_PAGE_SIZE;
8356         return get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
8357                                     phys_ptr, prot, fsr);
8358     }
8359 
8360     if (regime_translation_disabled(env, mmu_idx)) {
8361         /* MMU/MPU disabled.  */
8362         *phys_ptr = address;
8363         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
8364         *page_size = TARGET_PAGE_SIZE;
8365         return 0;
8366     }
8367 
8368     if (arm_feature(env, ARM_FEATURE_MPU)) {
8369         /* Pre-v7 MPU */
8370         *page_size = TARGET_PAGE_SIZE;
8371         return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
8372                                     phys_ptr, prot, fsr);
8373     }
8374 
8375     if (regime_using_lpae_format(env, mmu_idx)) {
8376         return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
8377                                   attrs, prot, page_size, fsr, fi);
8378     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
8379         return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
8380                                 attrs, prot, page_size, fsr, fi);
8381     } else {
8382         return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
8383                                 prot, page_size, fsr, fi);
8384     }
8385 }
8386 
8387 /* Walk the page table and (if the mapping exists) add the page
8388  * to the TLB. Return false on success, or true on failure. Populate
8389  * fsr with ARM DFSR/IFSR fault register format value on failure.
8390  */
8391 bool arm_tlb_fill(CPUState *cs, vaddr address,
8392                   int access_type, int mmu_idx, uint32_t *fsr,
8393                   ARMMMUFaultInfo *fi)
8394 {
8395     ARMCPU *cpu = ARM_CPU(cs);
8396     CPUARMState *env = &cpu->env;
8397     hwaddr phys_addr;
8398     target_ulong page_size;
8399     int prot;
8400     int ret;
8401     MemTxAttrs attrs = {};
8402 
8403     ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
8404                         &attrs, &prot, &page_size, fsr, fi);
8405     if (!ret) {
8406         /* Map a single [sub]page.  */
8407         phys_addr &= TARGET_PAGE_MASK;
8408         address &= TARGET_PAGE_MASK;
8409         tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
8410                                 prot, mmu_idx, page_size);
8411         return 0;
8412     }
8413 
8414     return ret;
8415 }
8416 
8417 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
8418                                          MemTxAttrs *attrs)
8419 {
8420     ARMCPU *cpu = ARM_CPU(cs);
8421     CPUARMState *env = &cpu->env;
8422     hwaddr phys_addr;
8423     target_ulong page_size;
8424     int prot;
8425     bool ret;
8426     uint32_t fsr;
8427     ARMMMUFaultInfo fi = {};
8428 
8429     *attrs = (MemTxAttrs) {};
8430 
8431     ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
8432                         attrs, &prot, &page_size, &fsr, &fi);
8433 
8434     if (ret) {
8435         return -1;
8436     }
8437     return phys_addr;
8438 }
8439 
8440 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
8441 {
8442     uint32_t mask;
8443     unsigned el = arm_current_el(env);
8444 
8445     /* First handle registers which unprivileged can read */
8446 
8447     switch (reg) {
8448     case 0 ... 7: /* xPSR sub-fields */
8449         mask = 0;
8450         if ((reg & 1) && el) {
8451             mask |= 0x000001ff; /* IPSR (unpriv. reads as zero) */
8452         }
8453         if (!(reg & 4)) {
8454             mask |= 0xf8000000; /* APSR */
8455         }
8456         /* EPSR reads as zero */
8457         return xpsr_read(env) & mask;
8458         break;
8459     case 20: /* CONTROL */
8460         return env->v7m.control;
8461     }
8462 
8463     if (el == 0) {
8464         return 0; /* unprivileged reads others as zero */
8465     }
8466 
8467     switch (reg) {
8468     case 8: /* MSP */
8469         return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
8470             env->v7m.other_sp : env->regs[13];
8471     case 9: /* PSP */
8472         return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
8473             env->regs[13] : env->v7m.other_sp;
8474     case 16: /* PRIMASK */
8475         return (env->daif & PSTATE_I) != 0;
8476     case 17: /* BASEPRI */
8477     case 18: /* BASEPRI_MAX */
8478         return env->v7m.basepri;
8479     case 19: /* FAULTMASK */
8480         return (env->daif & PSTATE_F) != 0;
8481     default:
8482         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
8483                                        " register %d\n", reg);
8484         return 0;
8485     }
8486 }
8487 
8488 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
8489 {
8490     if (arm_current_el(env) == 0 && reg > 7) {
8491         /* only xPSR sub-fields may be written by unprivileged */
8492         return;
8493     }
8494 
8495     switch (reg) {
8496     case 0 ... 7: /* xPSR sub-fields */
8497         /* only APSR is actually writable */
8498         if (reg & 4) {
8499             xpsr_write(env, val, 0xf8000000); /* APSR */
8500         }
8501         break;
8502     case 8: /* MSP */
8503         if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
8504             env->v7m.other_sp = val;
8505         } else {
8506             env->regs[13] = val;
8507         }
8508         break;
8509     case 9: /* PSP */
8510         if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
8511             env->regs[13] = val;
8512         } else {
8513             env->v7m.other_sp = val;
8514         }
8515         break;
8516     case 16: /* PRIMASK */
8517         if (val & 1) {
8518             env->daif |= PSTATE_I;
8519         } else {
8520             env->daif &= ~PSTATE_I;
8521         }
8522         break;
8523     case 17: /* BASEPRI */
8524         env->v7m.basepri = val & 0xff;
8525         break;
8526     case 18: /* BASEPRI_MAX */
8527         val &= 0xff;
8528         if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
8529             env->v7m.basepri = val;
8530         break;
8531     case 19: /* FAULTMASK */
8532         if (val & 1) {
8533             env->daif |= PSTATE_F;
8534         } else {
8535             env->daif &= ~PSTATE_F;
8536         }
8537         break;
8538     case 20: /* CONTROL */
8539         switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
8540         env->v7m.control = val & (R_V7M_CONTROL_SPSEL_MASK |
8541                                   R_V7M_CONTROL_NPRIV_MASK);
8542         break;
8543     default:
8544         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
8545                                        " register %d\n", reg);
8546         return;
8547     }
8548 }
8549 
8550 #endif
8551 
8552 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
8553 {
8554     /* Implement DC ZVA, which zeroes a fixed-length block of memory.
8555      * Note that we do not implement the (architecturally mandated)
8556      * alignment fault for attempts to use this on Device memory
8557      * (which matches the usual QEMU behaviour of not implementing either
8558      * alignment faults or any memory attribute handling).
8559      */
8560 
8561     ARMCPU *cpu = arm_env_get_cpu(env);
8562     uint64_t blocklen = 4 << cpu->dcz_blocksize;
8563     uint64_t vaddr = vaddr_in & ~(blocklen - 1);
8564 
8565 #ifndef CONFIG_USER_ONLY
8566     {
8567         /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
8568          * the block size so we might have to do more than one TLB lookup.
8569          * We know that in fact for any v8 CPU the page size is at least 4K
8570          * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
8571          * 1K as an artefact of legacy v5 subpage support being present in the
8572          * same QEMU executable.
8573          */
8574         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
8575         void *hostaddr[maxidx];
8576         int try, i;
8577         unsigned mmu_idx = cpu_mmu_index(env, false);
8578         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
8579 
8580         for (try = 0; try < 2; try++) {
8581 
8582             for (i = 0; i < maxidx; i++) {
8583                 hostaddr[i] = tlb_vaddr_to_host(env,
8584                                                 vaddr + TARGET_PAGE_SIZE * i,
8585                                                 1, mmu_idx);
8586                 if (!hostaddr[i]) {
8587                     break;
8588                 }
8589             }
8590             if (i == maxidx) {
8591                 /* If it's all in the TLB it's fair game for just writing to;
8592                  * we know we don't need to update dirty status, etc.
8593                  */
8594                 for (i = 0; i < maxidx - 1; i++) {
8595                     memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
8596                 }
8597                 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
8598                 return;
8599             }
8600             /* OK, try a store and see if we can populate the tlb. This
8601              * might cause an exception if the memory isn't writable,
8602              * in which case we will longjmp out of here. We must for
8603              * this purpose use the actual register value passed to us
8604              * so that we get the fault address right.
8605              */
8606             helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
8607             /* Now we can populate the other TLB entries, if any */
8608             for (i = 0; i < maxidx; i++) {
8609                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
8610                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
8611                     helper_ret_stb_mmu(env, va, 0, oi, GETPC());
8612                 }
8613             }
8614         }
8615 
8616         /* Slow path (probably attempt to do this to an I/O device or
8617          * similar, or clearing of a block of code we have translations
8618          * cached for). Just do a series of byte writes as the architecture
8619          * demands. It's not worth trying to use a cpu_physical_memory_map(),
8620          * memset(), unmap() sequence here because:
8621          *  + we'd need to account for the blocksize being larger than a page
8622          *  + the direct-RAM access case is almost always going to be dealt
8623          *    with in the fastpath code above, so there's no speed benefit
8624          *  + we would have to deal with the map returning NULL because the
8625          *    bounce buffer was in use
8626          */
8627         for (i = 0; i < blocklen; i++) {
8628             helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
8629         }
8630     }
8631 #else
8632     memset(g2h(vaddr), 0, blocklen);
8633 #endif
8634 }
8635 
8636 /* Note that signed overflow is undefined in C.  The following routines are
8637    careful to use unsigned types where modulo arithmetic is required.
8638    Failure to do so _will_ break on newer gcc.  */
8639 
8640 /* Signed saturating arithmetic.  */
8641 
8642 /* Perform 16-bit signed saturating addition.  */
8643 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
8644 {
8645     uint16_t res;
8646 
8647     res = a + b;
8648     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
8649         if (a & 0x8000)
8650             res = 0x8000;
8651         else
8652             res = 0x7fff;
8653     }
8654     return res;
8655 }
8656 
8657 /* Perform 8-bit signed saturating addition.  */
8658 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
8659 {
8660     uint8_t res;
8661 
8662     res = a + b;
8663     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
8664         if (a & 0x80)
8665             res = 0x80;
8666         else
8667             res = 0x7f;
8668     }
8669     return res;
8670 }
8671 
8672 /* Perform 16-bit signed saturating subtraction.  */
8673 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
8674 {
8675     uint16_t res;
8676 
8677     res = a - b;
8678     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
8679         if (a & 0x8000)
8680             res = 0x8000;
8681         else
8682             res = 0x7fff;
8683     }
8684     return res;
8685 }
8686 
8687 /* Perform 8-bit signed saturating subtraction.  */
8688 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
8689 {
8690     uint8_t res;
8691 
8692     res = a - b;
8693     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
8694         if (a & 0x80)
8695             res = 0x80;
8696         else
8697             res = 0x7f;
8698     }
8699     return res;
8700 }
8701 
8702 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
8703 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
8704 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
8705 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
8706 #define PFX q
8707 
8708 #include "op_addsub.h"
8709 
8710 /* Unsigned saturating arithmetic.  */
8711 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
8712 {
8713     uint16_t res;
8714     res = a + b;
8715     if (res < a)
8716         res = 0xffff;
8717     return res;
8718 }
8719 
8720 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
8721 {
8722     if (a > b)
8723         return a - b;
8724     else
8725         return 0;
8726 }
8727 
8728 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
8729 {
8730     uint8_t res;
8731     res = a + b;
8732     if (res < a)
8733         res = 0xff;
8734     return res;
8735 }
8736 
8737 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
8738 {
8739     if (a > b)
8740         return a - b;
8741     else
8742         return 0;
8743 }
8744 
8745 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
8746 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
8747 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
8748 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
8749 #define PFX uq
8750 
8751 #include "op_addsub.h"
8752 
8753 /* Signed modulo arithmetic.  */
8754 #define SARITH16(a, b, n, op) do { \
8755     int32_t sum; \
8756     sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
8757     RESULT(sum, n, 16); \
8758     if (sum >= 0) \
8759         ge |= 3 << (n * 2); \
8760     } while(0)
8761 
8762 #define SARITH8(a, b, n, op) do { \
8763     int32_t sum; \
8764     sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
8765     RESULT(sum, n, 8); \
8766     if (sum >= 0) \
8767         ge |= 1 << n; \
8768     } while(0)
8769 
8770 
8771 #define ADD16(a, b, n) SARITH16(a, b, n, +)
8772 #define SUB16(a, b, n) SARITH16(a, b, n, -)
8773 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
8774 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
8775 #define PFX s
8776 #define ARITH_GE
8777 
8778 #include "op_addsub.h"
8779 
8780 /* Unsigned modulo arithmetic.  */
8781 #define ADD16(a, b, n) do { \
8782     uint32_t sum; \
8783     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
8784     RESULT(sum, n, 16); \
8785     if ((sum >> 16) == 1) \
8786         ge |= 3 << (n * 2); \
8787     } while(0)
8788 
8789 #define ADD8(a, b, n) do { \
8790     uint32_t sum; \
8791     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
8792     RESULT(sum, n, 8); \
8793     if ((sum >> 8) == 1) \
8794         ge |= 1 << n; \
8795     } while(0)
8796 
8797 #define SUB16(a, b, n) do { \
8798     uint32_t sum; \
8799     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
8800     RESULT(sum, n, 16); \
8801     if ((sum >> 16) == 0) \
8802         ge |= 3 << (n * 2); \
8803     } while(0)
8804 
8805 #define SUB8(a, b, n) do { \
8806     uint32_t sum; \
8807     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
8808     RESULT(sum, n, 8); \
8809     if ((sum >> 8) == 0) \
8810         ge |= 1 << n; \
8811     } while(0)
8812 
8813 #define PFX u
8814 #define ARITH_GE
8815 
8816 #include "op_addsub.h"
8817 
8818 /* Halved signed arithmetic.  */
8819 #define ADD16(a, b, n) \
8820   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
8821 #define SUB16(a, b, n) \
8822   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
8823 #define ADD8(a, b, n) \
8824   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
8825 #define SUB8(a, b, n) \
8826   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
8827 #define PFX sh
8828 
8829 #include "op_addsub.h"
8830 
8831 /* Halved unsigned arithmetic.  */
8832 #define ADD16(a, b, n) \
8833   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
8834 #define SUB16(a, b, n) \
8835   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
8836 #define ADD8(a, b, n) \
8837   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
8838 #define SUB8(a, b, n) \
8839   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
8840 #define PFX uh
8841 
8842 #include "op_addsub.h"
8843 
8844 static inline uint8_t do_usad(uint8_t a, uint8_t b)
8845 {
8846     if (a > b)
8847         return a - b;
8848     else
8849         return b - a;
8850 }
8851 
8852 /* Unsigned sum of absolute byte differences.  */
8853 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
8854 {
8855     uint32_t sum;
8856     sum = do_usad(a, b);
8857     sum += do_usad(a >> 8, b >> 8);
8858     sum += do_usad(a >> 16, b >>16);
8859     sum += do_usad(a >> 24, b >> 24);
8860     return sum;
8861 }
8862 
8863 /* For ARMv6 SEL instruction.  */
8864 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
8865 {
8866     uint32_t mask;
8867 
8868     mask = 0;
8869     if (flags & 1)
8870         mask |= 0xff;
8871     if (flags & 2)
8872         mask |= 0xff00;
8873     if (flags & 4)
8874         mask |= 0xff0000;
8875     if (flags & 8)
8876         mask |= 0xff000000;
8877     return (a & mask) | (b & ~mask);
8878 }
8879 
8880 /* VFP support.  We follow the convention used for VFP instructions:
8881    Single precision routines have a "s" suffix, double precision a
8882    "d" suffix.  */
8883 
8884 /* Convert host exception flags to vfp form.  */
8885 static inline int vfp_exceptbits_from_host(int host_bits)
8886 {
8887     int target_bits = 0;
8888 
8889     if (host_bits & float_flag_invalid)
8890         target_bits |= 1;
8891     if (host_bits & float_flag_divbyzero)
8892         target_bits |= 2;
8893     if (host_bits & float_flag_overflow)
8894         target_bits |= 4;
8895     if (host_bits & (float_flag_underflow | float_flag_output_denormal))
8896         target_bits |= 8;
8897     if (host_bits & float_flag_inexact)
8898         target_bits |= 0x10;
8899     if (host_bits & float_flag_input_denormal)
8900         target_bits |= 0x80;
8901     return target_bits;
8902 }
8903 
8904 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
8905 {
8906     int i;
8907     uint32_t fpscr;
8908 
8909     fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
8910             | (env->vfp.vec_len << 16)
8911             | (env->vfp.vec_stride << 20);
8912     i = get_float_exception_flags(&env->vfp.fp_status);
8913     i |= get_float_exception_flags(&env->vfp.standard_fp_status);
8914     fpscr |= vfp_exceptbits_from_host(i);
8915     return fpscr;
8916 }
8917 
8918 uint32_t vfp_get_fpscr(CPUARMState *env)
8919 {
8920     return HELPER(vfp_get_fpscr)(env);
8921 }
8922 
8923 /* Convert vfp exception flags to target form.  */
8924 static inline int vfp_exceptbits_to_host(int target_bits)
8925 {
8926     int host_bits = 0;
8927 
8928     if (target_bits & 1)
8929         host_bits |= float_flag_invalid;
8930     if (target_bits & 2)
8931         host_bits |= float_flag_divbyzero;
8932     if (target_bits & 4)
8933         host_bits |= float_flag_overflow;
8934     if (target_bits & 8)
8935         host_bits |= float_flag_underflow;
8936     if (target_bits & 0x10)
8937         host_bits |= float_flag_inexact;
8938     if (target_bits & 0x80)
8939         host_bits |= float_flag_input_denormal;
8940     return host_bits;
8941 }
8942 
8943 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
8944 {
8945     int i;
8946     uint32_t changed;
8947 
8948     changed = env->vfp.xregs[ARM_VFP_FPSCR];
8949     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
8950     env->vfp.vec_len = (val >> 16) & 7;
8951     env->vfp.vec_stride = (val >> 20) & 3;
8952 
8953     changed ^= val;
8954     if (changed & (3 << 22)) {
8955         i = (val >> 22) & 3;
8956         switch (i) {
8957         case FPROUNDING_TIEEVEN:
8958             i = float_round_nearest_even;
8959             break;
8960         case FPROUNDING_POSINF:
8961             i = float_round_up;
8962             break;
8963         case FPROUNDING_NEGINF:
8964             i = float_round_down;
8965             break;
8966         case FPROUNDING_ZERO:
8967             i = float_round_to_zero;
8968             break;
8969         }
8970         set_float_rounding_mode(i, &env->vfp.fp_status);
8971     }
8972     if (changed & (1 << 24)) {
8973         set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
8974         set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
8975     }
8976     if (changed & (1 << 25))
8977         set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
8978 
8979     i = vfp_exceptbits_to_host(val);
8980     set_float_exception_flags(i, &env->vfp.fp_status);
8981     set_float_exception_flags(0, &env->vfp.standard_fp_status);
8982 }
8983 
8984 void vfp_set_fpscr(CPUARMState *env, uint32_t val)
8985 {
8986     HELPER(vfp_set_fpscr)(env, val);
8987 }
8988 
8989 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
8990 
8991 #define VFP_BINOP(name) \
8992 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
8993 { \
8994     float_status *fpst = fpstp; \
8995     return float32_ ## name(a, b, fpst); \
8996 } \
8997 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
8998 { \
8999     float_status *fpst = fpstp; \
9000     return float64_ ## name(a, b, fpst); \
9001 }
9002 VFP_BINOP(add)
9003 VFP_BINOP(sub)
9004 VFP_BINOP(mul)
9005 VFP_BINOP(div)
9006 VFP_BINOP(min)
9007 VFP_BINOP(max)
9008 VFP_BINOP(minnum)
9009 VFP_BINOP(maxnum)
9010 #undef VFP_BINOP
9011 
9012 float32 VFP_HELPER(neg, s)(float32 a)
9013 {
9014     return float32_chs(a);
9015 }
9016 
9017 float64 VFP_HELPER(neg, d)(float64 a)
9018 {
9019     return float64_chs(a);
9020 }
9021 
9022 float32 VFP_HELPER(abs, s)(float32 a)
9023 {
9024     return float32_abs(a);
9025 }
9026 
9027 float64 VFP_HELPER(abs, d)(float64 a)
9028 {
9029     return float64_abs(a);
9030 }
9031 
9032 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
9033 {
9034     return float32_sqrt(a, &env->vfp.fp_status);
9035 }
9036 
9037 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
9038 {
9039     return float64_sqrt(a, &env->vfp.fp_status);
9040 }
9041 
9042 /* XXX: check quiet/signaling case */
9043 #define DO_VFP_cmp(p, type) \
9044 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
9045 { \
9046     uint32_t flags; \
9047     switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
9048     case 0: flags = 0x6; break; \
9049     case -1: flags = 0x8; break; \
9050     case 1: flags = 0x2; break; \
9051     default: case 2: flags = 0x3; break; \
9052     } \
9053     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
9054         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
9055 } \
9056 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
9057 { \
9058     uint32_t flags; \
9059     switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
9060     case 0: flags = 0x6; break; \
9061     case -1: flags = 0x8; break; \
9062     case 1: flags = 0x2; break; \
9063     default: case 2: flags = 0x3; break; \
9064     } \
9065     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
9066         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
9067 }
9068 DO_VFP_cmp(s, float32)
9069 DO_VFP_cmp(d, float64)
9070 #undef DO_VFP_cmp
9071 
9072 /* Integer to float and float to integer conversions */
9073 
9074 #define CONV_ITOF(name, fsz, sign) \
9075     float##fsz HELPER(name)(uint32_t x, void *fpstp) \
9076 { \
9077     float_status *fpst = fpstp; \
9078     return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
9079 }
9080 
9081 #define CONV_FTOI(name, fsz, sign, round) \
9082 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
9083 { \
9084     float_status *fpst = fpstp; \
9085     if (float##fsz##_is_any_nan(x)) { \
9086         float_raise(float_flag_invalid, fpst); \
9087         return 0; \
9088     } \
9089     return float##fsz##_to_##sign##int32##round(x, fpst); \
9090 }
9091 
9092 #define FLOAT_CONVS(name, p, fsz, sign) \
9093 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
9094 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
9095 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
9096 
9097 FLOAT_CONVS(si, s, 32, )
9098 FLOAT_CONVS(si, d, 64, )
9099 FLOAT_CONVS(ui, s, 32, u)
9100 FLOAT_CONVS(ui, d, 64, u)
9101 
9102 #undef CONV_ITOF
9103 #undef CONV_FTOI
9104 #undef FLOAT_CONVS
9105 
9106 /* floating point conversion */
9107 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
9108 {
9109     float64 r = float32_to_float64(x, &env->vfp.fp_status);
9110     /* ARM requires that S<->D conversion of any kind of NaN generates
9111      * a quiet NaN by forcing the most significant frac bit to 1.
9112      */
9113     return float64_maybe_silence_nan(r, &env->vfp.fp_status);
9114 }
9115 
9116 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
9117 {
9118     float32 r =  float64_to_float32(x, &env->vfp.fp_status);
9119     /* ARM requires that S<->D conversion of any kind of NaN generates
9120      * a quiet NaN by forcing the most significant frac bit to 1.
9121      */
9122     return float32_maybe_silence_nan(r, &env->vfp.fp_status);
9123 }
9124 
9125 /* VFP3 fixed point conversion.  */
9126 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
9127 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t  x, uint32_t shift, \
9128                                      void *fpstp) \
9129 { \
9130     float_status *fpst = fpstp; \
9131     float##fsz tmp; \
9132     tmp = itype##_to_##float##fsz(x, fpst); \
9133     return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
9134 }
9135 
9136 /* Notice that we want only input-denormal exception flags from the
9137  * scalbn operation: the other possible flags (overflow+inexact if
9138  * we overflow to infinity, output-denormal) aren't correct for the
9139  * complete scale-and-convert operation.
9140  */
9141 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
9142 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
9143                                              uint32_t shift, \
9144                                              void *fpstp) \
9145 { \
9146     float_status *fpst = fpstp; \
9147     int old_exc_flags = get_float_exception_flags(fpst); \
9148     float##fsz tmp; \
9149     if (float##fsz##_is_any_nan(x)) { \
9150         float_raise(float_flag_invalid, fpst); \
9151         return 0; \
9152     } \
9153     tmp = float##fsz##_scalbn(x, shift, fpst); \
9154     old_exc_flags |= get_float_exception_flags(fpst) \
9155         & float_flag_input_denormal; \
9156     set_float_exception_flags(old_exc_flags, fpst); \
9157     return float##fsz##_to_##itype##round(tmp, fpst); \
9158 }
9159 
9160 #define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
9161 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
9162 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
9163 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
9164 
9165 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
9166 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
9167 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
9168 
9169 VFP_CONV_FIX(sh, d, 64, 64, int16)
9170 VFP_CONV_FIX(sl, d, 64, 64, int32)
9171 VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
9172 VFP_CONV_FIX(uh, d, 64, 64, uint16)
9173 VFP_CONV_FIX(ul, d, 64, 64, uint32)
9174 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
9175 VFP_CONV_FIX(sh, s, 32, 32, int16)
9176 VFP_CONV_FIX(sl, s, 32, 32, int32)
9177 VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
9178 VFP_CONV_FIX(uh, s, 32, 32, uint16)
9179 VFP_CONV_FIX(ul, s, 32, 32, uint32)
9180 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
9181 #undef VFP_CONV_FIX
9182 #undef VFP_CONV_FIX_FLOAT
9183 #undef VFP_CONV_FLOAT_FIX_ROUND
9184 
9185 /* Set the current fp rounding mode and return the old one.
9186  * The argument is a softfloat float_round_ value.
9187  */
9188 uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
9189 {
9190     float_status *fp_status = &env->vfp.fp_status;
9191 
9192     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
9193     set_float_rounding_mode(rmode, fp_status);
9194 
9195     return prev_rmode;
9196 }
9197 
9198 /* Set the current fp rounding mode in the standard fp status and return
9199  * the old one. This is for NEON instructions that need to change the
9200  * rounding mode but wish to use the standard FPSCR values for everything
9201  * else. Always set the rounding mode back to the correct value after
9202  * modifying it.
9203  * The argument is a softfloat float_round_ value.
9204  */
9205 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
9206 {
9207     float_status *fp_status = &env->vfp.standard_fp_status;
9208 
9209     uint32_t prev_rmode = get_float_rounding_mode(fp_status);
9210     set_float_rounding_mode(rmode, fp_status);
9211 
9212     return prev_rmode;
9213 }
9214 
9215 /* Half precision conversions.  */
9216 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
9217 {
9218     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9219     float32 r = float16_to_float32(make_float16(a), ieee, s);
9220     if (ieee) {
9221         return float32_maybe_silence_nan(r, s);
9222     }
9223     return r;
9224 }
9225 
9226 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
9227 {
9228     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9229     float16 r = float32_to_float16(a, ieee, s);
9230     if (ieee) {
9231         r = float16_maybe_silence_nan(r, s);
9232     }
9233     return float16_val(r);
9234 }
9235 
9236 float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9237 {
9238     return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
9239 }
9240 
9241 uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9242 {
9243     return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
9244 }
9245 
9246 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9247 {
9248     return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
9249 }
9250 
9251 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9252 {
9253     return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
9254 }
9255 
9256 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
9257 {
9258     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9259     float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
9260     if (ieee) {
9261         return float64_maybe_silence_nan(r, &env->vfp.fp_status);
9262     }
9263     return r;
9264 }
9265 
9266 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
9267 {
9268     int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9269     float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
9270     if (ieee) {
9271         r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
9272     }
9273     return float16_val(r);
9274 }
9275 
9276 #define float32_two make_float32(0x40000000)
9277 #define float32_three make_float32(0x40400000)
9278 #define float32_one_point_five make_float32(0x3fc00000)
9279 
9280 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
9281 {
9282     float_status *s = &env->vfp.standard_fp_status;
9283     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9284         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9285         if (!(float32_is_zero(a) || float32_is_zero(b))) {
9286             float_raise(float_flag_input_denormal, s);
9287         }
9288         return float32_two;
9289     }
9290     return float32_sub(float32_two, float32_mul(a, b, s), s);
9291 }
9292 
9293 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
9294 {
9295     float_status *s = &env->vfp.standard_fp_status;
9296     float32 product;
9297     if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9298         (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9299         if (!(float32_is_zero(a) || float32_is_zero(b))) {
9300             float_raise(float_flag_input_denormal, s);
9301         }
9302         return float32_one_point_five;
9303     }
9304     product = float32_mul(a, b, s);
9305     return float32_div(float32_sub(float32_three, product, s), float32_two, s);
9306 }
9307 
9308 /* NEON helpers.  */
9309 
9310 /* Constants 256 and 512 are used in some helpers; we avoid relying on
9311  * int->float conversions at run-time.  */
9312 #define float64_256 make_float64(0x4070000000000000LL)
9313 #define float64_512 make_float64(0x4080000000000000LL)
9314 #define float32_maxnorm make_float32(0x7f7fffff)
9315 #define float64_maxnorm make_float64(0x7fefffffffffffffLL)
9316 
9317 /* Reciprocal functions
9318  *
9319  * The algorithm that must be used to calculate the estimate
9320  * is specified by the ARM ARM, see FPRecipEstimate()
9321  */
9322 
9323 static float64 recip_estimate(float64 a, float_status *real_fp_status)
9324 {
9325     /* These calculations mustn't set any fp exception flags,
9326      * so we use a local copy of the fp_status.
9327      */
9328     float_status dummy_status = *real_fp_status;
9329     float_status *s = &dummy_status;
9330     /* q = (int)(a * 512.0) */
9331     float64 q = float64_mul(float64_512, a, s);
9332     int64_t q_int = float64_to_int64_round_to_zero(q, s);
9333 
9334     /* r = 1.0 / (((double)q + 0.5) / 512.0) */
9335     q = int64_to_float64(q_int, s);
9336     q = float64_add(q, float64_half, s);
9337     q = float64_div(q, float64_512, s);
9338     q = float64_div(float64_one, q, s);
9339 
9340     /* s = (int)(256.0 * r + 0.5) */
9341     q = float64_mul(q, float64_256, s);
9342     q = float64_add(q, float64_half, s);
9343     q_int = float64_to_int64_round_to_zero(q, s);
9344 
9345     /* return (double)s / 256.0 */
9346     return float64_div(int64_to_float64(q_int, s), float64_256, s);
9347 }
9348 
9349 /* Common wrapper to call recip_estimate */
9350 static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
9351 {
9352     uint64_t val64 = float64_val(num);
9353     uint64_t frac = extract64(val64, 0, 52);
9354     int64_t exp = extract64(val64, 52, 11);
9355     uint64_t sbit;
9356     float64 scaled, estimate;
9357 
9358     /* Generate the scaled number for the estimate function */
9359     if (exp == 0) {
9360         if (extract64(frac, 51, 1) == 0) {
9361             exp = -1;
9362             frac = extract64(frac, 0, 50) << 2;
9363         } else {
9364             frac = extract64(frac, 0, 51) << 1;
9365         }
9366     }
9367 
9368     /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
9369     scaled = make_float64((0x3feULL << 52)
9370                           | extract64(frac, 44, 8) << 44);
9371 
9372     estimate = recip_estimate(scaled, fpst);
9373 
9374     /* Build new result */
9375     val64 = float64_val(estimate);
9376     sbit = 0x8000000000000000ULL & val64;
9377     exp = off - exp;
9378     frac = extract64(val64, 0, 52);
9379 
9380     if (exp == 0) {
9381         frac = 1ULL << 51 | extract64(frac, 1, 51);
9382     } else if (exp == -1) {
9383         frac = 1ULL << 50 | extract64(frac, 2, 50);
9384         exp = 0;
9385     }
9386 
9387     return make_float64(sbit | (exp << 52) | frac);
9388 }
9389 
9390 static bool round_to_inf(float_status *fpst, bool sign_bit)
9391 {
9392     switch (fpst->float_rounding_mode) {
9393     case float_round_nearest_even: /* Round to Nearest */
9394         return true;
9395     case float_round_up: /* Round to +Inf */
9396         return !sign_bit;
9397     case float_round_down: /* Round to -Inf */
9398         return sign_bit;
9399     case float_round_to_zero: /* Round to Zero */
9400         return false;
9401     }
9402 
9403     g_assert_not_reached();
9404 }
9405 
9406 float32 HELPER(recpe_f32)(float32 input, void *fpstp)
9407 {
9408     float_status *fpst = fpstp;
9409     float32 f32 = float32_squash_input_denormal(input, fpst);
9410     uint32_t f32_val = float32_val(f32);
9411     uint32_t f32_sbit = 0x80000000ULL & f32_val;
9412     int32_t f32_exp = extract32(f32_val, 23, 8);
9413     uint32_t f32_frac = extract32(f32_val, 0, 23);
9414     float64 f64, r64;
9415     uint64_t r64_val;
9416     int64_t r64_exp;
9417     uint64_t r64_frac;
9418 
9419     if (float32_is_any_nan(f32)) {
9420         float32 nan = f32;
9421         if (float32_is_signaling_nan(f32, fpst)) {
9422             float_raise(float_flag_invalid, fpst);
9423             nan = float32_maybe_silence_nan(f32, fpst);
9424         }
9425         if (fpst->default_nan_mode) {
9426             nan =  float32_default_nan(fpst);
9427         }
9428         return nan;
9429     } else if (float32_is_infinity(f32)) {
9430         return float32_set_sign(float32_zero, float32_is_neg(f32));
9431     } else if (float32_is_zero(f32)) {
9432         float_raise(float_flag_divbyzero, fpst);
9433         return float32_set_sign(float32_infinity, float32_is_neg(f32));
9434     } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
9435         /* Abs(value) < 2.0^-128 */
9436         float_raise(float_flag_overflow | float_flag_inexact, fpst);
9437         if (round_to_inf(fpst, f32_sbit)) {
9438             return float32_set_sign(float32_infinity, float32_is_neg(f32));
9439         } else {
9440             return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
9441         }
9442     } else if (f32_exp >= 253 && fpst->flush_to_zero) {
9443         float_raise(float_flag_underflow, fpst);
9444         return float32_set_sign(float32_zero, float32_is_neg(f32));
9445     }
9446 
9447 
9448     f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
9449     r64 = call_recip_estimate(f64, 253, fpst);
9450     r64_val = float64_val(r64);
9451     r64_exp = extract64(r64_val, 52, 11);
9452     r64_frac = extract64(r64_val, 0, 52);
9453 
9454     /* result = sign : result_exp<7:0> : fraction<51:29>; */
9455     return make_float32(f32_sbit |
9456                         (r64_exp & 0xff) << 23 |
9457                         extract64(r64_frac, 29, 24));
9458 }
9459 
9460 float64 HELPER(recpe_f64)(float64 input, void *fpstp)
9461 {
9462     float_status *fpst = fpstp;
9463     float64 f64 = float64_squash_input_denormal(input, fpst);
9464     uint64_t f64_val = float64_val(f64);
9465     uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
9466     int64_t f64_exp = extract64(f64_val, 52, 11);
9467     float64 r64;
9468     uint64_t r64_val;
9469     int64_t r64_exp;
9470     uint64_t r64_frac;
9471 
9472     /* Deal with any special cases */
9473     if (float64_is_any_nan(f64)) {
9474         float64 nan = f64;
9475         if (float64_is_signaling_nan(f64, fpst)) {
9476             float_raise(float_flag_invalid, fpst);
9477             nan = float64_maybe_silence_nan(f64, fpst);
9478         }
9479         if (fpst->default_nan_mode) {
9480             nan =  float64_default_nan(fpst);
9481         }
9482         return nan;
9483     } else if (float64_is_infinity(f64)) {
9484         return float64_set_sign(float64_zero, float64_is_neg(f64));
9485     } else if (float64_is_zero(f64)) {
9486         float_raise(float_flag_divbyzero, fpst);
9487         return float64_set_sign(float64_infinity, float64_is_neg(f64));
9488     } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
9489         /* Abs(value) < 2.0^-1024 */
9490         float_raise(float_flag_overflow | float_flag_inexact, fpst);
9491         if (round_to_inf(fpst, f64_sbit)) {
9492             return float64_set_sign(float64_infinity, float64_is_neg(f64));
9493         } else {
9494             return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
9495         }
9496     } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
9497         float_raise(float_flag_underflow, fpst);
9498         return float64_set_sign(float64_zero, float64_is_neg(f64));
9499     }
9500 
9501     r64 = call_recip_estimate(f64, 2045, fpst);
9502     r64_val = float64_val(r64);
9503     r64_exp = extract64(r64_val, 52, 11);
9504     r64_frac = extract64(r64_val, 0, 52);
9505 
9506     /* result = sign : result_exp<10:0> : fraction<51:0> */
9507     return make_float64(f64_sbit |
9508                         ((r64_exp & 0x7ff) << 52) |
9509                         r64_frac);
9510 }
9511 
9512 /* The algorithm that must be used to calculate the estimate
9513  * is specified by the ARM ARM.
9514  */
9515 static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
9516 {
9517     /* These calculations mustn't set any fp exception flags,
9518      * so we use a local copy of the fp_status.
9519      */
9520     float_status dummy_status = *real_fp_status;
9521     float_status *s = &dummy_status;
9522     float64 q;
9523     int64_t q_int;
9524 
9525     if (float64_lt(a, float64_half, s)) {
9526         /* range 0.25 <= a < 0.5 */
9527 
9528         /* a in units of 1/512 rounded down */
9529         /* q0 = (int)(a * 512.0);  */
9530         q = float64_mul(float64_512, a, s);
9531         q_int = float64_to_int64_round_to_zero(q, s);
9532 
9533         /* reciprocal root r */
9534         /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
9535         q = int64_to_float64(q_int, s);
9536         q = float64_add(q, float64_half, s);
9537         q = float64_div(q, float64_512, s);
9538         q = float64_sqrt(q, s);
9539         q = float64_div(float64_one, q, s);
9540     } else {
9541         /* range 0.5 <= a < 1.0 */
9542 
9543         /* a in units of 1/256 rounded down */
9544         /* q1 = (int)(a * 256.0); */
9545         q = float64_mul(float64_256, a, s);
9546         int64_t q_int = float64_to_int64_round_to_zero(q, s);
9547 
9548         /* reciprocal root r */
9549         /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
9550         q = int64_to_float64(q_int, s);
9551         q = float64_add(q, float64_half, s);
9552         q = float64_div(q, float64_256, s);
9553         q = float64_sqrt(q, s);
9554         q = float64_div(float64_one, q, s);
9555     }
9556     /* r in units of 1/256 rounded to nearest */
9557     /* s = (int)(256.0 * r + 0.5); */
9558 
9559     q = float64_mul(q, float64_256,s );
9560     q = float64_add(q, float64_half, s);
9561     q_int = float64_to_int64_round_to_zero(q, s);
9562 
9563     /* return (double)s / 256.0;*/
9564     return float64_div(int64_to_float64(q_int, s), float64_256, s);
9565 }
9566 
9567 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
9568 {
9569     float_status *s = fpstp;
9570     float32 f32 = float32_squash_input_denormal(input, s);
9571     uint32_t val = float32_val(f32);
9572     uint32_t f32_sbit = 0x80000000 & val;
9573     int32_t f32_exp = extract32(val, 23, 8);
9574     uint32_t f32_frac = extract32(val, 0, 23);
9575     uint64_t f64_frac;
9576     uint64_t val64;
9577     int result_exp;
9578     float64 f64;
9579 
9580     if (float32_is_any_nan(f32)) {
9581         float32 nan = f32;
9582         if (float32_is_signaling_nan(f32, s)) {
9583             float_raise(float_flag_invalid, s);
9584             nan = float32_maybe_silence_nan(f32, s);
9585         }
9586         if (s->default_nan_mode) {
9587             nan =  float32_default_nan(s);
9588         }
9589         return nan;
9590     } else if (float32_is_zero(f32)) {
9591         float_raise(float_flag_divbyzero, s);
9592         return float32_set_sign(float32_infinity, float32_is_neg(f32));
9593     } else if (float32_is_neg(f32)) {
9594         float_raise(float_flag_invalid, s);
9595         return float32_default_nan(s);
9596     } else if (float32_is_infinity(f32)) {
9597         return float32_zero;
9598     }
9599 
9600     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9601      * preserving the parity of the exponent.  */
9602 
9603     f64_frac = ((uint64_t) f32_frac) << 29;
9604     if (f32_exp == 0) {
9605         while (extract64(f64_frac, 51, 1) == 0) {
9606             f64_frac = f64_frac << 1;
9607             f32_exp = f32_exp-1;
9608         }
9609         f64_frac = extract64(f64_frac, 0, 51) << 1;
9610     }
9611 
9612     if (extract64(f32_exp, 0, 1) == 0) {
9613         f64 = make_float64(((uint64_t) f32_sbit) << 32
9614                            | (0x3feULL << 52)
9615                            | f64_frac);
9616     } else {
9617         f64 = make_float64(((uint64_t) f32_sbit) << 32
9618                            | (0x3fdULL << 52)
9619                            | f64_frac);
9620     }
9621 
9622     result_exp = (380 - f32_exp) / 2;
9623 
9624     f64 = recip_sqrt_estimate(f64, s);
9625 
9626     val64 = float64_val(f64);
9627 
9628     val = ((result_exp & 0xff) << 23)
9629         | ((val64 >> 29)  & 0x7fffff);
9630     return make_float32(val);
9631 }
9632 
9633 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
9634 {
9635     float_status *s = fpstp;
9636     float64 f64 = float64_squash_input_denormal(input, s);
9637     uint64_t val = float64_val(f64);
9638     uint64_t f64_sbit = 0x8000000000000000ULL & val;
9639     int64_t f64_exp = extract64(val, 52, 11);
9640     uint64_t f64_frac = extract64(val, 0, 52);
9641     int64_t result_exp;
9642     uint64_t result_frac;
9643 
9644     if (float64_is_any_nan(f64)) {
9645         float64 nan = f64;
9646         if (float64_is_signaling_nan(f64, s)) {
9647             float_raise(float_flag_invalid, s);
9648             nan = float64_maybe_silence_nan(f64, s);
9649         }
9650         if (s->default_nan_mode) {
9651             nan =  float64_default_nan(s);
9652         }
9653         return nan;
9654     } else if (float64_is_zero(f64)) {
9655         float_raise(float_flag_divbyzero, s);
9656         return float64_set_sign(float64_infinity, float64_is_neg(f64));
9657     } else if (float64_is_neg(f64)) {
9658         float_raise(float_flag_invalid, s);
9659         return float64_default_nan(s);
9660     } else if (float64_is_infinity(f64)) {
9661         return float64_zero;
9662     }
9663 
9664     /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9665      * preserving the parity of the exponent.  */
9666 
9667     if (f64_exp == 0) {
9668         while (extract64(f64_frac, 51, 1) == 0) {
9669             f64_frac = f64_frac << 1;
9670             f64_exp = f64_exp - 1;
9671         }
9672         f64_frac = extract64(f64_frac, 0, 51) << 1;
9673     }
9674 
9675     if (extract64(f64_exp, 0, 1) == 0) {
9676         f64 = make_float64(f64_sbit
9677                            | (0x3feULL << 52)
9678                            | f64_frac);
9679     } else {
9680         f64 = make_float64(f64_sbit
9681                            | (0x3fdULL << 52)
9682                            | f64_frac);
9683     }
9684 
9685     result_exp = (3068 - f64_exp) / 2;
9686 
9687     f64 = recip_sqrt_estimate(f64, s);
9688 
9689     result_frac = extract64(float64_val(f64), 0, 52);
9690 
9691     return make_float64(f64_sbit |
9692                         ((result_exp & 0x7ff) << 52) |
9693                         result_frac);
9694 }
9695 
9696 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
9697 {
9698     float_status *s = fpstp;
9699     float64 f64;
9700 
9701     if ((a & 0x80000000) == 0) {
9702         return 0xffffffff;
9703     }
9704 
9705     f64 = make_float64((0x3feULL << 52)
9706                        | ((int64_t)(a & 0x7fffffff) << 21));
9707 
9708     f64 = recip_estimate(f64, s);
9709 
9710     return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9711 }
9712 
9713 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
9714 {
9715     float_status *fpst = fpstp;
9716     float64 f64;
9717 
9718     if ((a & 0xc0000000) == 0) {
9719         return 0xffffffff;
9720     }
9721 
9722     if (a & 0x80000000) {
9723         f64 = make_float64((0x3feULL << 52)
9724                            | ((uint64_t)(a & 0x7fffffff) << 21));
9725     } else { /* bits 31-30 == '01' */
9726         f64 = make_float64((0x3fdULL << 52)
9727                            | ((uint64_t)(a & 0x3fffffff) << 22));
9728     }
9729 
9730     f64 = recip_sqrt_estimate(f64, fpst);
9731 
9732     return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9733 }
9734 
9735 /* VFPv4 fused multiply-accumulate */
9736 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
9737 {
9738     float_status *fpst = fpstp;
9739     return float32_muladd(a, b, c, 0, fpst);
9740 }
9741 
9742 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
9743 {
9744     float_status *fpst = fpstp;
9745     return float64_muladd(a, b, c, 0, fpst);
9746 }
9747 
9748 /* ARMv8 round to integral */
9749 float32 HELPER(rints_exact)(float32 x, void *fp_status)
9750 {
9751     return float32_round_to_int(x, fp_status);
9752 }
9753 
9754 float64 HELPER(rintd_exact)(float64 x, void *fp_status)
9755 {
9756     return float64_round_to_int(x, fp_status);
9757 }
9758 
9759 float32 HELPER(rints)(float32 x, void *fp_status)
9760 {
9761     int old_flags = get_float_exception_flags(fp_status), new_flags;
9762     float32 ret;
9763 
9764     ret = float32_round_to_int(x, fp_status);
9765 
9766     /* Suppress any inexact exceptions the conversion produced */
9767     if (!(old_flags & float_flag_inexact)) {
9768         new_flags = get_float_exception_flags(fp_status);
9769         set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
9770     }
9771 
9772     return ret;
9773 }
9774 
9775 float64 HELPER(rintd)(float64 x, void *fp_status)
9776 {
9777     int old_flags = get_float_exception_flags(fp_status), new_flags;
9778     float64 ret;
9779 
9780     ret = float64_round_to_int(x, fp_status);
9781 
9782     new_flags = get_float_exception_flags(fp_status);
9783 
9784     /* Suppress any inexact exceptions the conversion produced */
9785     if (!(old_flags & float_flag_inexact)) {
9786         new_flags = get_float_exception_flags(fp_status);
9787         set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
9788     }
9789 
9790     return ret;
9791 }
9792 
9793 /* Convert ARM rounding mode to softfloat */
9794 int arm_rmode_to_sf(int rmode)
9795 {
9796     switch (rmode) {
9797     case FPROUNDING_TIEAWAY:
9798         rmode = float_round_ties_away;
9799         break;
9800     case FPROUNDING_ODD:
9801         /* FIXME: add support for TIEAWAY and ODD */
9802         qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
9803                       rmode);
9804     case FPROUNDING_TIEEVEN:
9805     default:
9806         rmode = float_round_nearest_even;
9807         break;
9808     case FPROUNDING_POSINF:
9809         rmode = float_round_up;
9810         break;
9811     case FPROUNDING_NEGINF:
9812         rmode = float_round_down;
9813         break;
9814     case FPROUNDING_ZERO:
9815         rmode = float_round_to_zero;
9816         break;
9817     }
9818     return rmode;
9819 }
9820 
9821 /* CRC helpers.
9822  * The upper bytes of val (above the number specified by 'bytes') must have
9823  * been zeroed out by the caller.
9824  */
9825 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
9826 {
9827     uint8_t buf[4];
9828 
9829     stl_le_p(buf, val);
9830 
9831     /* zlib crc32 converts the accumulator and output to one's complement.  */
9832     return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
9833 }
9834 
9835 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
9836 {
9837     uint8_t buf[4];
9838 
9839     stl_le_p(buf, val);
9840 
9841     /* Linux crc32c converts the output to one's complement.  */
9842     return crc32c(acc, buf, bytes) ^ 0xffffffff;
9843 }
9844