xref: /openbmc/qemu/target/arm/helper.c (revision 5093bee0)
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

static void switch_mode(CPUARMState *env, int mode);

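/*
 * Raw field accessors: access a register's backing storage in CPUARMState
 * directly via ri->fieldoffset, with no read/write side effects.
 */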
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /*
     * Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /*
     * Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /*
         * Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx = (uintptr_t)key;
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    const ARMCPRegInfo *ri;

    ri = g_hash_table_lookup(cpu->cp_regs, key);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

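/*
 * Comparator for sorting the cpreg key list: order by the 64-bit KVM
 * register ID, since init_cpreg_list() requires the list sorted by key.
 */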
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
    uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /*
     * Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

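    /*
     * Two passes over the sorted keys: first count the raw-accessible
     * registers so the arrays can be sized, then fill in the indexes.
     */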
    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

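/*
 * Return true if PAN is currently in effect: PSTATE.PAN for AArch64
 * (except that with HCR_EL2.{NV,NV1} == {1,1} the bit behaves as 0),
 * CPSR.PAN for AArch32.
 */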
static bool arm_pan_enabled(CPUARMState *env)
{
    if (is_a64(env)) {
        if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
            return false;
        }
        return env->pstate & PSTATE_PAN;
    } else {
        return env->uncached_cpsr & CPSR_PAN;
    }
}

/*
 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    if (!is_a64(env) && arm_current_el(env) == 3 &&
        arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

/*
 * Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        if (env->cp15.scr_el3 & SCR_EEL2) {
            return CP_ACCESS_TRAP_EL2;
        }
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/*
 * Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                               bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW.  */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR.  */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */
static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
#endif

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /*
         * Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /*
         * For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static int alle1_tlbmask(CPUARMState *env)
{
    /*
     * Note that the 'ALL' scope must invalidate both stage 1 and
     * stage 2 translations, whereas most other scopes only invalidate
     * stage 1 translations.
     */
    return (ARMMMUIdxBit_E10_1 |
            ARMMMUIdxBit_E10_1_PAN |
            ARMMMUIdxBit_E10_0 |
            ARMMMUIdxBit_Stage2 |
            ARMMMUIdxBit_Stage2_S);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env));
}

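/* TLBI ops targeting the EL2 (hyp) translation regime */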
static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

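/*
 * TLBI IPAS2 ops invalidate stage 2 entries by intermediate physical
 * address: bits [27:0] of the written value hold IPA[39:12].
 */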
static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12;

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /*
     * Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /*
     * Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fgt = FGT_CONTEXTIDR_EL1,
      .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /*
     * NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /*
     * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /*
     * Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /*
     * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /*
     * L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /*
     * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /*
     * MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
};

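/*
 * Write handler for CPACR/CPACR_EL1: mask off bits that are RES0 or
 * RAZ/WI for the implemented feature set before updating the register.
 */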
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /*
         * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= R_CPACR_ASEDIS_MASK |
                    R_CPACR_D32DIS_MASK |
                    R_CPACR_CP11_MASK |
                    R_CPACR_CP10_MASK;

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= R_CPACR_ASEDIS_MASK;
            }

            /*
             * VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= R_CPACR_D32DIS_MASK;
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
        value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
            FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 &&
        FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /*
     * We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /*
     * Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .fgt = FGT_CPACR_EL1,
      .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
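/*
 * For example: if a counter is programmed to 0 while get_count() returns
 * 1000, the stored delta is 1000; when get_count() later returns 1600,
 * the guest-visible counter value is 1600 - 1000 = 600.
 */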

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, there is no virtual clock, so use the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    /* Precise instruction counting */
    return icount_enabled() == ICOUNT_PRECISE;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    assert(icount_enabled() == ICOUNT_PRECISE);
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmuv3p1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
}

static bool pmuv3p4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmuv3p1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmuv3p4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /*
     * Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);
    uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/*
 * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
 * We use these to decide whether we need to wrap a write to MDCR_EL2
 * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
 */
#define MDCR_EL2_PMU_ENABLE_BITS \
    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited = false, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint64_t mdcr_el2;
    uint8_t hpmn;

    /*
     * We might be called for M-profile cores where MDCR_EL2 doesn't
     * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
     * must be before we read that value.
     */
    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    mdcr_el2 = arm_mdcr_el2_eff(env);
    hpmn = mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
            (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    /* Is event counting prohibited? */
    if (el == 2 && (counter < hpmn || counter == 31)) {
        prohibited = mdcr_el2 & MDCR_HPMD;
    }
    if (secure) {
        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (counter == 31) {
        /*
         * The cycle counter defaults to running. PMCR.DP says "disable
         * the cycle counter when event counting is prohibited".
         * Some MDCR bits disable the cycle counter specifically.
         */
        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            if (secure) {
                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
            }
            if (el == 2) {
                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
            }
        }
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

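    /*
     * Event filter bits: P = privileged (EL1), U = user (EL0),
     * NSK/NSU = their Non-secure counterparts, NSH = EL2, M = Secure EL3.
     */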
    p   = filter & PMXEVTYPER_P;
    u   = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m   = arm_el_is_aa64(env, 1) &&
              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

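/*
 * Recompute the PMU interrupt line: it is asserted when PMCR.E is set
 * and any enabled overflow flag (PMINTEN & PMOVSR) is pending.
 */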
static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmccntr_clockdiv_enabled(CPUARMState *env)
{
    /*
     * Return true if the clock divider is enabled and the cycle counter
     * is supposed to tick only once every 64 clock cycles. This is
     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
     * (64-bit) cycle counter PMCR.D has no effect.
     */
    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
}

static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
{
    /* Return true if the specified event counter is configured to be 64 bit */

    /* This isn't intended to be used with the cycle counter */
    assert(counter < 31);

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_EL2)) {
        /*
         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
         * current security state, so we don't use arm_mdcr_el2_eff() here.
         */
        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

        if (counter >= hpmn) {
            return hlp;
        }
    }
    return env->cp15.c9_pmcr & PMCRLP;
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (pmccntr_clockdiv_enabled(env)) {
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1ULL << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (pmccntr_clockdiv_enabled(env)) {
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

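/*
 * As pmccntr_op_start(), but for an event counter: bring
 * c14_pmevcntr[counter] up to date with the underlying event count.
 */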
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
            1ULL << 63 : 1ULL << 31;

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
        int64_t overflow_in;

        if (!pmevcntr_is_64_bit(env, counter)) {
            delta = (uint32_t)delta;
        }
        overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at;

            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                                 overflow_in, &overflow_at)) {
                ARMCPU *cpu = env_archcpu(env);
                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
            }
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* PMCR.C: reset the cycle counter */
1488          env->cp15.c15_ccnt = 0;
1489      }
1490  
1491      if (value & PMCRP) {
1492          unsigned int i;
1493          for (i = 0; i < pmu_num_counters(env); i++) {
1494              env->cp15.c14_pmevcntr[i] = 0;
1495          }
1496      }
1497  
1498      env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
1499      env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
1500  
1501      pmu_op_finish(env);
1502  }
1503  
1504  static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1505  {
1506      uint64_t pmcr = env->cp15.c9_pmcr;
1507  
1508      /*
1509       * If EL2 is implemented and enabled for the current security state, reads
1510       * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
1511       */
1512      if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
1513          pmcr &= ~PMCRN_MASK;
1514          pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
1515      }
1516  
1517      return pmcr;
1518  }
1519  
1520  static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1521                            uint64_t value)
1522  {
1523      unsigned int i;
1524      uint64_t overflow_mask, new_pmswinc;
1525  
1526      for (i = 0; i < pmu_num_counters(env); i++) {
1527          /* Increment a counter's count iff: */
1528          if ((value & (1 << i)) && /* counter's bit is set */
1529                  /* counter is enabled and not filtered */
1530                  pmu_counter_enabled(env, i) &&
1531                  /* counter is SW_INCR */
1532                  (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1533              pmevcntr_op_start(env, i);
1534  
1535              /*
1536               * Detect if this write causes an overflow since we can't predict
1537               * PMSWINC overflows like we can for other events.
1538               */
1539              new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1540  
1541              overflow_mask = pmevcntr_is_64_bit(env, i) ?
1542                  1ULL << 63 : 1ULL << 31;
1543  
1544              if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
1545                  env->cp15.c9_pmovsr |= (1 << i);
1546                  pmu_update_irq(env);
1547              }
1548  
1549              env->cp15.c14_pmevcntr[i] = new_pmswinc;
1550  
1551              pmevcntr_op_finish(env, i);
1552          }
1553      }
1554  }
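
/*
 * The "old & ~new & overflow_mask" test above detects a falling edge of
 * the counter's top bit. Illustrative 32-bit cases: old = 0xffffffff
 * gives new = 0x00000000, so old & ~new & (1ULL << 31) is nonzero and
 * the overflow flag is set; old = 0x7fffffff gives new = 0x80000000 and
 * the expression is zero, since a 0 -> 1 transition of the top bit is
 * not an overflow.
 */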
1555  
1556  static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1557  {
1558      uint64_t ret;
1559      pmccntr_op_start(env);
1560      ret = env->cp15.c15_ccnt;
1561      pmccntr_op_finish(env);
1562      return ret;
1563  }
1564  
1565  static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1566                           uint64_t value)
1567  {
1568      /*
1569       * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1570       * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
1571       * PMSELR.SEL value is then checked when PMXEVTYPER and PMXEVCNTR are
1572       * actually accessed.
1573       */
1574      env->cp15.c9_pmselr = value & 0x1f;
1575  }
1576  
1577  static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1578                          uint64_t value)
1579                            uint64_t value)
1580      pmccntr_op_start(env);
1581      env->cp15.c15_ccnt = value;
1582      pmccntr_op_finish(env);
1583  }
1584  
1585  static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1586                              uint64_t value)
1587  {
1588      uint64_t cur_val = pmccntr_read(env, NULL);
1589  
1590      pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1591  }
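
/*
 * deposit64(cur_val, 0, 32, value) replaces bits [31:0] of the 64-bit
 * cycle counter and preserves bits [63:32]. With hypothetical values,
 * cur_val = 0x0000000289abcdefULL and value = 0x11112222 produce
 * 0x0000000211112222ULL: this is how the AArch32 PMCCNTR gets 32-bit
 * write semantics on top of the 64-bit PMCCNTR_EL0 state.
 */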
1592  
1593  static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1594                              uint64_t value)
1595  {
1596      pmccntr_op_start(env);
1597      env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1598      pmccntr_op_finish(env);
1599  }
1600  
1601  static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1602                              uint64_t value)
1603                                  uint64_t value)
1604      pmccntr_op_start(env);
1605      /* M is not accessible from AArch32 */
1606      env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1607          (value & PMCCFILTR);
1608      pmccntr_op_finish(env);
1609  }
1610  
1611  static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1612  {
1613      /* M is not visible in AArch32 */
1614      return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1615  }
1616  
1617  static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1618                              uint64_t value)
1619                               uint64_t value)
1620      pmu_op_start(env);
1621      value &= pmu_counter_mask(env);
1622      env->cp15.c9_pmcnten |= value;
1623      pmu_op_finish(env);
1624  }
1625  
1626  static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1627                               uint64_t value)
1628  {
1629      pmu_op_start(env);
1630      value &= pmu_counter_mask(env);
1631      env->cp15.c9_pmcnten &= ~value;
1632      pmu_op_finish(env);
1633  }
1634  
1635  static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1636                           uint64_t value)
1637  {
1638      value &= pmu_counter_mask(env);
1639      env->cp15.c9_pmovsr &= ~value;
1640      pmu_update_irq(env);
1641  }
1642  
1643  static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1644                           uint64_t value)
1645                             uint64_t value)
1646      value &= pmu_counter_mask(env);
1647      env->cp15.c9_pmovsr |= value;
1648      pmu_update_irq(env);
1649  }
1650  
1651  static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1652                               uint64_t value, const uint8_t counter)
1653                              uint64_t value, const uint8_t counter)
1654      if (counter == 31) {
1655          pmccfiltr_write(env, ri, value);
1656      } else if (counter < pmu_num_counters(env)) {
1657          pmevcntr_op_start(env, counter);
1658  
1659          /*
1660           * If this counter's event type is changing, store the current
1661           * underlying count for the new type in c14_pmevcntr_delta[counter] so
1662           * pmevcntr_op_finish has the correct baseline when it converts back to
1663           * a delta.
1664           */
1665          uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1666              PMXEVTYPER_EVTCOUNT;
1667          uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1668          if (old_event != new_event) {
1669              uint64_t count = 0;
1670              if (event_supported(new_event)) {
1671                  uint16_t event_idx = supported_event_map[new_event];
1672                  count = pm_events[event_idx].get_count(env);
1673              }
1674              env->cp15.c14_pmevcntr_delta[counter] = count;
1675          }
1676  
1677          env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1678          pmevcntr_op_finish(env, counter);
1679      }
1680      /*
1681       * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1682       * PMSELR value is equal to or greater than the number of implemented
1683       * the PMSELR value is equal to or greater than the number of implemented
1684       */
1685  }
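
/*
 * Worked example of the rebaselining above, with made-up numbers: suppose
 * the counter switches to an event whose get_count() currently returns
 * 1000 while the guest-visible counter reads 42. We store 1000 in
 * c14_pmevcntr_delta[counter]; pmevcntr_op_finish() then subtracts 42,
 * leaving delta = 958, so the next pmevcntr_op_start() computes
 * get_count() - delta = 42 plus however many new-type events have
 * occurred since -- the visible count resumes from 42 rather than
 * jumping to the new event's absolute total.
 */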
1686  
1687  static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1688                                 const uint8_t counter)
1689  {
1690      if (counter == 31) {
1691          return env->cp15.pmccfiltr_el0;
1692      } else if (counter < pmu_num_counters(env)) {
1693          return env->cp15.c14_pmevtyper[counter];
1694      } else {
1695          /*
1696           * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1697           * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1698           */
1699          return 0;
1700      }
1701  }
1702  
1703  static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1704                                uint64_t value)
1705  {
1706      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1707      pmevtyper_write(env, ri, value, counter);
1708  }
1709  
1710  static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1711                                 uint64_t value)
1712  {
1713      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1714      env->cp15.c14_pmevtyper[counter] = value;
1715  
1716      /*
1717       * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1718       * pmu_op_finish calls when loading saved state for a migration. Because
1719       * we're potentially updating the type of event here, the value written to
1720       * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1721       * different counter type. Therefore, we need to set this value to the
1722       * current count for the counter type we're writing so that pmu_op_finish
1723       * has the correct count for its calculation.
1724       */
1725      uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1726      if (event_supported(event)) {
1727          uint16_t event_idx = supported_event_map[event];
1728          env->cp15.c14_pmevcntr_delta[counter] =
1729              pm_events[event_idx].get_count(env);
1730      }
1731  }
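
/*
 * A simplified sketch of the migration-load sequence this relies on (the
 * real loop lives in the cpreg save/load code):
 *
 *     pmu_op_start(env);                     // baseline with old state
 *     pmevtyper_rawwrite(env, ri_typer, v1); // new type, delta corrected
 *     pmevcntr_rawwrite(env, ri_cntr, v2);   // raw counter value restored
 *     pmu_op_finish(env);                    // consumes corrected deltas
 */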
1732  
1733  static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1734  {
1735      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1736      return pmevtyper_read(env, ri, counter);
1737  }
1738  
1739  static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1740                               uint64_t value)
1741  {
1742      pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1743  }
1744  
1745  static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1746  {
1747      return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1748  }
1749  
1750  static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1751                               uint64_t value, uint8_t counter)
1752                             uint64_t value, uint8_t counter)
1753      if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1754          /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1755          value &= MAKE_64BIT_MASK(0, 32);
1756      }
1757      if (counter < pmu_num_counters(env)) {
1758          pmevcntr_op_start(env, counter);
1759          env->cp15.c14_pmevcntr[counter] = value;
1760          pmevcntr_op_finish(env, counter);
1761      }
1762      /*
1763       * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1764       * are CONSTRAINED UNPREDICTABLE.
1765       */
1766  }
1767  
1768  static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1769                                uint8_t counter)
1770  {
1771      if (counter < pmu_num_counters(env)) {
1772          uint64_t ret;
1773          pmevcntr_op_start(env, counter);
1774          ret = env->cp15.c14_pmevcntr[counter];
1775          pmevcntr_op_finish(env, counter);
1776          if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
1777              /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
1778              ret &= MAKE_64BIT_MASK(0, 32);
1779          }
1780          return ret;
1781      } else {
1782          /*
1783           * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1784           * are CONSTRAINED UNPREDICTABLE.
1785           */
1786          return 0;
1787      }
1788  }
1789  
1790  static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1791                               uint64_t value)
1792  {
1793      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1794      pmevcntr_write(env, ri, value, counter);
1795  }
1796  
1797  static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1798  {
1799      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1800      return pmevcntr_read(env, ri, counter);
1801  }
1802  
1803  static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1804                               uint64_t value)
1805                                uint64_t value)
1806      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1807      assert(counter < pmu_num_counters(env));
1808      env->cp15.c14_pmevcntr[counter] = value;
1810  }
1811  
1812  static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1813  {
1814      uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1815      assert(counter < pmu_num_counters(env));
1816      return env->cp15.c14_pmevcntr[counter];
1817  }
1818  
1819  static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1820                               uint64_t value)
1821                              uint64_t value)
1822      pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1823  }
1824  
1825  static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1826  {
1827      return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1828  }
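
/*
 * PMXEVCNTR/PMXEVTYPER are indirect views selected by PMSELR.SEL. For
 * example, a guest reads event counter 2 as:
 *
 *     msr pmselr_el0, x0       // x0 == 2
 *     mrs x1, pmxevcntr_el0    // reads PMEVCNTR2_EL0
 *
 * which arrives here as pmselr_write() followed by pmxevcntr_read() with
 * env->cp15.c9_pmselr == 2.
 */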
1829  
1830  static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1831                              uint64_t value)
1832  {
1833      if (arm_feature(env, ARM_FEATURE_V8)) {
1834          env->cp15.c9_pmuserenr = value & 0xf;
1835      } else {
1836          env->cp15.c9_pmuserenr = value & 1;
1837      }
1838  }
1839  
1840  static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1841                               uint64_t value)
1842  {
1843      /* Only the bits for implemented counters (and the C bit) can be set */
1844      value &= pmu_counter_mask(env);
1845      env->cp15.c9_pminten |= value;
1846      pmu_update_irq(env);
1847  }
1848  
1849  static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1850                               uint64_t value)
1851  {
1852      value &= pmu_counter_mask(env);
1853      env->cp15.c9_pminten &= ~value;
1854      pmu_update_irq(env);
1855  }
1856  
1857  static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1858                         uint64_t value)
1859  {
1860      /*
1861       * Note that even though the AArch64 view of this register has bits
1862       * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1863       * [10:0] all RES0, we can only mask the bottom 5, to comply with the
1864       * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1865       * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1866       */
1867      raw_write(env, ri, value & ~0x1FULL);
1868  }
1869  
1870  static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1871  {
1872      /* Begin with base v8.0 state.  */
1873      uint64_t valid_mask = 0x3fff;
1874      ARMCPU *cpu = env_archcpu(env);
1875      uint64_t changed;
1876  
1877      /*
1878       * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
1879       * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64.
1880       * Instead, choose the format based on the mode of EL3.
1881       */
1882      if (arm_el_is_aa64(env, 3)) {
1883          value |= SCR_FW | SCR_AW;      /* RES1 */
1884          valid_mask &= ~SCR_NET;        /* RES0 */
1885  
1886          if (!cpu_isar_feature(aa64_aa32_el1, cpu) &&
1887              !cpu_isar_feature(aa64_aa32_el2, cpu)) {
1888              value |= SCR_RW;           /* RAO/WI */
1889          }
1890          if (cpu_isar_feature(aa64_ras, cpu)) {
1891              valid_mask |= SCR_TERR;
1892          }
1893          if (cpu_isar_feature(aa64_lor, cpu)) {
1894              valid_mask |= SCR_TLOR;
1895          }
1896          if (cpu_isar_feature(aa64_pauth, cpu)) {
1897              valid_mask |= SCR_API | SCR_APK;
1898          }
1899          if (cpu_isar_feature(aa64_sel2, cpu)) {
1900              valid_mask |= SCR_EEL2;
1901          } else if (cpu_isar_feature(aa64_rme, cpu)) {
1902              /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */
1903              value |= SCR_NS;
1904          }
1905          if (cpu_isar_feature(aa64_mte, cpu)) {
1906              valid_mask |= SCR_ATA;
1907          }
1908          if (cpu_isar_feature(aa64_scxtnum, cpu)) {
1909              valid_mask |= SCR_ENSCXT;
1910          }
1911          if (cpu_isar_feature(aa64_doublefault, cpu)) {
1912              valid_mask |= SCR_EASE | SCR_NMEA;
1913          }
1914          if (cpu_isar_feature(aa64_sme, cpu)) {
1915              valid_mask |= SCR_ENTP2;
1916          }
1917          if (cpu_isar_feature(aa64_hcx, cpu)) {
1918              valid_mask |= SCR_HXEN;
1919          }
1920          if (cpu_isar_feature(aa64_fgt, cpu)) {
1921              valid_mask |= SCR_FGTEN;
1922          }
1923          if (cpu_isar_feature(aa64_rme, cpu)) {
1924              valid_mask |= SCR_NSE | SCR_GPF;
1925          }
1926          if (cpu_isar_feature(aa64_ecv, cpu)) {
1927              valid_mask |= SCR_ECVEN;
1928          }
1929      } else {
1930          valid_mask &= ~(SCR_RW | SCR_ST);
1931          if (cpu_isar_feature(aa32_ras, cpu)) {
1932              valid_mask |= SCR_TERR;
1933          }
1934      }
1935  
1936      if (!arm_feature(env, ARM_FEATURE_EL2)) {
1937          valid_mask &= ~SCR_HCE;
1938  
1939          /*
1940           * On ARMv7, SMD (or SCD as it is called in v7) is only
1941           * supported if EL2 exists; when EL2 is unavailable the bit is
1942           * UNK/SBZP, and in QEMU we force it to zero in that case.
1943           * On ARMv8, this bit is always available.
1944           */
1946          if (arm_feature(env, ARM_FEATURE_V7) &&
1947              !arm_feature(env, ARM_FEATURE_V8)) {
1948              valid_mask &= ~SCR_SMD;
1949          }
1950      }
1951  
1952      /* Clear all-context RES0 bits.  */
1953      value &= valid_mask;
1954      changed = env->cp15.scr_el3 ^ value;
1955      env->cp15.scr_el3 = value;
1956  
1957      /*
1958       * If SCR_EL3.{NS,NSE} changes, i.e. change of security state,
1959       * we must invalidate all TLBs below EL3.
1960       */
1961      if (changed & (SCR_NS | SCR_NSE)) {
1962          tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
1963                                             ARMMMUIdxBit_E20_0 |
1964                                             ARMMMUIdxBit_E10_1 |
1965                                             ARMMMUIdxBit_E20_2 |
1966                                             ARMMMUIdxBit_E10_1_PAN |
1967                                             ARMMMUIdxBit_E20_2_PAN |
1968                                             ARMMMUIdxBit_E2));
1969      }
1970  }
1971  
1972  static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1973  {
1974      /*
1975       * scr_write will set the RES1 bits on an AArch64-only CPU.
1976       * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise.
1977       */
1978      scr_write(env, ri, 0);
1979  }
1980  
1981  static CPAccessResult access_tid4(CPUARMState *env,
1982                                    const ARMCPRegInfo *ri,
1983                                    bool isread)
1984  {
1985      if (arm_current_el(env) == 1 &&
1986          (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) {
1987          return CP_ACCESS_TRAP_EL2;
1988      }
1989  
1990      return CP_ACCESS_OK;
1991  }
1992  
1993  static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1994  {
1995      ARMCPU *cpu = env_archcpu(env);
1996  
1997      /*
1998       * Read the currently selected CSSELR value from the register bank
1999       * that matches the security state of this CCSIDR access.
2000       */
2001      uint32_t index = A32_BANKED_REG_GET(env, csselr,
2002                                          ri->secure & ARM_CP_SECSTATE_S);
2003  
2004      return cpu->ccsidr[index];
2005  }
2006  
2007  static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2008                           uint64_t value)
2009  {
2010      raw_write(env, ri, value & 0xf);
2011  }
2012  
2013  static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2014  {
2015      CPUState *cs = env_cpu(env);
2016      bool el1 = arm_current_el(env) == 1;
2017      uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0;
2018      uint64_t ret = 0;
2019  
2020      if (hcr_el2 & HCR_IMO) {
2021          if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2022              ret |= CPSR_I;
2023          }
2024          if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
2025              ret |= ISR_IS;
2026              ret |= CPSR_I;
2027          }
2028      } else {
2029          if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2030              ret |= CPSR_I;
2031          }
2032  
2033          if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
2034              ret |= ISR_IS;
2035              ret |= CPSR_I;
2036          }
2037      }
2038  
2039      if (hcr_el2 & HCR_FMO) {
2040          if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2041              ret |= CPSR_F;
2042          }
2043          if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
2044              ret |= ISR_FS;
2045              ret |= CPSR_F;
2046          }
2047      } else {
2048          if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2049              ret |= CPSR_F;
2050          }
2051      }
2052  
2053      if (hcr_el2 & HCR_AMO) {
2054          if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
2055              ret |= CPSR_A;
2056          }
2057      }
2058  
2059      return ret;
2060  }
2061  
2062  static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2063                                         bool isread)
2064  {
2065      if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2066          return CP_ACCESS_TRAP_EL2;
2067      }
2068  
2069      return CP_ACCESS_OK;
2070  }
2071  
2072  static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2073                                         bool isread)
2074  {
2075      if (arm_feature(env, ARM_FEATURE_V8)) {
2076          return access_aa64_tid1(env, ri, isread);
2077      }
2078  
2079      return CP_ACCESS_OK;
2080  }
2081  
2082  static const ARMCPRegInfo v7_cp_reginfo[] = {
2083      /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2084      { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2085        .access = PL1_W, .type = ARM_CP_NOP },
2086      /*
2087       * Performance monitors are implementation defined in v7,
2088       * but with an ARM recommended set of registers, which we
2089       * follow.
2090       *
2091       * Performance registers fall into three categories:
2092       *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2093       *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2094       *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2095       * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2096       * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2097       */
2098      { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2099        .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
2100        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2101        .writefn = pmcntenset_write,
2102        .accessfn = pmreg_access,
2103        .fgt = FGT_PMCNTEN,
2104        .raw_writefn = raw_write },
2105      { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
2106        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2107        .access = PL0_RW, .accessfn = pmreg_access,
2108        .fgt = FGT_PMCNTEN,
2109        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2110        .writefn = pmcntenset_write, .raw_writefn = raw_write },
2111      { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2112        .access = PL0_RW,
2113        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2114        .accessfn = pmreg_access,
2115        .fgt = FGT_PMCNTEN,
2116        .writefn = pmcntenclr_write,
2117        .type = ARM_CP_ALIAS | ARM_CP_IO },
2118      { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2119        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2120        .access = PL0_RW, .accessfn = pmreg_access,
2121        .fgt = FGT_PMCNTEN,
2122        .type = ARM_CP_ALIAS | ARM_CP_IO,
2123        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2124        .writefn = pmcntenclr_write },
2125      { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2126        .access = PL0_RW, .type = ARM_CP_IO,
2127        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2128        .accessfn = pmreg_access,
2129        .fgt = FGT_PMOVS,
2130        .writefn = pmovsr_write,
2131        .raw_writefn = raw_write },
2132      { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2133        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2134        .access = PL0_RW, .accessfn = pmreg_access,
2135        .fgt = FGT_PMOVS,
2136        .type = ARM_CP_ALIAS | ARM_CP_IO,
2137        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2138        .writefn = pmovsr_write,
2139        .raw_writefn = raw_write },
2140      { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2141        .access = PL0_W, .accessfn = pmreg_access_swinc,
2142        .fgt = FGT_PMSWINC_EL0,
2143        .type = ARM_CP_NO_RAW | ARM_CP_IO,
2144        .writefn = pmswinc_write },
2145      { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2146        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2147        .access = PL0_W, .accessfn = pmreg_access_swinc,
2148        .fgt = FGT_PMSWINC_EL0,
2149        .type = ARM_CP_NO_RAW | ARM_CP_IO,
2150        .writefn = pmswinc_write },
2151      { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2152        .access = PL0_RW, .type = ARM_CP_ALIAS,
2153        .fgt = FGT_PMSELR_EL0,
2154        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2155        .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2156        .raw_writefn = raw_write},
2157      { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2158        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2159        .access = PL0_RW, .accessfn = pmreg_access_selr,
2160        .fgt = FGT_PMSELR_EL0,
2161        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2162        .writefn = pmselr_write, .raw_writefn = raw_write, },
2163      { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2164        .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2165        .fgt = FGT_PMCCNTR_EL0,
2166        .readfn = pmccntr_read, .writefn = pmccntr_write32,
2167        .accessfn = pmreg_access_ccntr },
2168      { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2169        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2170        .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2171        .fgt = FGT_PMCCNTR_EL0,
2172        .type = ARM_CP_IO,
2173        .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2174        .readfn = pmccntr_read, .writefn = pmccntr_write,
2175        .raw_readfn = raw_read, .raw_writefn = raw_write, },
2176      { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2177        .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2178        .access = PL0_RW, .accessfn = pmreg_access,
2179        .fgt = FGT_PMCCFILTR_EL0,
2180        .type = ARM_CP_ALIAS | ARM_CP_IO,
2181        .resetvalue = 0, },
2182      { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2183        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2184        .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2185        .access = PL0_RW, .accessfn = pmreg_access,
2186        .fgt = FGT_PMCCFILTR_EL0,
2187        .type = ARM_CP_IO,
2188        .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2189        .resetvalue = 0, },
2190      { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2191        .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2192        .accessfn = pmreg_access,
2193        .fgt = FGT_PMEVTYPERN_EL0,
2194        .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2195      { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2196        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2197        .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2198        .accessfn = pmreg_access,
2199        .fgt = FGT_PMEVTYPERN_EL0,
2200        .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2201      { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2202        .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2203        .accessfn = pmreg_access_xevcntr,
2204        .fgt = FGT_PMEVCNTRN_EL0,
2205        .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2206      { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2207        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2208        .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2209        .accessfn = pmreg_access_xevcntr,
2210        .fgt = FGT_PMEVCNTRN_EL0,
2211        .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2212      { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2213        .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2214        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2215        .resetvalue = 0,
2216        .writefn = pmuserenr_write, .raw_writefn = raw_write },
2217      { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2218        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2219        .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2220        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2221        .resetvalue = 0,
2222        .writefn = pmuserenr_write, .raw_writefn = raw_write },
2223      { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2224        .access = PL1_RW, .accessfn = access_tpm,
2225        .fgt = FGT_PMINTEN,
2226        .type = ARM_CP_ALIAS | ARM_CP_IO,
2227        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2228        .resetvalue = 0,
2229        .writefn = pmintenset_write, .raw_writefn = raw_write },
2230      { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2231        .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2232        .access = PL1_RW, .accessfn = access_tpm,
2233        .fgt = FGT_PMINTEN,
2234        .type = ARM_CP_IO,
2235        .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2236        .writefn = pmintenset_write, .raw_writefn = raw_write,
2237        .resetvalue = 0x0 },
2238      { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2239        .access = PL1_RW, .accessfn = access_tpm,
2240        .fgt = FGT_PMINTEN,
2241        .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2242        .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2243        .writefn = pmintenclr_write, },
2244      { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2245        .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2246        .access = PL1_RW, .accessfn = access_tpm,
2247        .fgt = FGT_PMINTEN,
2248        .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2249        .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2250        .writefn = pmintenclr_write },
2251      { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2252        .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2253        .access = PL1_R,
2254        .accessfn = access_tid4,
2255        .fgt = FGT_CCSIDR_EL1,
2256        .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2257      { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2258        .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2259        .access = PL1_RW,
2260        .accessfn = access_tid4,
2261        .fgt = FGT_CSSELR_EL1,
2262        .writefn = csselr_write, .resetvalue = 0,
2263        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2264                               offsetof(CPUARMState, cp15.csselr_ns) } },
2265      /*
2266       * Auxiliary ID register: this actually has an IMPDEF value but for now
2267       * we just RAZ for all cores.
2268       */
2269      { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2270        .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2271        .access = PL1_R, .type = ARM_CP_CONST,
2272        .accessfn = access_aa64_tid1,
2273        .fgt = FGT_AIDR_EL1,
2274        .resetvalue = 0 },
2275      /*
2276       * Auxiliary fault status registers: these also are IMPDEF, and we
2277       * choose to RAZ/WI for all cores.
2278       */
2279      { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2280        .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2281        .access = PL1_RW, .accessfn = access_tvm_trvm,
2282        .fgt = FGT_AFSR0_EL1,
2283        .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
2284        .type = ARM_CP_CONST, .resetvalue = 0 },
2285      { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2286        .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2287        .access = PL1_RW, .accessfn = access_tvm_trvm,
2288        .fgt = FGT_AFSR1_EL1,
2289        .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
2290        .type = ARM_CP_CONST, .resetvalue = 0 },
2291      /*
2292       * MAIR can just read-as-written because we don't implement caches
2293       * and so don't need to care about memory attributes.
2294       */
2295      { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2296        .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2297        .access = PL1_RW, .accessfn = access_tvm_trvm,
2298        .fgt = FGT_MAIR_EL1,
2299        .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
2300        .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2301        .resetvalue = 0 },
2302      { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2303        .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2304        .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2305        .resetvalue = 0 },
2306      /*
2307       * For non-long-descriptor page tables these are PRRR and NMRR;
2308       * regardless they still act as reads-as-written for QEMU.
2309       *
2310       * MAIR0/1 are defined separately from their 64-bit counterpart, which
2311       * allows each to be assigned the correct fieldoffset based on the
2312       * endianness handled in the field definitions.
2313       */
2315      { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2316        .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2317        .access = PL1_RW, .accessfn = access_tvm_trvm,
2318        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2319                               offsetof(CPUARMState, cp15.mair0_ns) },
2320        .resetfn = arm_cp_reset_ignore },
2321      { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2322        .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2323        .access = PL1_RW, .accessfn = access_tvm_trvm,
2324        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2325                               offsetof(CPUARMState, cp15.mair1_ns) },
2326        .resetfn = arm_cp_reset_ignore },
2327      { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2328        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2329        .fgt = FGT_ISR_EL1,
2330        .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2331      /* 32 bit ITLB invalidates */
2332      { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2333        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2334        .writefn = tlbiall_write },
2335      { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2336        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2337        .writefn = tlbimva_write },
2338      { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2339        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2340        .writefn = tlbiasid_write },
2341      /* 32 bit DTLB invalidates */
2342      { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2343        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2344        .writefn = tlbiall_write },
2345      { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2346        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2347        .writefn = tlbimva_write },
2348      { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2349        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2350        .writefn = tlbiasid_write },
2351      /* 32 bit TLB invalidates */
2352      { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2353        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2354        .writefn = tlbiall_write },
2355      { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2356        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2357        .writefn = tlbimva_write },
2358      { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2359        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2360        .writefn = tlbiasid_write },
2361      { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2362        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2363        .writefn = tlbimvaa_write },
2364  };
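
/*
 * For reference, the .cp/.crn/.crm/.opc1/.opc2 fields above mirror the
 * instruction encodings that reach these registers. For example the
 * PMCCNTR entry (cp 15, crn 9, crm 13, opc1 0, opc2 0) corresponds to
 * the AArch32 access
 *
 *     mrc p15, 0, r0, c9, c13, 0   ; read PMCCNTR into r0
 *
 * while PMCCNTR_EL0 additionally carries opc0 = 3 for the AArch64
 * MRS/MSR system-register encoding.
 */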
2365  
2366  static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2367      /* 32 bit TLB invalidates, Inner Shareable */
2368      { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2369        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2370        .writefn = tlbiall_is_write },
2371      { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2372        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2373        .writefn = tlbimva_is_write },
2374      { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2375        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2376        .writefn = tlbiasid_is_write },
2377      { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2378        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
2379        .writefn = tlbimvaa_is_write },
2380  };
2381  
2382  static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2383      /* PMOVSSET is not implemented in v7 before v7ve */
2384      { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2385        .access = PL0_RW, .accessfn = pmreg_access,
2386        .fgt = FGT_PMOVS,
2387        .type = ARM_CP_ALIAS | ARM_CP_IO,
2388        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2389        .writefn = pmovsset_write,
2390        .raw_writefn = raw_write },
2391      { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2392        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2393        .access = PL0_RW, .accessfn = pmreg_access,
2394        .fgt = FGT_PMOVS,
2395        .type = ARM_CP_ALIAS | ARM_CP_IO,
2396        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2397        .writefn = pmovsset_write,
2398        .raw_writefn = raw_write },
2399  };
2400  
2401  static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2402                          uint64_t value)
2403  {
2404      value &= 1;
2405      env->teecr = value;
2406  }
2407  
2408  static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2409                                     bool isread)
2410  {
2411      /*
2412       * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE
2413       * at all, so we don't need to check whether we're v8A.
2414       */
2415      if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
2416          (env->cp15.hstr_el2 & HSTR_TTEE)) {
2417          return CP_ACCESS_TRAP_EL2;
2418      }
2419      return CP_ACCESS_OK;
2420  }
2421  
2422  static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2423                                      bool isread)
2424  {
2425      if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2426          return CP_ACCESS_TRAP;
2427      }
2428      return teecr_access(env, ri, isread);
2429  }
2430  
2431  static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2432      { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2433        .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2434        .resetvalue = 0,
2435        .writefn = teecr_write, .accessfn = teecr_access },
2436      { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2437        .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2438        .accessfn = teehbr_access, .resetvalue = 0 },
2439  };
2440  
2441  static const ARMCPRegInfo v6k_cp_reginfo[] = {
2442      { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2443        .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2444        .access = PL0_RW,
2445        .fgt = FGT_TPIDR_EL0,
2446        .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2447      { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2448        .access = PL0_RW,
2449        .fgt = FGT_TPIDR_EL0,
2450        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2451                               offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2452        .resetfn = arm_cp_reset_ignore },
2453      { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2454        .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2455        .access = PL0_R | PL1_W,
2456        .fgt = FGT_TPIDRRO_EL0,
2457        .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2458        .resetvalue = 0},
2459      { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2460        .access = PL0_R | PL1_W,
2461        .fgt = FGT_TPIDRRO_EL0,
2462        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2463                               offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2464        .resetfn = arm_cp_reset_ignore },
2465      { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2466        .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2467        .access = PL1_RW,
2468        .fgt = FGT_TPIDR_EL1,
2469        .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2470      { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2471        .access = PL1_RW,
2472        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2473                               offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2474        .resetvalue = 0 },
2475  };
2476  
2477  static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
2478  {
2479      ARMCPU *cpu = env_archcpu(env);
2480  
2481      cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
2482  }
2483  
2484  #ifndef CONFIG_USER_ONLY
2485  
2486  static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2487                                         bool isread)
2488  {
2489      /*
2490       * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2491       * Writable only at the highest implemented exception level.
2492       */
2493      int el = arm_current_el(env);
2494      uint64_t hcr;
2495      uint32_t cntkctl;
2496  
2497      switch (el) {
2498      case 0:
2499          hcr = arm_hcr_el2_eff(env);
2500          if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2501              cntkctl = env->cp15.cnthctl_el2;
2502          } else {
2503              cntkctl = env->cp15.c14_cntkctl;
2504          }
2505          if (!extract32(cntkctl, 0, 2)) {
2506              return CP_ACCESS_TRAP;
2507          }
2508          break;
2509      case 1:
2510          if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2511              arm_is_secure_below_el3(env)) {
2512              /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2513              return CP_ACCESS_TRAP_UNCATEGORIZED;
2514          }
2515          break;
2516      case 2:
2517      case 3:
2518          break;
2519      }
2520  
2521      if (!isread && el < arm_highest_el(env)) {
2522          return CP_ACCESS_TRAP_UNCATEGORIZED;
2523      }
2524  
2525      return CP_ACCESS_OK;
2526  }
2527  
2528  static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2529                                          bool isread)
2530  {
2531      unsigned int cur_el = arm_current_el(env);
2532      bool has_el2 = arm_is_el2_enabled(env);
2533      uint64_t hcr = arm_hcr_el2_eff(env);
2534  
2535      switch (cur_el) {
2536      case 0:
2537          /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2538          if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2539              return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2540                      ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2541          }
2542  
2543          /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2544          if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2545              return CP_ACCESS_TRAP;
2546          }
2547          /* fall through */
2548      case 1:
2549          /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2550          if (has_el2 && timeridx == GTIMER_PHYS &&
2551              (hcr & HCR_E2H
2552               ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2553               : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2554              return CP_ACCESS_TRAP_EL2;
2555          }
2556          if (has_el2 && timeridx == GTIMER_VIRT) {
2557              if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVCT)) {
2558                  return CP_ACCESS_TRAP_EL2;
2559              }
2560          }
2561          break;
2562      }
2563      return CP_ACCESS_OK;
2564  }
2565  
2566  static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2567                                        bool isread)
2568  {
2569      unsigned int cur_el = arm_current_el(env);
2570      bool has_el2 = arm_is_el2_enabled(env);
2571      uint64_t hcr = arm_hcr_el2_eff(env);
2572  
2573      switch (cur_el) {
2574      case 0:
2575          if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2576              /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2577              return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2578                      ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2579          }
2580  
2581          /*
2582           * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2583           * EL0 if EL0[PV]TEN is zero.
2584           */
2585          if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2586              return CP_ACCESS_TRAP;
2587          }
2588          /* fall through */
2589  
2590      case 1:
2591          if (has_el2 && timeridx == GTIMER_PHYS) {
2592              if (hcr & HCR_E2H) {
2593                  /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2594                  if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2595                      return CP_ACCESS_TRAP_EL2;
2596                  }
2597              } else {
2598                  /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2599                  if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2600                      return CP_ACCESS_TRAP_EL2;
2601                  }
2602              }
2603          }
2604          if (has_el2 && timeridx == GTIMER_VIRT) {
2605              if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1TVT)) {
2606                  return CP_ACCESS_TRAP_EL2;
2607              }
2608          }
2609          break;
2610      }
2611      return CP_ACCESS_OK;
2612  }
2613  
2614  static CPAccessResult gt_pct_access(CPUARMState *env,
2615                                      const ARMCPRegInfo *ri,
2616                                      bool isread)
2617  {
2618      return gt_counter_access(env, GTIMER_PHYS, isread);
2619  }
2620  
2621  static CPAccessResult gt_vct_access(CPUARMState *env,
2622                                      const ARMCPRegInfo *ri,
2623                                      bool isread)
2624  {
2625      return gt_counter_access(env, GTIMER_VIRT, isread);
2626  }
2627  
2628  static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2629                                         bool isread)
2630  {
2631      return gt_timer_access(env, GTIMER_PHYS, isread);
2632  }
2633  
2634  static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2635                                         bool isread)
2636  {
2637      return gt_timer_access(env, GTIMER_VIRT, isread);
2638  }
2639  
2640  static CPAccessResult gt_stimer_access(CPUARMState *env,
2641                                         const ARMCPRegInfo *ri,
2642                                         bool isread)
2643  {
2644      /*
2645       * The AArch64 register view of the secure physical timer is
2646       * always accessible from EL3, and configurably accessible from
2647       * Secure EL1.
2648       */
2649      switch (arm_current_el(env)) {
2650      case 1:
2651          if (!arm_is_secure(env)) {
2652              return CP_ACCESS_TRAP;
2653          }
2654          if (!(env->cp15.scr_el3 & SCR_ST)) {
2655              return CP_ACCESS_TRAP_EL3;
2656          }
2657          return CP_ACCESS_OK;
2658      case 0:
2659      case 2:
2660          return CP_ACCESS_TRAP;
2661      case 3:
2662          return CP_ACCESS_OK;
2663      default:
2664          g_assert_not_reached();
2665      }
2666  }
2667  
2668  uint64_t gt_get_countervalue(CPUARMState *env)
2669  {
2670      ARMCPU *cpu = env_archcpu(env);
2671  
2672      return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2673  }
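
/*
 * Example with an illustrative frequency: at gt_cntfrq_hz = 62500000
 * (62.5 MHz) the period is 16 ns, so after 1 ms of QEMU_CLOCK_VIRTUAL
 * time (1000000 ns) the counter reads 1000000 / 16 = 62500 ticks.
 */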
2674  
2675  static void gt_update_irq(ARMCPU *cpu, int timeridx)
2676  {
2677      CPUARMState *env = &cpu->env;
2678      uint64_t cnthctl = env->cp15.cnthctl_el2;
2679      ARMSecuritySpace ss = arm_security_space(env);
2680      /* ISTATUS && !IMASK */
2681      int irqstate = (env->cp15.c14_timer[timeridx].ctl & 6) == 4;
2682  
2683      /*
2684       * If bit CNTHCTL_EL2.CNT[VP]MASK is set, it overrides IMASK.
2685       * It is RES0 in the Secure and Non-secure states.
2686       */
2687      if ((ss == ARMSS_Root || ss == ARMSS_Realm) &&
2688          ((timeridx == GTIMER_VIRT && (cnthctl & R_CNTHCTL_CNTVMASK_MASK)) ||
2689           (timeridx == GTIMER_PHYS && (cnthctl & R_CNTHCTL_CNTPMASK_MASK)))) {
2690          irqstate = 0;
2691      }
2692  
2693      qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2694      trace_arm_gt_update_irq(timeridx, irqstate);
2695  }
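
/*
 * CNT*_CTL bit assignments used above: bit 0 is ENABLE, bit 1 is IMASK,
 * bit 2 is ISTATUS, so (ctl & 6) == 4 is precisely "ISTATUS set and
 * IMASK clear". For example ctl = 0b101 asserts the timer output, while
 * ctl = 0b111 (masked) or ctl = 0b001 (not yet fired) deasserts it.
 */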
2696  
2697  void gt_rme_post_el_change(ARMCPU *cpu, void *ignored)
2698  {
2699      /*
2700       * Changing security state between Root and Secure/NonSecure, which may
2701       * happen when switching EL, can change the effective value of CNTHCTL_EL2
2702       * mask bits. Update the IRQ state accordingly.
2703       */
2704      gt_update_irq(cpu, GTIMER_VIRT);
2705      gt_update_irq(cpu, GTIMER_PHYS);
2706  }
2707  
2708  static uint64_t gt_phys_raw_cnt_offset(CPUARMState *env)
2709  {
2710      if ((env->cp15.scr_el3 & SCR_ECVEN) &&
2711          FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, ECV) &&
2712          arm_is_el2_enabled(env) &&
2713          (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
2714          return env->cp15.cntpoff_el2;
2715      }
2716      return 0;
2717  }
2718  
2719  static uint64_t gt_phys_cnt_offset(CPUARMState *env)
2720  {
2721      if (arm_current_el(env) >= 2) {
2722          return 0;
2723      }
2724      return gt_phys_raw_cnt_offset(env);
2725  }
2726  
2727  static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2728  {
2729      ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2730  
2731      if (gt->ctl & 1) {
2732          /*
2733           * Timer enabled: calculate and set current ISTATUS, irq, and
2734           * reset timer to when ISTATUS next has to change
2735           */
2736          uint64_t offset = timeridx == GTIMER_VIRT ?
2737              cpu->env.cp15.cntvoff_el2 : gt_phys_raw_cnt_offset(&cpu->env);
2738          uint64_t count = gt_get_countervalue(&cpu->env);
2739          /* Note that this must be unsigned 64-bit arithmetic: */
2740          int istatus = count - offset >= gt->cval;
2741          uint64_t nexttick;
2742  
2743          gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2744  
2745          if (istatus) {
2746              /*
2747               * Next transition is when (count - offset) rolls back over to 0.
2748               * If offset > count then this is when count == offset;
2749               * if offset <= count then this is when count == offset + 2^64.
2750               * For the latter case we set nexttick to an "as far in the
2751               * future as possible" value and let the code below handle it.
2752               */
2753              if (offset > count) {
2754                  nexttick = offset;
2755              } else {
2756                  nexttick = UINT64_MAX;
2757              }
2758          } else {
2759              /*
2760               * Next transition is when (count - offset) == cval, i.e.
2761               * when count == (cval + offset).
2762               * If that would overflow, then again we set up the next interrupt
2763               * for "as far in the future as possible" for the code below.
2764               */
2765              if (uadd64_overflow(gt->cval, offset, &nexttick)) {
2766                  nexttick = UINT64_MAX;
2767              }
2768          }
2769          /*
2770           * Note that the desired next expiry time might be beyond the
2771           * signed-64-bit range of a QEMUTimer -- in this case we just
2772           * set the timer for as far in the future as possible. When the
2773           * timer expires we will reset the timer for any remaining period.
2774           */
2775          if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2776              timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2777          } else {
2778              timer_mod(cpu->gt_timer[timeridx], nexttick);
2779          }
2780          trace_arm_gt_recalc(timeridx, nexttick);
2781      } else {
2782          /* Timer disabled: ISTATUS and timer output always clear */
2783          gt->ctl &= ~4;
2784          timer_del(cpu->gt_timer[timeridx]);
2785          trace_arm_gt_recalc_disabled(timeridx);
2786      }
2787      gt_update_irq(cpu, timeridx);
2788  }
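
/*
 * Worked example for the ISTATUS-clear branch, with made-up numbers:
 * count = 1000, offset = 0, cval = 5000. istatus is 0 and nexttick is
 * cval + offset = 5000 ticks, so the QEMUTimer is armed for tick 5000
 * (the tick-to-ns conversion uses the counter period, which is why the
 * guard above compares nexttick against INT64_MAX divided by
 * gt_cntfrq_period_ns()). If cval + offset overflows, or the deadline
 * exceeds the signed 64-bit ns range, we clamp to INT64_MAX and
 * re-evaluate when that expiry fires.
 */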
2789  
2790  static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2791                             int timeridx)
2792  {
2793      ARMCPU *cpu = env_archcpu(env);
2794  
2795      timer_del(cpu->gt_timer[timeridx]);
2796  }
2797  
2798  static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2799  {
2800      return gt_get_countervalue(env) - gt_phys_cnt_offset(env);
2801  }
2802  
2803  uint64_t gt_virt_cnt_offset(CPUARMState *env)
2804  {
2805      uint64_t hcr;
2806  
2807      switch (arm_current_el(env)) {
2808      case 2:
2809          hcr = arm_hcr_el2_eff(env);
2810          if (hcr & HCR_E2H) {
2811              return 0;
2812          }
2813          break;
2814      case 0:
2815          hcr = arm_hcr_el2_eff(env);
2816          if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2817              return 0;
2818          }
2819          break;
2820      }
2821  
2822      return env->cp15.cntvoff_el2;
2823  }
2824  
2825  static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2826  {
2827      return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2828  }
2829  
2830  static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2831                            int timeridx,
2832                            uint64_t value)
2833  {
2834      trace_arm_gt_cval_write(timeridx, value);
2835      env->cp15.c14_timer[timeridx].cval = value;
2836      gt_recalc_timer(env_archcpu(env), timeridx);
2837  }
2838  
2839  static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2840                               int timeridx)
2841  {
2842      uint64_t offset = 0;
2843  
2844      switch (timeridx) {
2845      case GTIMER_VIRT:
2846      case GTIMER_HYPVIRT:
2847          offset = gt_virt_cnt_offset(env);
2848          break;
2849      case GTIMER_PHYS:
2850          offset = gt_phys_cnt_offset(env);
2851          break;
2852      }
2853  
2854      return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2855                        (gt_get_countervalue(env) - offset));
2856  }
2857  
2858  static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2859                            int timeridx,
2860                            uint64_t value)
2861  {
2862      uint64_t offset = 0;
2863  
2864      switch (timeridx) {
2865      case GTIMER_VIRT:
2866      case GTIMER_HYPVIRT:
2867          offset = gt_virt_cnt_offset(env);
2868          break;
2869      case GTIMER_PHYS:
2870          offset = gt_phys_cnt_offset(env);
2871          break;
2872      }
2873  
2874      trace_arm_gt_tval_write(timeridx, value);
2875      env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2876                                           sextract64(value, 0, 32);
2877      gt_recalc_timer(env_archcpu(env), timeridx);
2878  }
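
/*
 * Illustrative sketch of the TVAL view implemented by gt_tval_read() and
 * gt_tval_write() above (hypothetical helpers, not in the original file).
 * TVAL is a signed 32-bit downcounter: TVAL = CVAL - (count - offset),
 * and a TVAL write is sign-extended when the 64-bit CVAL is recomputed,
 * so writing a negative TVAL places the compare value in the past:
 */
static inline uint32_t example_tval_of(uint64_t cval, uint64_t count,
                                       uint64_t offset)
{
    return (uint32_t)(cval - (count - offset));
}

static inline uint64_t example_cval_of(uint32_t tval, uint64_t count,
                                       uint64_t offset)
{
    /* the (int32_t) cast mirrors sextract64(value, 0, 32) */
    return (count - offset) + (int64_t)(int32_t)tval;
}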
2879  
2880  static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2881                           int timeridx,
2882                           uint64_t value)
2883  {
2884      ARMCPU *cpu = env_archcpu(env);
2885      uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2886  
2887      trace_arm_gt_ctl_write(timeridx, value);
2888      env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2889      if ((oldval ^ value) & 1) {
2890          /* Enable toggled */
2891          gt_recalc_timer(cpu, timeridx);
2892      } else if ((oldval ^ value) & 2) {
2893          /*
2894           * IMASK toggled: don't need to recalculate,
2895           * just set the interrupt line based on ISTATUS
2896           */
2897          trace_arm_gt_imask_toggle(timeridx);
2898          gt_update_irq(cpu, timeridx);
2899      }
2900  }
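
/*
 * Sketch of the CNT*_CTL bit layout handled above (illustrative only):
 * bit 0 is ENABLE, bit 1 is IMASK, bit 2 is ISTATUS. ISTATUS is owned by
 * gt_recalc_timer(), which is why the write uses deposit64(..., 0, 2, ...)
 * to latch only the two guest-writable bits. Ignoring the CNTHCTL_EL2
 * mask bits handled in gt_update_irq(), the interrupt line level is:
 */
static inline bool example_timer_irq_level(uint32_t ctl)
{
    return (ctl & 4) && !(ctl & 2); /* ISTATUS set and not masked */
}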
2901  
2902  static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2903  {
2904      gt_timer_reset(env, ri, GTIMER_PHYS);
2905  }
2906  
2907  static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2908                                 uint64_t value)
2909  {
2910      gt_cval_write(env, ri, GTIMER_PHYS, value);
2911  }
2912  
2913  static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2914  {
2915      return gt_tval_read(env, ri, GTIMER_PHYS);
2916  }
2917  
2918  static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2919                                 uint64_t value)
2920  {
2921      gt_tval_write(env, ri, GTIMER_PHYS, value);
2922  }
2923  
2924  static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2925                                uint64_t value)
2926  {
2927      gt_ctl_write(env, ri, GTIMER_PHYS, value);
2928  }
2929  
2930  static int gt_phys_redir_timeridx(CPUARMState *env)
2931  {
2932      switch (arm_mmu_idx(env)) {
2933      case ARMMMUIdx_E20_0:
2934      case ARMMMUIdx_E20_2:
2935      case ARMMMUIdx_E20_2_PAN:
2936          return GTIMER_HYP;
2937      default:
2938          return GTIMER_PHYS;
2939      }
2940  }
2941  
2942  static int gt_virt_redir_timeridx(CPUARMState *env)
2943  {
2944      switch (arm_mmu_idx(env)) {
2945      case ARMMMUIdx_E20_0:
2946      case ARMMMUIdx_E20_2:
2947      case ARMMMUIdx_E20_2_PAN:
2948          return GTIMER_HYPVIRT;
2949      default:
2950          return GTIMER_VIRT;
2951      }
2952  }
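
/*
 * Sketch (not in the original file): under FEAT_VHE with HCR_EL2.E2H=1
 * (the ARMMMUIdx_E20_* regimes above), accesses to the EL0/EL1 timer
 * register names are transparently redirected to the EL2 timers, e.g.
 * CNTV_CTL_EL0 then operates on GTIMER_HYPVIRT instead of GTIMER_VIRT:
 */
static inline int example_gt_redir(bool e20_regime, int hyp_timer,
                                   int std_timer)
{
    return e20_regime ? hyp_timer : std_timer;
}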
2953  
2954  static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2955                                          const ARMCPRegInfo *ri)
2956  {
2957      int timeridx = gt_phys_redir_timeridx(env);
2958      return env->cp15.c14_timer[timeridx].cval;
2959  }
2960  
2961  static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2962                                       uint64_t value)
2963  {
2964      int timeridx = gt_phys_redir_timeridx(env);
2965      gt_cval_write(env, ri, timeridx, value);
2966  }
2967  
2968  static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2969                                          const ARMCPRegInfo *ri)
2970  {
2971      int timeridx = gt_phys_redir_timeridx(env);
2972      return gt_tval_read(env, ri, timeridx);
2973  }
2974  
2975  static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2976                                       uint64_t value)
2977  {
2978      int timeridx = gt_phys_redir_timeridx(env);
2979      gt_tval_write(env, ri, timeridx, value);
2980  }
2981  
2982  static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2983                                         const ARMCPRegInfo *ri)
2984  {
2985      int timeridx = gt_phys_redir_timeridx(env);
2986      return env->cp15.c14_timer[timeridx].ctl;
2987  }
2988  
2989  static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2990                                      uint64_t value)
2991  {
2992      int timeridx = gt_phys_redir_timeridx(env);
2993      gt_ctl_write(env, ri, timeridx, value);
2994  }
2995  
2996  static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2997  {
2998      gt_timer_reset(env, ri, GTIMER_VIRT);
2999  }
3000  
3001  static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3002                                 uint64_t value)
3003  {
3004      gt_cval_write(env, ri, GTIMER_VIRT, value);
3005  }
3006  
3007  static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3008  {
3009      return gt_tval_read(env, ri, GTIMER_VIRT);
3010  }
3011  
3012  static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3013                                 uint64_t value)
3014  {
3015      gt_tval_write(env, ri, GTIMER_VIRT, value);
3016  }
3017  
3018  static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3019                                uint64_t value)
3020  {
3021      gt_ctl_write(env, ri, GTIMER_VIRT, value);
3022  }
3023  
3024  static void gt_cnthctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3025                               uint64_t value)
3026  {
3027      ARMCPU *cpu = env_archcpu(env);
3028      uint32_t oldval = env->cp15.cnthctl_el2;
3029      uint32_t valid_mask =
3030          R_CNTHCTL_EL0PCTEN_E2H1_MASK |
3031          R_CNTHCTL_EL0VCTEN_E2H1_MASK |
3032          R_CNTHCTL_EVNTEN_MASK |
3033          R_CNTHCTL_EVNTDIR_MASK |
3034          R_CNTHCTL_EVNTI_MASK |
3035          R_CNTHCTL_EL0VTEN_MASK |
3036          R_CNTHCTL_EL0PTEN_MASK |
3037          R_CNTHCTL_EL1PCTEN_E2H1_MASK |
3038          R_CNTHCTL_EL1PTEN_MASK;
3039  
3040      if (cpu_isar_feature(aa64_rme, cpu)) {
3041          valid_mask |= R_CNTHCTL_CNTVMASK_MASK | R_CNTHCTL_CNTPMASK_MASK;
3042      }
3043      if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
3044          valid_mask |=
3045              R_CNTHCTL_EL1TVT_MASK |
3046              R_CNTHCTL_EL1TVCT_MASK |
3047              R_CNTHCTL_EL1NVPCT_MASK |
3048              R_CNTHCTL_EL1NVVCT_MASK |
3049              R_CNTHCTL_EVNTIS_MASK;
3050      }
3051      if (cpu_isar_feature(aa64_ecv, cpu)) {
3052          valid_mask |= R_CNTHCTL_ECV_MASK;
3053      }
3054  
3055      /* Clear RES0 bits */
3056      value &= valid_mask;
3057  
3058      raw_write(env, ri, value);
3059  
3060      if ((oldval ^ value) & R_CNTHCTL_CNTVMASK_MASK) {
3061          gt_update_irq(cpu, GTIMER_VIRT);
3062      }
3063      if ((oldval ^ value) & R_CNTHCTL_CNTPMASK_MASK) {
3064          gt_update_irq(cpu, GTIMER_PHYS);
3065      }
3065  }
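
/*
 * Illustrative sketch: only the bits accumulated into valid_mask are
 * writable, so a guest writing all-ones to CNTHCTL_EL2 reads back just
 * the implemented fields (the RES0 bits are squashed before raw_write()
 * above):
 */
static inline uint64_t example_cnthctl_effective(uint64_t value,
                                                 uint64_t valid_mask)
{
    return value & valid_mask;
}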
3066  
3067  static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
3068                                uint64_t value)
3069  {
3070      ARMCPU *cpu = env_archcpu(env);
3071  
3072      trace_arm_gt_cntvoff_write(value);
3073      raw_write(env, ri, value);
3074      gt_recalc_timer(cpu, GTIMER_VIRT);
3075  }
3076  
3077  static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
3078                                          const ARMCPRegInfo *ri)
3079  {
3080      int timeridx = gt_virt_redir_timeridx(env);
3081      return env->cp15.c14_timer[timeridx].cval;
3082  }
3083  
3084  static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3085                                       uint64_t value)
3086  {
3087      int timeridx = gt_virt_redir_timeridx(env);
3088      gt_cval_write(env, ri, timeridx, value);
3089  }
3090  
3091  static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
3092                                          const ARMCPRegInfo *ri)
3093  {
3094      int timeridx = gt_virt_redir_timeridx(env);
3095      return gt_tval_read(env, ri, timeridx);
3096  }
3097  
3098  static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3099                                       uint64_t value)
3100  {
3101      int timeridx = gt_virt_redir_timeridx(env);
3102      gt_tval_write(env, ri, timeridx, value);
3103  }
3104  
3105  static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
3106                                         const ARMCPRegInfo *ri)
3107  {
3108      int timeridx = gt_virt_redir_timeridx(env);
3109      return env->cp15.c14_timer[timeridx].ctl;
3110  }
3111  
3112  static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3113                                      uint64_t value)
3114  {
3115      int timeridx = gt_virt_redir_timeridx(env);
3116      gt_ctl_write(env, ri, timeridx, value);
3117  }
3118  
3119  static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3120  {
3121      gt_timer_reset(env, ri, GTIMER_HYP);
3122  }
3123  
3124  static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3125                                uint64_t value)
3126  {
3127      gt_cval_write(env, ri, GTIMER_HYP, value);
3128  }
3129  
3130  static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3131  {
3132      return gt_tval_read(env, ri, GTIMER_HYP);
3133  }
3134  
3135  static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3136                                uint64_t value)
3137  {
3138      gt_tval_write(env, ri, GTIMER_HYP, value);
3139  }
3140  
3141  static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3142                                uint64_t value)
3143  {
3144      gt_ctl_write(env, ri, GTIMER_HYP, value);
3145  }
3146  
3147  static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3148  {
3149      gt_timer_reset(env, ri, GTIMER_SEC);
3150  }
3151  
3152  static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3153                                uint64_t value)
3154  {
3155      gt_cval_write(env, ri, GTIMER_SEC, value);
3156  }
3157  
3158  static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3159  {
3160      return gt_tval_read(env, ri, GTIMER_SEC);
3161  }
3162  
3163  static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3164                                uint64_t value)
3165  {
3166      gt_tval_write(env, ri, GTIMER_SEC, value);
3167  }
3168  
3169  static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3170                                uint64_t value)
3171  {
3172      gt_ctl_write(env, ri, GTIMER_SEC, value);
3173  }
3174  
3175  static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3176  {
3177      gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3178  }
3179  
3180  static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3181                               uint64_t value)
3182  {
3183      gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3184  }
3185  
3186  static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3187  {
3188      return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3189  }
3190  
3191  static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3192                               uint64_t value)
3193  {
3194      gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3195  }
3196  
3197  static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3198                              uint64_t value)
3199  {
3200      gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3201  }
3202  
3203  void arm_gt_ptimer_cb(void *opaque)
3204  {
3205      ARMCPU *cpu = opaque;
3206  
3207      gt_recalc_timer(cpu, GTIMER_PHYS);
3208  }
3209  
3210  void arm_gt_vtimer_cb(void *opaque)
3211  {
3212      ARMCPU *cpu = opaque;
3213  
3214      gt_recalc_timer(cpu, GTIMER_VIRT);
3215  }
3216  
3217  void arm_gt_htimer_cb(void *opaque)
3218  {
3219      ARMCPU *cpu = opaque;
3220  
3221      gt_recalc_timer(cpu, GTIMER_HYP);
3222  }
3223  
3224  void arm_gt_stimer_cb(void *opaque)
3225  {
3226      ARMCPU *cpu = opaque;
3227  
3228      gt_recalc_timer(cpu, GTIMER_SEC);
3229  }
3230  
3231  void arm_gt_hvtimer_cb(void *opaque)
3232  {
3233      ARMCPU *cpu = opaque;
3234  
3235      gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3236  }
3237  
3238  static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3239      /*
3240       * Note that CNTFRQ is purely reads-as-written for the benefit
3241       * of software; writing it doesn't actually change the timer frequency.
3242       * Our reset value matches the fixed frequency we implement the timer at.
3243       */
3244      { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3245        .type = ARM_CP_ALIAS,
3246        .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3247        .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3248      },
3249      { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3250        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3251        .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3252        .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3253        .resetfn = arm_gt_cntfrq_reset,
3254      },
3255      /* overall control: mostly access permissions */
3256      { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3257        .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3258        .access = PL1_RW,
3259        .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3260        .resetvalue = 0,
3261      },
3262      /* per-timer control */
3263      { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3264        .secure = ARM_CP_SECSTATE_NS,
3265        .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3266        .accessfn = gt_ptimer_access,
3267        .fieldoffset = offsetoflow32(CPUARMState,
3268                                     cp15.c14_timer[GTIMER_PHYS].ctl),
3269        .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3270        .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3271      },
3272      { .name = "CNTP_CTL_S",
3273        .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3274        .secure = ARM_CP_SECSTATE_S,
3275        .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3276        .accessfn = gt_ptimer_access,
3277        .fieldoffset = offsetoflow32(CPUARMState,
3278                                     cp15.c14_timer[GTIMER_SEC].ctl),
3279        .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3280      },
3281      { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3282        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3283        .type = ARM_CP_IO, .access = PL0_RW,
3284        .accessfn = gt_ptimer_access,
3285        .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1,
3286        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3287        .resetvalue = 0,
3288        .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3289        .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3290      },
3291      { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3292        .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3293        .accessfn = gt_vtimer_access,
3294        .fieldoffset = offsetoflow32(CPUARMState,
3295                                     cp15.c14_timer[GTIMER_VIRT].ctl),
3296        .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3297        .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3298      },
3299      { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3300        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3301        .type = ARM_CP_IO, .access = PL0_RW,
3302        .accessfn = gt_vtimer_access,
3303        .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1,
3304        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3305        .resetvalue = 0,
3306        .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3307        .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3308      },
3309      /* TimerValue views: a 32-bit downcounting view of the underlying state */
3310      { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3311        .secure = ARM_CP_SECSTATE_NS,
3312        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3313        .accessfn = gt_ptimer_access,
3314        .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3315      },
3316      { .name = "CNTP_TVAL_S",
3317        .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3318        .secure = ARM_CP_SECSTATE_S,
3319        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3320        .accessfn = gt_ptimer_access,
3321        .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3322      },
3323      { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3324        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3325        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3326        .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3327        .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3328      },
3329      { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3330        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3331        .accessfn = gt_vtimer_access,
3332        .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3333      },
3334      { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3335        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3336        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3337        .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3338        .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3339      },
3340      /* The counter itself */
3341      { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3342        .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3343        .accessfn = gt_pct_access,
3344        .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3345      },
3346      { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3347        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3348        .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3349        .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3350      },
3351      { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3352        .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3353        .accessfn = gt_vct_access,
3354        .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3355      },
3356      { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3357        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3358        .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3359        .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3360      },
3361      /* Comparison value, indicating when the timer goes off */
3362      { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3363        .secure = ARM_CP_SECSTATE_NS,
3364        .access = PL0_RW,
3365        .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3366        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3367        .accessfn = gt_ptimer_access,
3368        .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3369        .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3370      },
3371      { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3372        .secure = ARM_CP_SECSTATE_S,
3373        .access = PL0_RW,
3374        .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3375        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3376        .accessfn = gt_ptimer_access,
3377        .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3378      },
3379      { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3380        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3381        .access = PL0_RW,
3382        .type = ARM_CP_IO,
3383        .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1,
3384        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3385        .resetvalue = 0, .accessfn = gt_ptimer_access,
3386        .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3387        .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3388      },
3389      { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3390        .access = PL0_RW,
3391        .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3392        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3393        .accessfn = gt_vtimer_access,
3394        .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3395        .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3396      },
3397      { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3398        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3399        .access = PL0_RW,
3400        .type = ARM_CP_IO,
3401        .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1,
3402        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3403        .resetvalue = 0, .accessfn = gt_vtimer_access,
3404        .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3405        .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3406      },
3407      /*
3408       * Secure timer -- this is restricted to EL3 only, and
3409       * configurably to Secure EL1, via the accessfn.
3410       */
3411      { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3412        .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3413        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3414        .accessfn = gt_stimer_access,
3415        .readfn = gt_sec_tval_read,
3416        .writefn = gt_sec_tval_write,
3417        .resetfn = gt_sec_timer_reset,
3418      },
3419      { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3420        .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3421        .type = ARM_CP_IO, .access = PL1_RW,
3422        .accessfn = gt_stimer_access,
3423        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3424        .resetvalue = 0,
3425        .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3426      },
3427      { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3428        .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3429        .type = ARM_CP_IO, .access = PL1_RW,
3430        .accessfn = gt_stimer_access,
3431        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3432        .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3433      },
3434  };
3435  
3436  /*
3437   * FEAT_ECV adds extra views of CNTVCT_EL0 and CNTPCT_EL0 which
3438   * are "self-synchronizing". For QEMU all sysregs are self-synchronizing,
3439   * so our implementations here are identical to the normal registers.
3440   */
3441  static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
3442      { .name = "CNTVCTSS", .cp = 15, .crm = 14, .opc1 = 9,
3443        .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3444        .accessfn = gt_vct_access,
3445        .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3446      },
3447      { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3448        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3449        .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3450        .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3451      },
3452      { .name = "CNTPCTSS", .cp = 15, .crm = 14, .opc1 = 8,
3453        .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3454        .accessfn = gt_pct_access,
3455        .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3456      },
3457      { .name = "CNTPCTSS_EL0", .state = ARM_CP_STATE_AA64,
3458        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 5,
3459        .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3460        .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3461      },
3462  };
3463  
3464  static CPAccessResult gt_cntpoff_access(CPUARMState *env,
3465                                          const ARMCPRegInfo *ri,
3466                                          bool isread)
3467  {
3468      if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) &&
3469          !(env->cp15.scr_el3 & SCR_ECVEN)) {
3470          return CP_ACCESS_TRAP_EL3;
3471      }
3472      return CP_ACCESS_OK;
3473  }
3474  
3475  static void gt_cntpoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
3476                                uint64_t value)
3477  {
3478      ARMCPU *cpu = env_archcpu(env);
3479  
3480      trace_arm_gt_cntpoff_write(value);
3481      raw_write(env, ri, value);
3482      gt_recalc_timer(cpu, GTIMER_PHYS);
3483  }
3484  
3485  static const ARMCPRegInfo gen_timer_cntpoff_reginfo = {
3486      .name = "CNTPOFF_EL2", .state = ARM_CP_STATE_AA64,
3487      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 6,
3488      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3489      .accessfn = gt_cntpoff_access, .writefn = gt_cntpoff_write,
3490      .nv2_redirect_offset = 0x1a8,
3491      .fieldoffset = offsetof(CPUARMState, cp15.cntpoff_el2),
3492  };
3493  #else
3494  
3495  /*
3496   * In user-mode most of the generic timer registers are inaccessible;
3497   * however, modern kernels (4.12+) allow access to cntvct_el0.
3498   */
3499  
3500  static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3501  {
3502      ARMCPU *cpu = env_archcpu(env);
3503  
3504      /*
3505       * Currently we have no support for QEMUTimer in linux-user, so we
3506       * can't call gt_get_countervalue(env); instead we call the
3507       * lower-level functions directly.
3508       */
3509      return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3510  }
3511  
3512  static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3513      { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3514        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3515        .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3516        .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3517        .resetfn = arm_gt_cntfrq_reset,
3518      },
3519      { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3520        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3521        .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3522        .readfn = gt_virt_cnt_read,
3523      },
3524  };
3525  
3526  /*
3527   * CNTVCTSS_EL0 has the same trap conditions as CNTVCT_EL0, so it is
3528   * also exposed to userspace by Linux.
3529   */
3530  static const ARMCPRegInfo gen_timer_ecv_cp_reginfo[] = {
3531      { .name = "CNTVCTSS_EL0", .state = ARM_CP_STATE_AA64,
3532        .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 6,
3533        .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3534        .readfn = gt_virt_cnt_read,
3535      },
3536  };
3537  
3538  #endif
3539  
3540  static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3541  {
3542      if (arm_feature(env, ARM_FEATURE_LPAE)) {
3543          raw_write(env, ri, value);
3544      } else if (arm_feature(env, ARM_FEATURE_V7)) {
3545          raw_write(env, ri, value & 0xfffff6ff);
3546      } else {
3547          raw_write(env, ri, value & 0xfffff1ff);
3548      }
3549  }
3550  
3551  #ifndef CONFIG_USER_ONLY
3552  /* get_phys_addr() isn't present for user-mode-only targets */
3553  
3554  static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3555                                   bool isread)
3556  {
3557      if (ri->opc2 & 4) {
3558          /*
3559           * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
3560           * Secure EL1 (which can only happen if EL3 is AArch64).
3561           * They are simply UNDEF if executed from NS EL1.
3562           * They function normally from EL2 or EL3.
3563           */
3564          if (arm_current_el(env) == 1) {
3565              if (arm_is_secure_below_el3(env)) {
3566                  if (env->cp15.scr_el3 & SCR_EEL2) {
3567                      return CP_ACCESS_TRAP_EL2;
3568                  }
3569                  return CP_ACCESS_TRAP_EL3;
3570              }
3571              return CP_ACCESS_TRAP_UNCATEGORIZED;
3572          }
3573      }
3574      return CP_ACCESS_OK;
3575  }
3576  
3577  #ifdef CONFIG_TCG
3578  static int par_el1_shareability(GetPhysAddrResult *res)
3579  {
3580      /*
3581       * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
3582       * memory -- see pseudocode PAREncodeShareability().
3583       */
3584      if (((res->cacheattrs.attrs & 0xf0) == 0) ||
3585          res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
3586          return 2;
3587      }
3588      return res->cacheattrs.shareability;
3589  }
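
/*
 * Worked examples for the check above (hypothetical self-test, assuming
 * MAIR-style attribute bytes): Device memory has a zero high nibble, and
 * 0x40/0x44 encode Normal Non-cacheable; both force SH = 0b10.
 */
static inline void example_par_sh_check(void)
{
    GetPhysAddrResult r = { .cacheattrs = { .attrs = 0x44, /* Normal-NC */
                                            .shareability = 3 } };
    assert(par_el1_shareability(&r) == 2);
    r.cacheattrs.attrs = 0xff;             /* Normal WB cacheable */
    assert(par_el1_shareability(&r) == 3); /* recorded value is kept */
}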
3590  
3591  static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3592                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
3593                               ARMSecuritySpace ss)
3594  {
3595      bool ret;
3596      uint64_t par64;
3597      bool format64 = false;
3598      ARMMMUFaultInfo fi = {};
3599      GetPhysAddrResult res = {};
3600  
3601      /*
3602       * I_MXTJT: Granule protection checks are not performed on the final address
3603       * of a successful translation.
3604       */
3605      ret = get_phys_addr_with_space_nogpc(env, value, access_type, mmu_idx, ss,
3606                                           &res, &fi);
3607  
3608      /*
3609       * ATS operations only do S1 or S1+S2 translations, so we never
3610       * have to deal with the ARMCacheAttrs format for S2 only.
3611       */
3612      assert(!res.cacheattrs.is_s2_format);
3613  
3614      if (ret) {
3615          /*
3616           * Some kinds of translation fault must cause exceptions rather
3617           * than being reported in the PAR.
3618           */
3619          int current_el = arm_current_el(env);
3620          int target_el;
3621          uint32_t syn, fsr, fsc;
3622          bool take_exc = false;
3623  
3624          if (fi.s1ptw && current_el == 1
3625              && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3626              /*
3627               * Synchronous stage 2 fault on an access made as part of the
3628               * translation table walk for AT S1E0* or AT S1E1* insn
3629               * executed from NS EL1. If this is a synchronous external abort
3630               * and SCR_EL3.EA == 1, then we take a synchronous external abort
3631               * to EL3. Otherwise the fault is taken as an exception to EL2,
3632               * and HPFAR_EL2 holds the faulting IPA.
3633               */
3634              if (fi.type == ARMFault_SyncExternalOnWalk &&
3635                  (env->cp15.scr_el3 & SCR_EA)) {
3636                  target_el = 3;
3637              } else {
3638                  env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3639                  if (arm_is_secure_below_el3(env) && fi.s1ns) {
3640                      env->cp15.hpfar_el2 |= HPFAR_NS;
3641                  }
3642                  target_el = 2;
3643              }
3644              take_exc = true;
3645          } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3646              /*
3647               * Synchronous external aborts during a translation table walk
3648               * are taken as Data Abort exceptions.
3649               */
3650              if (fi.stage2) {
3651                  if (current_el == 3) {
3652                      target_el = 3;
3653                  } else {
3654                      target_el = 2;
3655                  }
3656              } else {
3657                  target_el = exception_target_el(env);
3658              }
3659              take_exc = true;
3660          }
3661  
3662          if (take_exc) {
3663              /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3664              if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3665                  arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3666                  fsr = arm_fi_to_lfsc(&fi);
3667                  fsc = extract32(fsr, 0, 6);
3668              } else {
3669                  fsr = arm_fi_to_sfsc(&fi);
3670                  fsc = 0x3f;
3671              }
3672              /*
3673               * Report exception with ESR indicating a fault due to a
3674               * translation table walk for a cache maintenance instruction.
3675               */
3676              syn = syn_data_abort_no_iss(current_el == target_el, 0,
3677                                          fi.ea, 1, fi.s1ptw, 1, fsc);
3678              env->exception.vaddress = value;
3679              env->exception.fsr = fsr;
3680              raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3681          }
3682      }
3683  
3684      if (is_a64(env)) {
3685          format64 = true;
3686      } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3687          /*
3688           * ATS1Cxx:
3689           * * TTBCR.EAE determines whether the result is returned using the
3690           *   32-bit or the 64-bit PAR format
3691           *   Instructions executed in Hyp mode always use the 64-bit format
3692           *
3693           * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
3694           * * The Non-secure TTBCR.EAE bit is set to 1
3695           * * The implementation includes EL2, and the value of HCR.VM is 1
3696           *
3697           * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3698           *
3699           * ATS1Hx always uses the 64-bit format.
3700           */
3701          format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3702  
3703          if (arm_feature(env, ARM_FEATURE_EL2)) {
3704              if (mmu_idx == ARMMMUIdx_E10_0 ||
3705                  mmu_idx == ARMMMUIdx_E10_1 ||
3706                  mmu_idx == ARMMMUIdx_E10_1_PAN) {
3707                  format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3708              } else {
3709                  format64 |= arm_current_el(env) == 2;
3710              }
3711          }
3712      }
3713  
3714      if (format64) {
3715          /* Create a 64-bit PAR */
3716          par64 = (1 << 11); /* LPAE bit always set */
3717          if (!ret) {
3718              par64 |= res.f.phys_addr & ~0xfffULL;
3719              if (!res.f.attrs.secure) {
3720                  par64 |= (1 << 9); /* NS */
3721              }
3722              par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
3723              par64 |= par_el1_shareability(&res) << 7; /* SH */
3724          } else {
3725              uint32_t fsr = arm_fi_to_lfsc(&fi);
3726  
3727              par64 |= 1; /* F */
3728              par64 |= (fsr & 0x3f) << 1; /* FS */
3729              if (fi.stage2) {
3730                  par64 |= (1 << 9); /* S */
3731              }
3732              if (fi.s1ptw) {
3733                  par64 |= (1 << 8); /* PTW */
3734              }
3735          }
3736      } else {
3737          /*
3738           * fsr is a DFSR/IFSR value for the short descriptor
3739           * translation table format (with WnR always clear).
3740           * Convert it to a 32-bit PAR.
3741           */
3742          if (!ret) {
3743              /* We do not set any attribute bits in the PAR */
3744              if (res.f.lg_page_size == 24
3745                  && arm_feature(env, ARM_FEATURE_V7)) {
3746                  par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
3747              } else {
3748                  par64 = res.f.phys_addr & 0xfffff000;
3749              }
3750              if (!res.f.attrs.secure) {
3751                  par64 |= (1 << 9); /* NS */
3752              }
3753          } else {
3754              uint32_t fsr = arm_fi_to_sfsc(&fi);
3755  
3756              par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3757                      ((fsr & 0xf) << 1) | 1;
3758          }
3759      }
3760      return par64;
3761  }
3762  #endif /* CONFIG_TCG */
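
/*
 * Sketch of the 64-bit PAR assembled by do_ats_write() on success
 * (illustrative helper, not part of the file): bit 11 is the always-set
 * LPAE marker, bit 9 is NS, bits [8:7] SH, bits [63:56] the memory
 * attributes, and the output address occupies the page-aligned bits.
 */
static inline uint64_t example_par64_success(uint64_t pa, bool ns,
                                             unsigned attrs, unsigned sh)
{
    uint64_t par = 1 << 11;             /* LPAE format */
    par |= pa & ~0xfffULL;              /* output address */
    if (ns) {
        par |= 1 << 9;                  /* NS */
    }
    par |= (uint64_t)attrs << 56;       /* ATTR */
    par |= (uint64_t)sh << 7;           /* SH */
    /* e.g. pa=0x80001000, ns, attrs=0xff, sh=3 -> 0xff00000080001b80 */
    return par;
}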
3763  
3764  static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3765  {
3766  #ifdef CONFIG_TCG
3767      MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3768      uint64_t par64;
3769      ARMMMUIdx mmu_idx;
3770      int el = arm_current_el(env);
3771      ARMSecuritySpace ss = arm_security_space(env);
3772  
3773      switch (ri->opc2 & 6) {
3774      case 0:
3775          /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3776          switch (el) {
3777          case 3:
3778              mmu_idx = ARMMMUIdx_E3;
3779              break;
3780          case 2:
3781              g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3782              /* fall through */
3783          case 1:
3784              if (ri->crm == 9 && arm_pan_enabled(env)) {
3785                  mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3786              } else {
3787                  mmu_idx = ARMMMUIdx_Stage1_E1;
3788              }
3789              break;
3790          default:
3791              g_assert_not_reached();
3792          }
3793          break;
3794      case 2:
3795          /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3796          switch (el) {
3797          case 3:
3798              mmu_idx = ARMMMUIdx_E10_0;
3799              break;
3800          case 2:
3801              g_assert(ss != ARMSS_Secure);  /* ARMv8.4-SecEL2 is 64-bit only */
3802              mmu_idx = ARMMMUIdx_Stage1_E0;
3803              break;
3804          case 1:
3805              mmu_idx = ARMMMUIdx_Stage1_E0;
3806              break;
3807          default:
3808              g_assert_not_reached();
3809          }
3810          break;
3811      case 4:
3812          /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3813          mmu_idx = ARMMMUIdx_E10_1;
3814          ss = ARMSS_NonSecure;
3815          break;
3816      case 6:
3817          /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3818          mmu_idx = ARMMMUIdx_E10_0;
3819          ss = ARMSS_NonSecure;
3820          break;
3821      default:
3822          g_assert_not_reached();
3823      }
3824  
3825      par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
3826  
3827      A32_BANKED_CURRENT_REG_SET(env, par, par64);
3828  #else
3829      /* Handled by hardware accelerator. */
3830      g_assert_not_reached();
3831  #endif /* CONFIG_TCG */
3832  }
3833  
3834  static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3835                          uint64_t value)
3836  {
3837  #ifdef CONFIG_TCG
3838      MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3839      uint64_t par64;
3840  
3841      /* There is no SecureEL2 for AArch32. */
3842      par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
3843                           ARMSS_NonSecure);
3844  
3845      A32_BANKED_CURRENT_REG_SET(env, par, par64);
3846  #else
3847      /* Handled by hardware accelerator. */
3848      g_assert_not_reached();
3849  #endif /* CONFIG_TCG */
3850  }
3851  
3852  static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
3853                                       bool isread)
3854  {
3855      /*
3856       * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
3857       * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
3858       * only happen when executing at EL3 because that combination also causes an
3859       * illegal exception return. We don't need to check FEAT_RME either, because
3860       * scr_write() ensures that the NSE bit is not set otherwise.
3861       */
3862      if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
3863          return CP_ACCESS_TRAP;
3864      }
3865      return CP_ACCESS_OK;
3866  }
3867  
3868  static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3869                                       bool isread)
3870  {
3871      if (arm_current_el(env) == 3 &&
3872          !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
3873          return CP_ACCESS_TRAP;
3874      }
3875      return at_e012_access(env, ri, isread);
3876  }
3877  
3878  static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
3879                                        bool isread)
3880  {
3881      if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
3882          return CP_ACCESS_TRAP_EL2;
3883      }
3884      return at_e012_access(env, ri, isread);
3885  }
3886  
3887  static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3888                          uint64_t value)
3889  {
3890  #ifdef CONFIG_TCG
3891      MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3892      ARMMMUIdx mmu_idx;
3893      uint64_t hcr_el2 = arm_hcr_el2_eff(env);
3894      bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
3895      bool for_el3 = false;
3896      ARMSecuritySpace ss;
3897  
3898      switch (ri->opc2 & 6) {
3899      case 0:
3900          switch (ri->opc1) {
3901          case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3902              if (ri->crm == 9 && arm_pan_enabled(env)) {
3903                  mmu_idx = regime_e20 ?
3904                            ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
3905              } else {
3906                  mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
3907              }
3908              break;
3909          case 4: /* AT S1E2R, AT S1E2W */
3910              mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
3911              break;
3912          case 6: /* AT S1E3R, AT S1E3W */
3913              mmu_idx = ARMMMUIdx_E3;
3914              for_el3 = true;
3915              break;
3916          default:
3917              g_assert_not_reached();
3918          }
3919          break;
3920      case 2: /* AT S1E0R, AT S1E0W */
3921          mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
3922          break;
3923      case 4: /* AT S12E1R, AT S12E1W */
3924          mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
3925          break;
3926      case 6: /* AT S12E0R, AT S12E0W */
3927          mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
3928          break;
3929      default:
3930          g_assert_not_reached();
3931      }
3932  
3933      ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
3934      env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
3935  #else
3936      /* Handled by hardware accelerator. */
3937      g_assert_not_reached();
3938  #endif /* CONFIG_TCG */
3939  }
3940  #endif
3941  
3942  /* Return basic MPU access permission bits.  */
3943  static uint32_t simple_mpu_ap_bits(uint32_t val)
3944  {
3945      uint32_t ret;
3946      uint32_t mask;
3947      int i;
3948      ret = 0;
3949      mask = 3;
3950      for (i = 0; i < 16; i += 2) {
3951          ret |= (val >> i) & mask;
3952          mask <<= 2;
3953      }
3954      return ret;
3955  }
3956  
3957  /* Pad basic MPU access permission bits to extended format.  */
3958  static uint32_t extended_mpu_ap_bits(uint32_t val)
3959  {
3960      uint32_t ret;
3961      uint32_t mask;
3962      int i;
3963      ret = 0;
3964      mask = 3;
3965      for (i = 0; i < 16; i += 2) {
3966          ret |= (val & mask) << i;
3967          mask <<= 2;
3968      }
3969      return ret;
3970  }
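
/*
 * Worked example (illustrative self-test): the extended format keeps one
 * 4-bit AP field per region, the simple format only the low 2 bits of
 * each. With region 0 AP=1 and region 1 AP=2:
 */
static inline void example_mpu_ap_roundtrip(void)
{
    assert(simple_mpu_ap_bits(0x21) == 0x9);   /* 4-bit fields -> 2-bit */
    assert(extended_mpu_ap_bits(0x9) == 0x21); /* and padded back again */
}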
3971  
3972  static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3973                                   uint64_t value)
3974  {
3975      env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3976  }
3977  
3978  static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3979  {
3980      return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3981  }
3982  
3983  static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3984                                   uint64_t value)
3985  {
3986      env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3987  }
3988  
3989  static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3990  {
3991      return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3992  }
3993  
3994  static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3995  {
3996      uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3997  
3998      if (!u32p) {
3999          return 0;
4000      }
4001  
4002      u32p += env->pmsav7.rnr[M_REG_NS];
4003      return *u32p;
4004  }
4005  
4006  static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
4007                           uint64_t value)
4008  {
4009      ARMCPU *cpu = env_archcpu(env);
4010      uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
4011  
4012      if (!u32p) {
4013          return;
4014      }
4015  
4016      u32p += env->pmsav7.rnr[M_REG_NS];
4017      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4018      *u32p = value;
4019  }
4020  
4021  static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4022                                uint64_t value)
4023  {
4024      ARMCPU *cpu = env_archcpu(env);
4025      uint32_t nrgs = cpu->pmsav7_dregion;
4026  
4027      if (value >= nrgs) {
4028          qemu_log_mask(LOG_GUEST_ERROR,
4029                        "PMSAv7 RGNR write >= # supported regions, %" PRIu32
4030                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
4031          return;
4032      }
4033  
4034      raw_write(env, ri, value);
4035  }
4036  
4037  static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4038                            uint64_t value)
4039  {
4040      ARMCPU *cpu = env_archcpu(env);
4041  
4042      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4043      env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
4044  }
4045  
4046  static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4047  {
4048      return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
4049  }
4050  
4051  static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4052                            uint64_t value)
4053  {
4054      ARMCPU *cpu = env_archcpu(env);
4055  
4056      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4057      env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value;
4058  }
4059  
4060  static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4061  {
4062      return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]];
4063  }
4064  
4065  static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4066                             uint64_t value)
4067  {
4068      ARMCPU *cpu = env_archcpu(env);
4069  
4070      /*
4071       * Ignore writes that would select an unimplemented region.
4072       * This is architecturally UNPREDICTABLE.
4073       */
4074      if (value >= cpu->pmsav7_dregion) {
4075          return;
4076      }
4077  
4078      env->pmsav7.rnr[M_REG_NS] = value;
4079  }
4080  
4081  static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4082                            uint64_t value)
4083  {
4084      ARMCPU *cpu = env_archcpu(env);
4085  
4086      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4087      env->pmsav8.hprbar[env->pmsav8.hprselr] = value;
4088  }
4089  
4090  static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4091  {
4092      return env->pmsav8.hprbar[env->pmsav8.hprselr];
4093  }
4094  
4095  static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4096                            uint64_t value)
4097  {
4098      ARMCPU *cpu = env_archcpu(env);
4099  
4100      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4101      env->pmsav8.hprlar[env->pmsav8.hprselr] = value;
4102  }
4103  
4104  static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri)
4105  {
4106      return env->pmsav8.hprlar[env->pmsav8.hprselr];
4107  }
4108  
4109  static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4110                            uint64_t value)
4111  {
4112      uint32_t n;
4113      uint32_t bit;
4114      ARMCPU *cpu = env_archcpu(env);
4115  
4116      /* Ignore writes to unimplemented regions */
4117      int rmax = MIN(cpu->pmsav8r_hdregion, 32);
4118      value &= MAKE_64BIT_MASK(0, rmax);
4119  
4120      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4121  
4122      /* Register alias is only valid for first 32 indexes */
4123      for (n = 0; n < rmax; ++n) {
4124          bit = extract32(value, n, 1);
4125          env->pmsav8.hprlar[n] = deposit32(
4126                      env->pmsav8.hprlar[n], 0, 1, bit);
4127      }
4128  }
4129  
4130  static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4131  {
4132      uint32_t n;
4133      uint32_t result = 0x0;
4134      ARMCPU *cpu = env_archcpu(env);
4135  
4136      /* Register alias is only valid for first 32 indexes */
4137      for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) {
4138          if (env->pmsav8.hprlar[n] & 0x1) {
4139              result |= (0x1 << n);
4140          }
4141      }
4142      return result;
4143  }
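
/*
 * Sketch (not in the original file): HPRENR bit n aliases HPRLAR<n>.EN
 * (bit 0), which is what the deposit32()/extract32() loops above
 * implement for the first min(pmsav8r_hdregion, 32) regions:
 */
static inline uint32_t example_hprenr_of(const uint32_t *hprlar,
                                         unsigned nregions)
{
    uint32_t enr = 0;

    for (unsigned n = 0; n < nregions && n < 32; n++) {
        enr |= (hprlar[n] & 1) << n;
    }
    return enr;
}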
4144  
4145  static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4146                             uint64_t value)
4147  {
4148      ARMCPU *cpu = env_archcpu(env);
4149  
4150      /*
4151       * Ignore writes that would select an unimplemented region.
4152       * This is architecturally UNPREDICTABLE.
4153       */
4154      if (value >= cpu->pmsav8r_hdregion) {
4155          return;
4156      }
4157  
4158      env->pmsav8.hprselr = value;
4159  }
4160  
4161  static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri,
4162                            uint64_t value)
4163  {
4164      ARMCPU *cpu = env_archcpu(env);
4165      uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4166                      (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4167  
4168      tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
4169  
4170      if (ri->opc1 & 4) {
4171          if (index >= cpu->pmsav8r_hdregion) {
4172              return;
4173          }
4174          if (ri->opc2 & 0x1) {
4175              env->pmsav8.hprlar[index] = value;
4176          } else {
4177              env->pmsav8.hprbar[index] = value;
4178          }
4179      } else {
4180          if (index >= cpu->pmsav7_dregion) {
4181              return;
4182          }
4183          if (ri->opc2 & 0x1) {
4184              env->pmsav8.rlar[M_REG_NS][index] = value;
4185          } else {
4186              env->pmsav8.rbar[M_REG_NS][index] = value;
4187          }
4188      }
4189  }
4190  
4191  static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri)
4192  {
4193      ARMCPU *cpu = env_archcpu(env);
4194      uint8_t index = (extract32(ri->opc0, 0, 1) << 4) |
4195                      (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1);
4196  
4197      if (ri->opc1 & 4) {
4198          if (index >= cpu->pmsav8r_hdregion) {
4199              return 0x0;
4200          }
4201          if (ri->opc2 & 0x1) {
4202              return env->pmsav8.hprlar[index];
4203          } else {
4204              return env->pmsav8.hprbar[index];
4205          }
4206      } else {
4207          if (index >= cpu->pmsav7_dregion) {
4208              return 0x0;
4209          }
4210          if (ri->opc2 & 0x1) {
4211              return env->pmsav8.rlar[M_REG_NS][index];
4212          } else {
4213              return env->pmsav8.rbar[M_REG_NS][index];
4214          }
4215      }
4216  }
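
/*
 * Sketch of the region-index decode shared by pmsav8r_regn_write() and
 * pmsav8r_regn_read() above (hypothetical helper): the index is
 * opc0[0]:crm[2:0]:opc2[2], while opc2 bit 0 selects RLAR (set) versus
 * RBAR (clear). For example opc0[0]=0, crm=5, opc2=4 selects region 11.
 */
static inline unsigned example_pmsav8r_region(unsigned opc0, unsigned crm,
                                              unsigned opc2)
{
    return ((opc0 & 1) << 4) | ((crm & 7) << 1) | ((opc2 >> 2) & 1);
}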
4217  
4218  static const ARMCPRegInfo pmsav8r_cp_reginfo[] = {
4219      { .name = "PRBAR",
4220        .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0,
4221        .access = PL1_RW, .type = ARM_CP_NO_RAW,
4222        .accessfn = access_tvm_trvm,
4223        .readfn = prbar_read, .writefn = prbar_write },
4224      { .name = "PRLAR",
4225        .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1,
4226        .access = PL1_RW, .type = ARM_CP_NO_RAW,
4227        .accessfn = access_tvm_trvm,
4228        .readfn = prlar_read, .writefn = prlar_write },
4229      { .name = "PRSELR", .resetvalue = 0,
4230        .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1,
4231        .access = PL1_RW, .accessfn = access_tvm_trvm,
4232        .writefn = prselr_write,
4233        .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) },
4234      { .name = "HPRBAR", .resetvalue = 0,
4235        .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0,
4236        .access = PL2_RW, .type = ARM_CP_NO_RAW,
4237        .readfn = hprbar_read, .writefn = hprbar_write },
4238      { .name = "HPRLAR",
4239        .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1,
4240        .access = PL2_RW, .type = ARM_CP_NO_RAW,
4241        .readfn = hprlar_read, .writefn = hprlar_write },
4242      { .name = "HPRSELR", .resetvalue = 0,
4243        .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1,
4244        .access = PL2_RW,
4245        .writefn = hprselr_write,
4246        .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) },
4247      { .name = "HPRENR",
4248        .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1,
4249        .access = PL2_RW, .type = ARM_CP_NO_RAW,
4250        .readfn = hprenr_read, .writefn = hprenr_write },
4251  };
4252  
4253  static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
4254      /*
4255       * Reset for all these registers is handled in arm_cpu_reset(),
4256       * because the PMSAv7 is also used by M-profile CPUs, which do
4257       * not register cpregs but still need the state to be reset.
4258       */
4259      { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
4260        .access = PL1_RW, .type = ARM_CP_NO_RAW,
4261        .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
4262        .readfn = pmsav7_read, .writefn = pmsav7_write,
4263        .resetfn = arm_cp_reset_ignore },
4264      { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
4265        .access = PL1_RW, .type = ARM_CP_NO_RAW,
4266        .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
4267        .readfn = pmsav7_read, .writefn = pmsav7_write,
4268        .resetfn = arm_cp_reset_ignore },
4269      { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
4270        .access = PL1_RW, .type = ARM_CP_NO_RAW,
4271        .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
4272        .readfn = pmsav7_read, .writefn = pmsav7_write,
4273        .resetfn = arm_cp_reset_ignore },
4274      { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
4275        .access = PL1_RW,
4276        .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
4277        .writefn = pmsav7_rgnr_write,
4278        .resetfn = arm_cp_reset_ignore },
4279  };
4280  
4281  static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
4282      { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4283        .access = PL1_RW, .type = ARM_CP_ALIAS,
4284        .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4285        .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
4286      { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4287        .access = PL1_RW, .type = ARM_CP_ALIAS,
4288        .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4289        .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
4290      { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
4291        .access = PL1_RW,
4292        .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
4293        .resetvalue = 0, },
4294      { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
4295        .access = PL1_RW,
4296        .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
4297        .resetvalue = 0, },
4298      { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4299        .access = PL1_RW,
4300        .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
4301      { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
4302        .access = PL1_RW,
4303        .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
4304      /* Protection region base and size registers */
4305      { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
4306        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4307        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
4308      { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
4309        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4310        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
4311      { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
4312        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4313        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
4314      { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
4315        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4316        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
4317      { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
4318        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4319        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
4320      { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
4321        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4322        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
4323      { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
4324        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4325        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
4326      { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
4327        .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
4328        .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
4329  };
4330  
4331  static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4332                               uint64_t value)
4333  {
4334      ARMCPU *cpu = env_archcpu(env);
4335  
4336      if (!arm_feature(env, ARM_FEATURE_V8)) {
4337          if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
4338              /*
4339               * Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
4340               * using the Long-descriptor translation table format.
4341               */
4342              value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
4343          } else if (arm_feature(env, ARM_FEATURE_EL3)) {
4344              /*
4345               * In an implementation that includes the Security Extensions
4346               * TTBCR has additional fields PD0 [4] and PD1 [5] for
4347               * Short-descriptor translation table format.
4348               */
4349              value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
4350          } else {
4351              value &= TTBCR_N;
4352          }
4353      }
4354  
4355      if (arm_feature(env, ARM_FEATURE_LPAE)) {
4356          /*
4357            * With LPAE enabled, a TTBCR write can change the active ASID
4358            * via the TTBCR.A1 bit, so do a TLB flush.
4359           */
4360          tlb_flush(CPU(cpu));
4361      }
4362      raw_write(env, ri, value);
4363  }
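
/*
 * Worked example of the UNK/SBZP mask above (illustrative only, not
 * part of the original source):
 *   (7 << 19)  == 0x00380000   -> clears bits [21:19]
 *   (3 << 14)  == 0x0000c000   -> clears bits [15:14]
 *   (0xf << 3) == 0x00000078   -> clears bits [6:3]
 * so "value &= ~(...)" zeroes exactly the fields named in the comment.
 */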
4364  
4365  static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
4366                                 uint64_t value)
4367  {
4368      ARMCPU *cpu = env_archcpu(env);
4369  
4370      /* For AArch64, a change to the A1 bit can change the active ASID, so flush the TLB. */
4371      tlb_flush(CPU(cpu));
4372      raw_write(env, ri, value);
4373  }
4374  
4375  static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4376                              uint64_t value)
4377  {
4378      /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
4379      if (cpreg_field_is_64bit(ri) &&
4380          extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4381          ARMCPU *cpu = env_archcpu(env);
4382          tlb_flush(CPU(cpu));
4383      }
4384      raw_write(env, ri, value);
4385  }
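
/*
 * Illustrative sketch (not from the original source): for a 64-bit
 * TTBR the ASID lives in bits [63:48], so XOR-ing the old and new
 * values and extracting that field is non-zero exactly when the
 * ASID changes:
 *   old = 0x0001000000001000, new = 0x0002000000001000
 *   extract64(old ^ new, 48, 16) == 0x0003  -> TLB flush needed
 * A write that only moves the table base leaves the field zero, so
 * no flush is done.
 */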
4386  
4387  static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4388                                      uint64_t value)
4389  {
4390      /*
4391       * If we are running with the E2&0 regime, then an ASID is active.
4392       * Flush if it might be changing.  Note we're not checking
4393       * TCR_EL2.A1 to know whether this is really the TTBRx_EL2 that
4394       * holds the active ASID, only checking the field that might.
4395       */
4396      if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
4397          (arm_hcr_el2_eff(env) & HCR_E2H)) {
4398          uint16_t mask = ARMMMUIdxBit_E20_2 |
4399                          ARMMMUIdxBit_E20_2_PAN |
4400                          ARMMMUIdxBit_E20_0;
4401          tlb_flush_by_mmuidx(env_cpu(env), mask);
4402      }
4403      raw_write(env, ri, value);
4404  }
4405  
4406  static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4407                          uint64_t value)
4408  {
4409      ARMCPU *cpu = env_archcpu(env);
4410      CPUState *cs = CPU(cpu);
4411  
4412      /*
4413       * A change of VMID for the stage2 page table invalidates both the
4414       * stage2 TLB and the combined stage 1&2 TLBs (EL10_1 and EL10_0).
4415       */
4416      if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
4417          tlb_flush_by_mmuidx(cs, alle1_tlbmask(env));
4418      }
4419      raw_write(env, ri, value);
4420  }
4421  
4422  static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4423      { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4424        .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4425        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4426                               offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4427      { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4428        .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4429        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4430                               offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4431      { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4432        .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4433        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4434                               offsetof(CPUARMState, cp15.dfar_ns) } },
4435      { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4436        .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4437        .access = PL1_RW, .accessfn = access_tvm_trvm,
4438        .fgt = FGT_FAR_EL1,
4439        .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
4440        .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4441        .resetvalue = 0, },
4442  };
4443  
4444  static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4445      { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4446        .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4447        .access = PL1_RW, .accessfn = access_tvm_trvm,
4448        .fgt = FGT_ESR_EL1,
4449        .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
4450        .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4451      { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4452        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4453        .access = PL1_RW, .accessfn = access_tvm_trvm,
4454        .fgt = FGT_TTBR0_EL1,
4455        .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
4456        .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4457        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4458                               offsetof(CPUARMState, cp15.ttbr0_ns) } },
4459      { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4460        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4461        .access = PL1_RW, .accessfn = access_tvm_trvm,
4462        .fgt = FGT_TTBR1_EL1,
4463        .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
4464        .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
4465        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4466                               offsetof(CPUARMState, cp15.ttbr1_ns) } },
4467      { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4468        .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4469        .access = PL1_RW, .accessfn = access_tvm_trvm,
4470        .fgt = FGT_TCR_EL1,
4471        .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
4472        .writefn = vmsa_tcr_el12_write,
4473        .raw_writefn = raw_write,
4474        .resetvalue = 0,
4475        .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4476      { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4477        .access = PL1_RW, .accessfn = access_tvm_trvm,
4478        .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4479        .raw_writefn = raw_write,
4480        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4481                               offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4482  };
4483  
4484  /*
4485   * Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4486   * qemu tlbs nor adjusting cached masks.
4487   */
4488  static const ARMCPRegInfo ttbcr2_reginfo = {
4489      .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4490      .access = PL1_RW, .accessfn = access_tvm_trvm,
4491      .type = ARM_CP_ALIAS,
4492      .bank_fieldoffsets = {
4493          offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4494          offsetofhigh32(CPUARMState, cp15.tcr_el[1]),
4495      },
4496  };
4497  
4498  static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4499                                  uint64_t value)
4500  {
4501      env->cp15.c15_ticonfig = value & 0xe7;
4502      /* The OS_TYPE bit in this register changes the reported CPUID! */
4503      env->cp15.c0_cpuid = (value & (1 << 5)) ?
4504          ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4505  }
4506  
4507  static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4508                                  uint64_t value)
4509  {
4510      env->cp15.c15_threadid = value & 0xffff;
4511  }
4512  
4513  static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4514                             uint64_t value)
4515  {
4516      /* Wait-for-interrupt (deprecated) */
4517      cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4518  }
4519  
4520  static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4521                                    uint64_t value)
4522  {
4523      /*
4524       * On OMAP there are registers indicating the max/min index of dcache lines
4525       * containing a dirty line; cache flush operations have to reset these.
4526       */
4527      env->cp15.c15_i_max = 0x000;
4528      env->cp15.c15_i_min = 0xff0;
4529  }
4530  
4531  static const ARMCPRegInfo omap_cp_reginfo[] = {
4532      { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4533        .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4534        .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4535        .resetvalue = 0, },
4536      { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4537        .access = PL1_RW, .type = ARM_CP_NOP },
4538      { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4539        .access = PL1_RW,
4540        .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4541        .writefn = omap_ticonfig_write },
4542      { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4543        .access = PL1_RW,
4544        .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4545      { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4546        .access = PL1_RW, .resetvalue = 0xff0,
4547        .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4548      { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4549        .access = PL1_RW,
4550        .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4551        .writefn = omap_threadid_write },
4552      { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4553        .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4554        .type = ARM_CP_NO_RAW,
4555        .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4556      /*
4557       * TODO: Peripheral port remap register:
4558       * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4559       * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4560       * when MMU is off.
4561       */
4562      { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4563        .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4564        .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4565        .writefn = omap_cachemaint_write },
4566      { .name = "C9", .cp = 15, .crn = 9,
4567        .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4568        .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4569  };
4570  
4571  static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4572                                uint64_t value)
4573  {
4574      env->cp15.c15_cpar = value & 0x3fff;
4575  }
4576  
4577  static const ARMCPRegInfo xscale_cp_reginfo[] = {
4578      { .name = "XSCALE_CPAR",
4579        .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4580        .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4581        .writefn = xscale_cpar_write, },
4582      { .name = "XSCALE_AUXCR",
4583        .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4584        .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4585        .resetvalue = 0, },
4586      /*
4587       * XScale specific cache-lockdown: since we have no cache we NOP these
4588       * and hope the guest does not really rely on cache behaviour.
4589       */
4590      { .name = "XSCALE_LOCK_ICACHE_LINE",
4591        .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4592        .access = PL1_W, .type = ARM_CP_NOP },
4593      { .name = "XSCALE_UNLOCK_ICACHE",
4594        .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4595        .access = PL1_W, .type = ARM_CP_NOP },
4596      { .name = "XSCALE_DCACHE_LOCK",
4597        .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4598        .access = PL1_RW, .type = ARM_CP_NOP },
4599      { .name = "XSCALE_UNLOCK_DCACHE",
4600        .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4601        .access = PL1_W, .type = ARM_CP_NOP },
4602  };
4603  
4604  static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4605      /*
4606       * RAZ/WI the whole crn=15 space when we don't have a more specific
4607       * implementation of this implementation-defined space.
4608       * Ideally this should eventually disappear in favour of actually
4609       * implementing the correct behaviour for all cores.
4610       */
4611      { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4612        .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4613        .access = PL1_RW,
4614        .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4615        .resetvalue = 0 },
4616  };
4617  
4618  static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4619      /* Cache status: RAZ because we have no cache so it's always clean */
4620      { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4621        .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4622        .resetvalue = 0 },
4623  };
4624  
4625  static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4626      /* We never have a block transfer operation in progress */
4627      { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4628        .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4629        .resetvalue = 0 },
4630      /* The cache ops themselves: these all NOP for QEMU */
4631      { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4632        .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4633      { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4634        .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4635      { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4636        .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4637      { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4638        .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4639      { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4640        .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4641      { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4642        .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
4643  };
4644  
4645  static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4646      /*
4647       * The cache test-and-clean instructions always return (1 << 30)
4648       * to indicate that there are no dirty cache lines.
4649       */
4650      { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4651        .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4652        .resetvalue = (1 << 30) },
4653      { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4654        .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4655        .resetvalue = (1 << 30) },
4656  };
4657  
4658  static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4659      /* Ignore ReadBuffer accesses */
4660      { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4661        .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4662        .access = PL1_RW, .resetvalue = 0,
4663        .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4664  };
4665  
4666  static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4667  {
4668      unsigned int cur_el = arm_current_el(env);
4669  
4670      if (arm_is_el2_enabled(env) && cur_el == 1) {
4671          return env->cp15.vpidr_el2;
4672      }
4673      return raw_read(env, ri);
4674  }
4675  
4676  static uint64_t mpidr_read_val(CPUARMState *env)
4677  {
4678      ARMCPU *cpu = env_archcpu(env);
4679      uint64_t mpidr = cpu->mp_affinity;
4680  
4681      if (arm_feature(env, ARM_FEATURE_V7MP)) {
4682          mpidr |= (1U << 31);
4683          /*
4684           * Cores which are uniprocessor (non-coherent) but still
4685           * implement the MP extensions set bit 30 (for instance,
4686           * the Cortex-R5).
4687           */
4688          if (cpu->mp_is_up) {
4689              mpidr |= (1u << 30);
4690          }
4691      }
4692      return mpidr;
4693  }
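
/*
 * Worked example (illustrative, not from the original source): a
 * V7MP-capable core with mp_affinity == 0x1 reports
 * MPIDR == 0x80000001 (bit 31 is the "MP extensions" format bit);
 * a uniprocessor implementation such as the Cortex-R5 additionally
 * sets bit 30 (U), giving 0xc0000001.
 */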
4694  
4695  static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4696  {
4697      unsigned int cur_el = arm_current_el(env);
4698  
4699      if (arm_is_el2_enabled(env) && cur_el == 1) {
4700          return env->cp15.vmpidr_el2;
4701      }
4702      return mpidr_read_val(env);
4703  }
4704  
4705  static const ARMCPRegInfo lpae_cp_reginfo[] = {
4706      /* NOP AMAIR0/1 */
4707      { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4708        .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4709        .access = PL1_RW, .accessfn = access_tvm_trvm,
4710        .fgt = FGT_AMAIR_EL1,
4711        .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
4712        .type = ARM_CP_CONST, .resetvalue = 0 },
4713      /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4714      { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4715        .access = PL1_RW, .accessfn = access_tvm_trvm,
4716        .type = ARM_CP_CONST, .resetvalue = 0 },
4717      { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4718        .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4719        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4720                               offsetof(CPUARMState, cp15.par_ns)} },
4721      { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4722        .access = PL1_RW, .accessfn = access_tvm_trvm,
4723        .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4724        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4725                               offsetof(CPUARMState, cp15.ttbr0_ns) },
4726        .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4727      { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4728        .access = PL1_RW, .accessfn = access_tvm_trvm,
4729        .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4730        .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4731                               offsetof(CPUARMState, cp15.ttbr1_ns) },
4732        .writefn = vmsa_ttbr_write, .raw_writefn = raw_write },
4733  };
4734  
4735  static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4736  {
4737      return vfp_get_fpcr(env);
4738  }
4739  
4740  static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4741                              uint64_t value)
4742  {
4743      vfp_set_fpcr(env, value);
4744  }
4745  
4746  static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4747  {
4748      return vfp_get_fpsr(env);
4749  }
4750  
4751  static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4752                              uint64_t value)
4753  {
4754      vfp_set_fpsr(env, value);
4755  }
4756  
4757  static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4758                                         bool isread)
4759  {
4760      if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4761          return CP_ACCESS_TRAP;
4762      }
4763      return CP_ACCESS_OK;
4764  }
4765  
4766  static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4767                              uint64_t value)
4768  {
4769      env->daif = value & PSTATE_DAIF;
4770  }
4771  
4772  static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4773  {
4774      return env->pstate & PSTATE_PAN;
4775  }
4776  
4777  static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4778                             uint64_t value)
4779  {
4780      env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4781  }
4782  
4783  static const ARMCPRegInfo pan_reginfo = {
4784      .name = "PAN", .state = ARM_CP_STATE_AA64,
4785      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4786      .type = ARM_CP_NO_RAW, .access = PL1_RW,
4787      .readfn = aa64_pan_read, .writefn = aa64_pan_write
4788  };
4789  
4790  static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4791  {
4792      return env->pstate & PSTATE_UAO;
4793  }
4794  
4795  static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4796                             uint64_t value)
4797  {
4798      env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4799  }
4800  
4801  static const ARMCPRegInfo uao_reginfo = {
4802      .name = "UAO", .state = ARM_CP_STATE_AA64,
4803      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4804      .type = ARM_CP_NO_RAW, .access = PL1_RW,
4805      .readfn = aa64_uao_read, .writefn = aa64_uao_write
4806  };
4807  
4808  static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri)
4809  {
4810      return env->pstate & PSTATE_DIT;
4811  }
4812  
4813  static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri,
4814                             uint64_t value)
4815  {
4816      env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT);
4817  }
4818  
4819  static const ARMCPRegInfo dit_reginfo = {
4820      .name = "DIT", .state = ARM_CP_STATE_AA64,
4821      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5,
4822      .type = ARM_CP_NO_RAW, .access = PL0_RW,
4823      .readfn = aa64_dit_read, .writefn = aa64_dit_write
4824  };
4825  
4826  static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri)
4827  {
4828      return env->pstate & PSTATE_SSBS;
4829  }
4830  
4831  static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
4832                             uint64_t value)
4833  {
4834      env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS);
4835  }
4836  
4837  static const ARMCPRegInfo ssbs_reginfo = {
4838      .name = "SSBS", .state = ARM_CP_STATE_AA64,
4839      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6,
4840      .type = ARM_CP_NO_RAW, .access = PL0_RW,
4841      .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write
4842  };
4843  
4844  static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4845                                                const ARMCPRegInfo *ri,
4846                                                bool isread)
4847  {
4848      /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4849      switch (arm_current_el(env)) {
4850      case 0:
4851          /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4852          if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4853              return CP_ACCESS_TRAP;
4854          }
4855          /* fall through */
4856      case 1:
4857          /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4858          if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4859              return CP_ACCESS_TRAP_EL2;
4860          }
4861          break;
4862      }
4863      return CP_ACCESS_OK;
4864  }
4865  
4866  static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags)
4867  {
4868      /* Cache invalidate/clean to Point of Unification... */
4869      switch (arm_current_el(env)) {
4870      case 0:
4871          /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4872          if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4873              return CP_ACCESS_TRAP;
4874          }
4875          /* fall through */
4876      case 1:
4877          /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set.  */
4878          if (arm_hcr_el2_eff(env) & hcrflags) {
4879              return CP_ACCESS_TRAP_EL2;
4880          }
4881          break;
4882      }
4883      return CP_ACCESS_OK;
4884  }
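
/*
 * Editorial note on the deliberate fall-through above: an EL0 access
 * must pass both the SCTLR.UCI check and the HCR_EL2 trap check,
 * while an EL1 access is only subject to the latter.  Accesses from
 * EL2 or EL3 fall outside the switch and always succeed.
 */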
4885  
4886  static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri,
4887                                     bool isread)
4888  {
4889      return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU);
4890  }
4891  
4892  static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri,
4893                                    bool isread)
4894  {
4895      return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU);
4896  }
4897  
4898  /*
4899   * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4900   * Page D4-1736 (DDI0487A.b)
4901   */
4902  
4903  static int vae1_tlbmask(CPUARMState *env)
4904  {
4905      uint64_t hcr = arm_hcr_el2_eff(env);
4906      uint16_t mask;
4907  
4908      if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4909          mask = ARMMMUIdxBit_E20_2 |
4910                 ARMMMUIdxBit_E20_2_PAN |
4911                 ARMMMUIdxBit_E20_0;
4912      } else {
4913          mask = ARMMMUIdxBit_E10_1 |
4914                 ARMMMUIdxBit_E10_1_PAN |
4915                 ARMMMUIdxBit_E10_0;
4916      }
4917      return mask;
4918  }
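
/*
 * Illustrative note (not from the original source): with
 * HCR_EL2.{E2H,TGE} == {1,1}, EL0 runs in the EL2&0 translation
 * regime, so an EL1-style TLBI must flush the E20_* mmu indexes;
 * in every other configuration the EL1&0 (E10_*) indexes are the
 * ones affected.
 */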
4919  
4920  static int vae2_tlbmask(CPUARMState *env)
4921  {
4922      uint64_t hcr = arm_hcr_el2_eff(env);
4923      uint16_t mask;
4924  
4925      if (hcr & HCR_E2H) {
4926          mask = ARMMMUIdxBit_E20_2 |
4927                 ARMMMUIdxBit_E20_2_PAN |
4928                 ARMMMUIdxBit_E20_0;
4929      } else {
4930          mask = ARMMMUIdxBit_E2;
4931      }
4932      return mask;
4933  }
4934  
4935  /* Return 56 if TBI is enabled, 64 otherwise. */
4936  static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4937                                uint64_t addr)
4938  {
4939      uint64_t tcr = regime_tcr(env, mmu_idx);
4940      int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4941      int select = extract64(addr, 55, 1);
4942  
4943      return (tbi >> select) & 1 ? 56 : 64;
4944  }
4945  
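
/*
 * Illustrative example (not from the original source): bit 55 of the
 * address selects which half of the VA space, and hence which TBI
 * bit, applies.  For addr == 0xffff800000000000, bit 55 is 1, so
 * TBI1 is consulted; if it is set, only bits [55:0] are significant
 * and the flush may match on 56 bits rather than all 64.
 */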
4946  static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4947  {
4948      uint64_t hcr = arm_hcr_el2_eff(env);
4949      ARMMMUIdx mmu_idx;
4950  
4951      /* Only the regime of the mmu_idx below is significant. */
4952      if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4953          mmu_idx = ARMMMUIdx_E20_0;
4954      } else {
4955          mmu_idx = ARMMMUIdx_E10_0;
4956      }
4957  
4958      return tlbbits_for_regime(env, mmu_idx, addr);
4959  }
4960  
4961  static int vae2_tlbbits(CPUARMState *env, uint64_t addr)
4962  {
4963      uint64_t hcr = arm_hcr_el2_eff(env);
4964      ARMMMUIdx mmu_idx;
4965  
4966      /*
4967       * Only the regime of the mmu_idx below is significant.
4968       * Regime EL2&0 has two ranges with separate TBI configuration, while EL2
4969       * only has one.
4970       */
4971      if (hcr & HCR_E2H) {
4972          mmu_idx = ARMMMUIdx_E20_2;
4973      } else {
4974          mmu_idx = ARMMMUIdx_E2;
4975      }
4976  
4977      return tlbbits_for_regime(env, mmu_idx, addr);
4978  }
4979  
4980  static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4981                                        uint64_t value)
4982  {
4983      CPUState *cs = env_cpu(env);
4984      int mask = vae1_tlbmask(env);
4985  
4986      tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4987  }
4988  
4989  static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4990                                      uint64_t value)
4991  {
4992      CPUState *cs = env_cpu(env);
4993      int mask = vae1_tlbmask(env);
4994  
4995      if (tlb_force_broadcast(env)) {
4996          tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4997      } else {
4998          tlb_flush_by_mmuidx(cs, mask);
4999      }
5000  }
5001  
5002  static int e2_tlbmask(CPUARMState *env)
5003  {
5004      return (ARMMMUIdxBit_E20_0 |
5005              ARMMMUIdxBit_E20_2 |
5006              ARMMMUIdxBit_E20_2_PAN |
5007              ARMMMUIdxBit_E2);
5008  }
5009  
5010  static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5011                                    uint64_t value)
5012  {
5013      CPUState *cs = env_cpu(env);
5014      int mask = alle1_tlbmask(env);
5015  
5016      tlb_flush_by_mmuidx(cs, mask);
5017  }
5018  
5019  static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5020                                    uint64_t value)
5021  {
5022      CPUState *cs = env_cpu(env);
5023      int mask = e2_tlbmask(env);
5024  
5025      tlb_flush_by_mmuidx(cs, mask);
5026  }
5027  
5028  static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5029                                    uint64_t value)
5030  {
5031      ARMCPU *cpu = env_archcpu(env);
5032      CPUState *cs = CPU(cpu);
5033  
5034      tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
5035  }
5036  
5037  static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5038                                      uint64_t value)
5039  {
5040      CPUState *cs = env_cpu(env);
5041      int mask = alle1_tlbmask(env);
5042  
5043      tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
5044  }
5045  
5046  static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5047                                      uint64_t value)
5048  {
5049      CPUState *cs = env_cpu(env);
5050      int mask = e2_tlbmask(env);
5051  
5052      tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
5053  }
5054  
5055  static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5056                                      uint64_t value)
5057  {
5058      CPUState *cs = env_cpu(env);
5059  
5060      tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
5061  }
5062  
5063  static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5064                                   uint64_t value)
5065  {
5066      /*
5067       * Invalidate by VA, EL2
5068       * Currently handles both VAE2 and VALE2, since we don't support
5069       * flush-last-level-only.
5070       */
5071      CPUState *cs = env_cpu(env);
5072      int mask = vae2_tlbmask(env);
5073      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5074      int bits = vae2_tlbbits(env, pageaddr);
5075  
5076      tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
5077  }
5078  
5079  static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5080                                   uint64_t value)
5081  {
5082      /*
5083       * Invalidate by VA, EL3
5084       * Currently handles both VAE3 and VALE3, since we don't support
5085       * flush-last-level-only.
5086       */
5087      ARMCPU *cpu = env_archcpu(env);
5088      CPUState *cs = CPU(cpu);
5089      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5090  
5091      tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
5092  }
5093  
5094  static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5095                                     uint64_t value)
5096  {
5097      CPUState *cs = env_cpu(env);
5098      int mask = vae1_tlbmask(env);
5099      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5100      int bits = vae1_tlbbits(env, pageaddr);
5101  
5102      tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
5103  }
5104  
5105  static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5106                                   uint64_t value)
5107  {
5108      /*
5109       * Invalidate by VA, EL1&0 (AArch64 version).
5110       * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
5111       * since we don't support flush-for-specific-ASID-only or
5112       * flush-last-level-only.
5113       */
5114      CPUState *cs = env_cpu(env);
5115      int mask = vae1_tlbmask(env);
5116      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5117      int bits = vae1_tlbbits(env, pageaddr);
5118  
5119      if (tlb_force_broadcast(env)) {
5120          tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
5121      } else {
5122          tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
5123      }
5124  }
5125  
5126  static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5127                                     uint64_t value)
5128  {
5129      CPUState *cs = env_cpu(env);
5130      int mask = vae2_tlbmask(env);
5131      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5132      int bits = vae2_tlbbits(env, pageaddr);
5133  
5134      tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
5135  }
5136  
5137  static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5138                                     uint64_t value)
5139  {
5140      CPUState *cs = env_cpu(env);
5141      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5142      int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
5143  
5144      tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
5145                                                    ARMMMUIdxBit_E3, bits);
5146  }
5147  
5148  static int ipas2e1_tlbmask(CPUARMState *env, int64_t value)
5149  {
5150      /*
5151       * The MSB of value is the NS field, which only applies if SEL2
5152       * is implemented and SCR_EL3.NS is not set (i.e. in secure mode).
5153       */
5154      return (value >= 0
5155              && cpu_isar_feature(aa64_sel2, env_archcpu(env))
5156              && arm_is_secure_below_el3(env)
5157              ? ARMMMUIdxBit_Stage2_S
5158              : ARMMMUIdxBit_Stage2);
5159  }
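
/*
 * Editorial note: "value" is deliberately typed int64_t so that
 * "value >= 0" tests bit 63, the TLBI IPAS2* NS field.  When NS is
 * set (value < 0) the non-secure Stage2 index is used even from
 * Secure state.
 */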
5160  
5161  static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5162                                      uint64_t value)
5163  {
5164      CPUState *cs = env_cpu(env);
5165      int mask = ipas2e1_tlbmask(env, value);
5166      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5167  
5168      if (tlb_force_broadcast(env)) {
5169          tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
5170      } else {
5171          tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
5172      }
5173  }
5174  
5175  static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
5176                                        uint64_t value)
5177  {
5178      CPUState *cs = env_cpu(env);
5179      int mask = ipas2e1_tlbmask(env, value);
5180      uint64_t pageaddr = sextract64(value << 12, 0, 56);
5181  
5182      tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
5183  }
5184  
5185  #ifdef TARGET_AARCH64
5186  typedef struct {
5187      uint64_t base;
5188      uint64_t length;
5189  } TLBIRange;
5190  
5191  static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg)
5192  {
5193      /*
5194       * Note that the TLBI range TG field encoding differs from both
5195       * TG0 and TG1 encodings.
5196       */
5197      switch (tg) {
5198      case 1:
5199          return Gran4K;
5200      case 2:
5201          return Gran16K;
5202      case 3:
5203          return Gran64K;
5204      default:
5205          return GranInvalid;
5206      }
5207  }
5208  
5209  static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx,
5210                                       uint64_t value)
5211  {
5212      unsigned int page_size_granule, page_shift, num, scale, exponent;
5213      /* Extract one bit to represent the va selector in use. */
5214      uint64_t select = sextract64(value, 36, 1);
5215      ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false);
5216      TLBIRange ret = { };
5217      ARMGranuleSize gran;
5218  
5219      page_size_granule = extract64(value, 46, 2);
5220      gran = tlbi_range_tg_to_gran_size(page_size_granule);
5221  
5222      /* The granule encoded in value must match the granule in use. */
5223      if (gran != param.gran) {
5224          qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n",
5225                        page_size_granule);
5226          return ret;
5227      }
5228  
5229      page_shift = arm_granule_bits(gran);
5230      num = extract64(value, 39, 5);
5231      scale = extract64(value, 44, 2);
5232      exponent = (5 * scale) + 1;
5233  
5234      ret.length = (num + 1) << (exponent + page_shift);
5235  
5236      if (param.select) {
5237          ret.base = sextract64(value, 0, 37);
5238      } else {
5239          ret.base = extract64(value, 0, 37);
5240      }
5241      if (param.ds) {
5242          /*
5243           * With DS=1, BaseADDR is always shifted left by 16 so that it
5244           * can address all 52 VA bits.  The input address is perforce
5245           * aligned on a 64k boundary regardless of the translation granule.
5246           */
5247          page_shift = 16;
5248      }
5249      ret.base <<= page_shift;
5250  
5251      return ret;
5252  }
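
/*
 * Worked example (values invented for illustration): with a 4K
 * granule (page_shift == 12), NUM == 3 and SCALE == 1, the decode
 * above yields exponent == 5 * 1 + 1 == 6 and
 *   ret.length = (3 + 1) << (6 + 12) == 4 << 18 == 1 MiB,
 * i.e. the operation covers 256 consecutive 4K pages starting at
 * BaseADDR << 12.
 */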
5253  
5254  static void do_rvae_write(CPUARMState *env, uint64_t value,
5255                            int idxmap, bool synced)
5256  {
5257      ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap);
5258      TLBIRange range;
5259      int bits;
5260  
5261      range = tlbi_aa64_get_range(env, one_idx, value);
5262      bits = tlbbits_for_regime(env, one_idx, range.base);
5263  
5264      if (synced) {
5265          tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env),
5266                                                    range.base,
5267                                                    range.length,
5268                                                    idxmap,
5269                                                    bits);
5270      } else {
5271          tlb_flush_range_by_mmuidx(env_cpu(env), range.base,
5272                                    range.length, idxmap, bits);
5273      }
5274  }
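
/*
 * Editorial note: ctz32(idxmap) picks the lowest set bit of the mmu
 * index mask; all indexes in a given mask share one translation
 * regime, so a single representative index is enough to look up the
 * granule and TBI parameters for the range decode.
 */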
5275  
5276  static void tlbi_aa64_rvae1_write(CPUARMState *env,
5277                                    const ARMCPRegInfo *ri,
5278                                    uint64_t value)
5279  {
5280      /*
5281       * Invalidate by VA range, EL1&0.
5282       * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1,
5283       * since we don't support flush-for-specific-ASID-only or
5284       * flush-last-level-only.
5285       */
5286  
5287      do_rvae_write(env, value, vae1_tlbmask(env),
5288                    tlb_force_broadcast(env));
5289  }
5290  
5291  static void tlbi_aa64_rvae1is_write(CPUARMState *env,
5292                                      const ARMCPRegInfo *ri,
5293                                      uint64_t value)
5294  {
5295      /*
5296       * Invalidate by VA range, Inner/Outer Shareable EL1&0.
5297       * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS,
5298       * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support
5299       * flush-for-specific-ASID-only, flush-last-level-only or inner/outer
5300       * shareable specific flushes.
5301       */
5302  
5303      do_rvae_write(env, value, vae1_tlbmask(env), true);
5304  }
5305  
5306  static void tlbi_aa64_rvae2_write(CPUARMState *env,
5307                                    const ARMCPRegInfo *ri,
5308                                    uint64_t value)
5309  {
5310      /*
5311       * Invalidate by VA range, EL2.
5312       * Currently handles all of RVAE2 and RVALE2,
5313       * since we don't support flush-for-specific-ASID-only or
5314       * flush-last-level-only.
5315       */
5316  
5317      do_rvae_write(env, value, vae2_tlbmask(env),
5318                    tlb_force_broadcast(env));
5321  }
5322  
5323  static void tlbi_aa64_rvae2is_write(CPUARMState *env,
5324                                      const ARMCPRegInfo *ri,
5325                                      uint64_t value)
5326  {
5327      /*
5328       * Invalidate by VA range, Inner/Outer Shareable, EL2.
5329       * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS,
5330       * since we don't support flush-for-specific-ASID-only,
5331       * flush-last-level-only or inner/outer shareable specific flushes.
5332       */
5333  
5334      do_rvae_write(env, value, vae2_tlbmask(env), true);
5336  }
5337  
5338  static void tlbi_aa64_rvae3_write(CPUARMState *env,
5339                                    const ARMCPRegInfo *ri,
5340                                    uint64_t value)
5341  {
5342      /*
5343       * Invalidate by VA range, EL3.
5344       * Currently handles all of RVAE3 and RVALE3,
5345       * since we don't support flush-for-specific-ASID-only or
5346       * flush-last-level-only.
5347       */
5348  
5349      do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
5350  }
5351  
5352  static void tlbi_aa64_rvae3is_write(CPUARMState *env,
5353                                      const ARMCPRegInfo *ri,
5354                                      uint64_t value)
5355  {
5356      /*
5357       * Invalidate by VA range, EL3, Inner/Outer Shareable.
5358       * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS,
5359       * since we don't support flush-for-specific-ASID-only,
5360       * flush-last-level-only or inner/outer specific flushes.
5361       */
5362  
5363      do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
5364  }
5365  
5366  static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
5367                                       uint64_t value)
5368  {
5369      do_rvae_write(env, value, ipas2e1_tlbmask(env, value),
5370                    tlb_force_broadcast(env));
5371  }
5372  
5373  static void tlbi_aa64_ripas2e1is_write(CPUARMState *env,
5374                                         const ARMCPRegInfo *ri,
5375                                         uint64_t value)
5376  {
5377      do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true);
5378  }
5379  #endif
5380  
5381  static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
5382                                        bool isread)
5383  {
5384      int cur_el = arm_current_el(env);
5385  
5386      if (cur_el < 2) {
5387          uint64_t hcr = arm_hcr_el2_eff(env);
5388  
5389          if (cur_el == 0) {
5390              if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5391                  if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
5392                      return CP_ACCESS_TRAP_EL2;
5393                  }
5394              } else {
5395                  if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
5396                      return CP_ACCESS_TRAP;
5397                  }
5398                  if (hcr & HCR_TDZ) {
5399                      return CP_ACCESS_TRAP_EL2;
5400                  }
5401              }
5402          } else if (hcr & HCR_TDZ) {
5403              return CP_ACCESS_TRAP_EL2;
5404          }
5405      }
5406      return CP_ACCESS_OK;
5407  }
5408  
5409  static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
5410  {
5411      ARMCPU *cpu = env_archcpu(env);
5412      int dzp_bit = 1 << 4;
5413  
5414      /* DZP indicates whether DC ZVA access is allowed */
5415      if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
5416          dzp_bit = 0;
5417      }
5418      return cpu->dcz_blocksize | dzp_bit;
5419  }
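
/*
 * Worked example (illustrative): DCZID_EL0 bits [3:0] hold log2 of
 * the DC ZVA block size in words and bit 4 (DZP) reads as 1 when
 * ZVA is prohibited.  With cpu->dcz_blocksize == 4 this reports
 * 16 words (64 bytes) per block, and DZP == 0 whenever the access
 * check above would pass.
 */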
5420  
5421  static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5422                                      bool isread)
5423  {
5424      if (!(env->pstate & PSTATE_SP)) {
5425          /*
5426           * Access to SP_EL0 is undefined if it's being used as
5427           * the stack pointer.
5428           */
5429          return CP_ACCESS_TRAP_UNCATEGORIZED;
5430      }
5431      return CP_ACCESS_OK;
5432  }
5433  
5434  static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
5435  {
5436      return env->pstate & PSTATE_SP;
5437  }
5438  
5439  static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
5440  {
5441      update_spsel(env, val);
5442  }
5443  
5444  static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5445                          uint64_t value)
5446  {
5447      ARMCPU *cpu = env_archcpu(env);
5448  
5449      if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
5450          /* M bit is RAZ/WI for PMSA with no MPU implemented */
5451          value &= ~SCTLR_M;
5452      }
5453  
5454      /* ??? Lots of these bits are not implemented.  */
5455  
5456      if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
5457          if (ri->opc1 == 6) { /* SCTLR_EL3 */
5458              value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
5459          } else {
5460              value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
5461                         SCTLR_ATA0 | SCTLR_ATA);
5462          }
5463      }
5464  
5465      if (raw_read(env, ri) == value) {
5466          /*
5467           * Skip the TLB flush if nothing actually changed; Linux likes
5468           * to do a lot of pointless SCTLR writes.
5469           */
5470          return;
5471      }
5472  
5473      raw_write(env, ri, value);
5474  
5475      /* This may enable/disable the MMU, so do a TLB flush.  */
5476      tlb_flush(CPU(cpu));
5477  
5478      if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
5479          /*
5480           * Normally we would always end the TB on an SCTLR write; see the
5481           * comment in ARMCPRegInfo sctlr initialization below for why Xscale
5482           * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
5483           * of hflags from the translator, so do it here.
5484           */
5485          arm_rebuild_hflags(env);
5486      }
5487  }
5488  
5489  static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
5490                             uint64_t value)
5491  {
5492      /*
5493       * Some MDCR_EL3 bits affect whether PMU counters are running:
5494       * if we are trying to change any of those then we must
5495       * bracket this update with PMU start/finish calls.
5496       */
5497      bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
5498  
5499      if (pmu_op) {
5500          pmu_op_start(env);
5501      }
5502      env->cp15.mdcr_el3 = value;
5503      if (pmu_op) {
5504          pmu_op_finish(env);
5505      }
5506  }
5507  
5508  static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5509                         uint64_t value)
5510  {
5511      /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */
5512      mdcr_el3_write(env, ri, value & SDCR_VALID_MASK);
5513  }
5514  
5515  static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5516                             uint64_t value)
5517  {
5518      /*
5519       * Some MDCR_EL2 bits affect whether PMU counters are running:
5520       * if we are trying to change any of those then we must
5521       * bracket this update with PMU start/finish calls.
5522       */
5523      bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
5524  
5525      if (pmu_op) {
5526          pmu_op_start(env);
5527      }
5528      env->cp15.mdcr_el2 = value;
5529      if (pmu_op) {
5530          pmu_op_finish(env);
5531      }
5532  }
5533  
5534  static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
5535                                   bool isread)
5536  {
5537      if (arm_current_el(env) == 1) {
5538          uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
5539  
5540          if (hcr_nv == (HCR_NV | HCR_NV1)) {
5541              return CP_ACCESS_TRAP_EL2;
5542          }
5543      }
5544      return CP_ACCESS_OK;
5545  }
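
/*
 * Editorial note: the equality test above means "NV and NV1 set, NV2
 * clear"; with NV2 also set the register access is redirected to
 * memory (see the nv2_redirect_offset fields) instead of trapping.
 */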
5546  
5547  #ifdef CONFIG_USER_ONLY
5548  /*
5549   * `IC IVAU` is handled to improve compatibility with JITs that dual-map their
5550   * code to get around W^X restrictions, where one region is writable and the
5551   * other is executable.
5552   *
5553   * Since the executable region is never written to we cannot detect code
5554   * changes when running in user mode, and rely on the emulated JIT telling us
5555   * that the code has changed by executing this instruction.
5556   */
5557  static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
5558                            uint64_t value)
5559  {
5560      uint64_t icache_line_mask, start_address, end_address;
5561      const ARMCPU *cpu;
5562  
5563      cpu = env_archcpu(env);
5564  
5565      icache_line_mask = (4 << extract32(cpu->ctr, 0, 4)) - 1;
5566      start_address = value & ~icache_line_mask;
5567      end_address = value | icache_line_mask;
5568  
5569      mmap_lock();
5570  
5571      tb_invalidate_phys_range(start_address, end_address);
5572  
5573      mmap_unlock();
5574  }
5575  #endif
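
/*
 * Illustrative example of the mask computation above (not from the
 * original source): CTR_EL0.IminLine (bits [3:0]) is log2 of the
 * smallest icache line in 4-byte words, so "4 << IminLine" is the
 * line size in bytes.  With IminLine == 4 the line is 64 bytes,
 * icache_line_mask == 63, and a write to address A invalidates the
 * translated code over [A & ~63, A | 63].
 */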
5576  
5577  static const ARMCPRegInfo v8_cp_reginfo[] = {
5578      /*
5579       * Minimal set of EL0-visible registers. This will need to be expanded
5580       * significantly for system emulation of AArch64 CPUs.
5581       */
5582      { .name = "NZCV", .state = ARM_CP_STATE_AA64,
5583        .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
5584        .access = PL0_RW, .type = ARM_CP_NZCV },
5585      { .name = "DAIF", .state = ARM_CP_STATE_AA64,
5586        .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
5587        .type = ARM_CP_NO_RAW,
5588        .access = PL0_RW, .accessfn = aa64_daif_access,
5589        .fieldoffset = offsetof(CPUARMState, daif),
5590        .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
5591      { .name = "FPCR", .state = ARM_CP_STATE_AA64,
5592        .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
5593        .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5594        .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
5595      { .name = "FPSR", .state = ARM_CP_STATE_AA64,
5596        .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
5597        .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
5598        .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
5599      { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
5600        .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
5601        .access = PL0_R, .type = ARM_CP_NO_RAW,
5602        .fgt = FGT_DCZID_EL0,
5603        .readfn = aa64_dczid_read },
5604      { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
5605        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
5606        .access = PL0_W, .type = ARM_CP_DC_ZVA,
5607  #ifndef CONFIG_USER_ONLY
5608        /* Avoid overhead of an access check that always passes in user-mode */
5609        .accessfn = aa64_zva_access,
5610        .fgt = FGT_DCZVA,
5611  #endif
5612      },
5613      { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
5614        .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
5615        .access = PL1_R, .type = ARM_CP_CURRENTEL },
5616      /*
5617       * Instruction cache ops. All of these except `IC IVAU` NOP because we
5618       * don't emulate caches.
5619       */
5620      { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
5621        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5622        .access = PL1_W, .type = ARM_CP_NOP,
5623        .fgt = FGT_ICIALLUIS,
5624        .accessfn = access_ticab },
5625      { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
5626        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5627        .access = PL1_W, .type = ARM_CP_NOP,
5628        .fgt = FGT_ICIALLU,
5629        .accessfn = access_tocu },
5630      { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
5631        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
5632        .access = PL0_W,
5633        .fgt = FGT_ICIVAU,
5634        .accessfn = access_tocu,
5635  #ifdef CONFIG_USER_ONLY
5636        .type = ARM_CP_NO_RAW,
5637        .writefn = ic_ivau_write
5638  #else
5639        .type = ARM_CP_NOP
5640  #endif
5641      },
5642      /* Cache ops: all NOPs since we don't emulate caches */
5643      { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
5644        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5645        .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
5646        .fgt = FGT_DCIVAC,
5647        .type = ARM_CP_NOP },
5648      { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
5649        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5650        .fgt = FGT_DCISW,
5651        .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5652      { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
5653        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
5654        .access = PL0_W, .type = ARM_CP_NOP,
5655        .fgt = FGT_DCCVAC,
5656        .accessfn = aa64_cacheop_poc_access },
5657      { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
5658        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5659        .fgt = FGT_DCCSW,
5660        .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5661      { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
5662        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
5663        .access = PL0_W, .type = ARM_CP_NOP,
5664        .fgt = FGT_DCCVAU,
5665        .accessfn = access_tocu },
5666      { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
5667        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
5668        .access = PL0_W, .type = ARM_CP_NOP,
5669        .fgt = FGT_DCCIVAC,
5670        .accessfn = aa64_cacheop_poc_access },
5671      { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
5672        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5673        .fgt = FGT_DCCISW,
5674        .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
5675      /* TLBI operations */
5676      { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
5677        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
5678        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5679        .fgt = FGT_TLBIVMALLE1IS,
5680        .writefn = tlbi_aa64_vmalle1is_write },
5681      { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
5682        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
5683        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5684        .fgt = FGT_TLBIVAE1IS,
5685        .writefn = tlbi_aa64_vae1is_write },
5686      { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
5687        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
5688        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5689        .fgt = FGT_TLBIASIDE1IS,
5690        .writefn = tlbi_aa64_vmalle1is_write },
5691      { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
5692        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
5693        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5694        .fgt = FGT_TLBIVAAE1IS,
5695        .writefn = tlbi_aa64_vae1is_write },
5696      { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
5697        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5698        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5699        .fgt = FGT_TLBIVALE1IS,
5700        .writefn = tlbi_aa64_vae1is_write },
5701      { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
5702        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5703        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
5704        .fgt = FGT_TLBIVAALE1IS,
5705        .writefn = tlbi_aa64_vae1is_write },
5706      { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
5707        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
5708        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5709        .fgt = FGT_TLBIVMALLE1,
5710        .writefn = tlbi_aa64_vmalle1_write },
5711      { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
5712        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
5713        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5714        .fgt = FGT_TLBIVAE1,
5715        .writefn = tlbi_aa64_vae1_write },
5716      { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
5717        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
5718        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5719        .fgt = FGT_TLBIASIDE1,
5720        .writefn = tlbi_aa64_vmalle1_write },
5721      { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
5722        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
5723        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5724        .fgt = FGT_TLBIVAAE1,
5725        .writefn = tlbi_aa64_vae1_write },
5726      { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
5727        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5728        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5729        .fgt = FGT_TLBIVALE1,
5730        .writefn = tlbi_aa64_vae1_write },
5731      { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
5732        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5733        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
5734        .fgt = FGT_TLBIVAALE1,
5735        .writefn = tlbi_aa64_vae1_write },
5736      { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
5737        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5738        .access = PL2_W, .type = ARM_CP_NO_RAW,
5739        .writefn = tlbi_aa64_ipas2e1is_write },
5740      { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
5741        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5742        .access = PL2_W, .type = ARM_CP_NO_RAW,
5743        .writefn = tlbi_aa64_ipas2e1is_write },
5744      { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
5745        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5746        .access = PL2_W, .type = ARM_CP_NO_RAW,
5747        .writefn = tlbi_aa64_alle1is_write },
5748      { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
5749        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
5750        .access = PL2_W, .type = ARM_CP_NO_RAW,
5751        .writefn = tlbi_aa64_alle1is_write },
5752      { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
5753        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5754        .access = PL2_W, .type = ARM_CP_NO_RAW,
5755        .writefn = tlbi_aa64_ipas2e1_write },
5756      { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
5757        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5758        .access = PL2_W, .type = ARM_CP_NO_RAW,
5759        .writefn = tlbi_aa64_ipas2e1_write },
5760      { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
5761        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5762        .access = PL2_W, .type = ARM_CP_NO_RAW,
5763        .writefn = tlbi_aa64_alle1_write },
5764      { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
5765        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
5766        .access = PL2_W, .type = ARM_CP_NO_RAW,
5767        .writefn = tlbi_aa64_alle1is_write },
5768  #ifndef CONFIG_USER_ONLY
5769      /* 64-bit address translation operations */
5770      { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
5771        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
5772        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5773        .fgt = FGT_ATS1E1R,
5774        .accessfn = at_s1e01_access, .writefn = ats_write64 },
5775      { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
5776        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
5777        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5778        .fgt = FGT_ATS1E1W,
5779        .accessfn = at_s1e01_access, .writefn = ats_write64 },
5780      { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
5781        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
5782        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5783        .fgt = FGT_ATS1E0R,
5784        .accessfn = at_s1e01_access, .writefn = ats_write64 },
5785      { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
5786        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
5787        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5788        .fgt = FGT_ATS1E0W,
5789        .accessfn = at_s1e01_access, .writefn = ats_write64 },
5790      { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
5791        .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
5792        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5793        .accessfn = at_e012_access, .writefn = ats_write64 },
5794      { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
5795        .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
5796        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5797        .accessfn = at_e012_access, .writefn = ats_write64 },
5798      { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
5799        .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
5800        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5801        .accessfn = at_e012_access, .writefn = ats_write64 },
5802      { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
5803        .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
5804        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5805        .accessfn = at_e012_access, .writefn = ats_write64 },
5806      /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
5807      { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
5808        .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
5809        .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5810        .writefn = ats_write64 },
5811      { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
5812        .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
5813        .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
5814        .writefn = ats_write64 },
5815      { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
5816        .type = ARM_CP_ALIAS,
5817        .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
5818        .access = PL1_RW, .resetvalue = 0,
5819        .fgt = FGT_PAR_EL1,
5820        .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
5821        .writefn = par_write },
5822  #endif
5823      /* TLB invalidate: last level of translation table walk only */
5824      { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
5825        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5826        .writefn = tlbimva_is_write },
5827      { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5828        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis,
5829        .writefn = tlbimvaa_is_write },
5830      { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5831        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5832        .writefn = tlbimva_write },
5833      { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5834        .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5835        .writefn = tlbimvaa_write },
5836      { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5837        .type = ARM_CP_NO_RAW, .access = PL2_W,
5838        .writefn = tlbimva_hyp_write },
5839      { .name = "TLBIMVALHIS",
5840        .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5841        .type = ARM_CP_NO_RAW, .access = PL2_W,
5842        .writefn = tlbimva_hyp_is_write },
5843      { .name = "TLBIIPAS2",
5844        .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5845        .type = ARM_CP_NO_RAW, .access = PL2_W,
5846        .writefn = tlbiipas2_hyp_write },
5847      { .name = "TLBIIPAS2IS",
5848        .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5849        .type = ARM_CP_NO_RAW, .access = PL2_W,
5850        .writefn = tlbiipas2is_hyp_write },
5851      { .name = "TLBIIPAS2L",
5852        .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5853        .type = ARM_CP_NO_RAW, .access = PL2_W,
5854        .writefn = tlbiipas2_hyp_write },
5855      { .name = "TLBIIPAS2LIS",
5856        .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5857        .type = ARM_CP_NO_RAW, .access = PL2_W,
5858        .writefn = tlbiipas2is_hyp_write },
5859      /* 32-bit cache operations */
5860      { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5861        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
5862      { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5863        .type = ARM_CP_NOP, .access = PL1_W },
5864      { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5865        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5866      { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5867        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5868      { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5869        .type = ARM_CP_NOP, .access = PL1_W },
5870      { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5871        .type = ARM_CP_NOP, .access = PL1_W },
5872      { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5873        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5874      { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5875        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5876      { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5877        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5878      { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5879        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5880      { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5881        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu },
5882      { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5883        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5884      { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5885        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5886      /* MMU Domain access control / MPU write buffer control */
5887      { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5888        .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5889        .writefn = dacr_write, .raw_writefn = raw_write,
5890        .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5891                               offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5892      { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5893        .type = ARM_CP_ALIAS,
5894        .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5895        .access = PL1_RW, .accessfn = access_nv1,
5896        .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
5897        .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5898      { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5899        .type = ARM_CP_ALIAS,
5900        .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5901        .access = PL1_RW, .accessfn = access_nv1,
5902        .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
5903        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5904      /*
5905       * We rely on the access checks not allowing the guest to write to the
5906       * state field when SPSel indicates that it's being used as the stack
5907       * pointer.
5908       */
5909      { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5910        .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5911        .access = PL1_RW, .accessfn = sp_el0_access,
5912        .type = ARM_CP_ALIAS,
5913        .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5914      { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5915        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5916        .nv2_redirect_offset = 0x240,
5917        .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP,
5918        .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5919      { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5920        .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5921        .type = ARM_CP_NO_RAW,
5922        .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5923      { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5924        .type = ARM_CP_ALIAS,
5925        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5926        .access = PL2_RW,
5927        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5928      { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5929        .type = ARM_CP_ALIAS,
5930        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5931        .access = PL2_RW,
5932        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5933      { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5934        .type = ARM_CP_ALIAS,
5935        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5936        .access = PL2_RW,
5937        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5938      { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5939        .type = ARM_CP_ALIAS,
5940        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5941        .access = PL2_RW,
5942        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5943      { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5944        .type = ARM_CP_IO,
5945        .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5946        .resetvalue = 0,
5947        .access = PL3_RW,
5948        .writefn = mdcr_el3_write,
5949        .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5950      { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO,
5951        .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5952        .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5953        .writefn = sdcr_write,
5954        .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5955  };
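
/*
 * Editor's illustration (not part of the upstream file): how one entry
 * above lines up with the architectural encoding. "DC_IVAC" is defined
 * with opc0=1, opc1=0, crn=7, crm=6, opc2=1, which is exactly the
 * AArch64 instruction "dc ivac, x0" (SYS #0, C7, C6, #1, x0). Because
 * QEMU does not model caches, its .type is ARM_CP_NOP: the access
 * checks (including the FGT trap bit) still apply, but the operation
 * itself is discarded.
 */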
5956  
5957  /* These are present only when EL1 supports AArch32 */
5958  static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
5959      { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5960        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5961        .access = PL2_RW,
5962        .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
5963        .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
5964      { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5965        .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5966        .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5967        .writefn = dacr_write, .raw_writefn = raw_write,
5968        .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5969      { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5970        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5971        .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
5972        .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5973  };
5974  
5975  static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5976  {
5977      ARMCPU *cpu = env_archcpu(env);
5978  
5979      if (arm_feature(env, ARM_FEATURE_V8)) {
5980          valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5981      } else {
5982          valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5983      }
5984  
5985      if (arm_feature(env, ARM_FEATURE_EL3)) {
5986          valid_mask &= ~HCR_HCD;
5987      } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5988          /*
5989           * Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5990           * However, if we're using the SMC PSCI conduit then QEMU is
5991           * effectively acting like EL3 firmware and so the guest at
5992           * EL2 should retain the ability to prevent EL1 from being
5993           * able to make SMC calls into the ersatz firmware, so in
5994           * that case HCR.TSC should be read/write.
5995           */
5996          valid_mask &= ~HCR_TSC;
5997      }
5998  
5999      if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6000          if (cpu_isar_feature(aa64_vh, cpu)) {
6001              valid_mask |= HCR_E2H;
6002          }
6003          if (cpu_isar_feature(aa64_ras, cpu)) {
6004              valid_mask |= HCR_TERR | HCR_TEA;
6005          }
6006          if (cpu_isar_feature(aa64_lor, cpu)) {
6007              valid_mask |= HCR_TLOR;
6008          }
6009          if (cpu_isar_feature(aa64_pauth, cpu)) {
6010              valid_mask |= HCR_API | HCR_APK;
6011          }
6012          if (cpu_isar_feature(aa64_mte, cpu)) {
6013              valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
6014          }
6015          if (cpu_isar_feature(aa64_scxtnum, cpu)) {
6016              valid_mask |= HCR_ENSCXT;
6017          }
6018          if (cpu_isar_feature(aa64_fwb, cpu)) {
6019              valid_mask |= HCR_FWB;
6020          }
6021          if (cpu_isar_feature(aa64_rme, cpu)) {
6022              valid_mask |= HCR_GPF;
6023          }
6024          if (cpu_isar_feature(aa64_nv, cpu)) {
6025              valid_mask |= HCR_NV | HCR_NV1 | HCR_AT;
6026          }
6027          if (cpu_isar_feature(aa64_nv2, cpu)) {
6028              valid_mask |= HCR_NV2;
6029          }
6030      }
6031  
6032      if (cpu_isar_feature(any_evt, cpu)) {
6033          valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4;
6034      } else if (cpu_isar_feature(any_half_evt, cpu)) {
6035          valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4;
6036      }
6037  
6038      /* Clear RES0 bits.  */
6039      value &= valid_mask;
6040  
6041      /*
6042       * These bits change the MMU setup:
6043       * HCR_VM enables stage 2 translation
6044       * HCR_PTW forbids certain page-table setups
6045       * HCR_DC disables stage1 and enables stage2 translation
6046       * HCR_DCT enables tagging on (disabled) stage1 translation
6047       * HCR_FWB changes the interpretation of stage2 descriptor bits
6048       * HCR_NV and HCR_NV1 affect interpretation of descriptor bits
6049       */
6050      if ((env->cp15.hcr_el2 ^ value) &
6051          (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) {
6052          tlb_flush(CPU(cpu));
6053      }
6054      env->cp15.hcr_el2 = value;
6055  
6056      /*
6057       * Updates to VI and VF require us to update the status of
6058       * virtual interrupts, which are the logical OR of these bits
6059       * and the state of the input lines from the GIC. (This requires
6060       * that we have the BQL, which is done by marking the
6061       * reginfo structs as ARM_CP_IO.)
6062       * Note that if a write to HCR pends a VIRQ, VFIQ, VINMI or
6063       * VFNMI, it can never be taken immediately,
6064       * because VIRQ, VFIQ, VINMI and VFNMI are masked unless running
6065       * at EL0 or EL1, and HCR can only be written at EL2.
6066       */
6067      g_assert(bql_locked());
6068      arm_cpu_update_virq(cpu);
6069      arm_cpu_update_vfiq(cpu);
6070      arm_cpu_update_vserr(cpu);
6071      if (cpu_isar_feature(aa64_nmi, cpu)) {
6072          arm_cpu_update_vinmi(cpu);
6073          arm_cpu_update_vfnmi(cpu);
6074      }
6075  }
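
/*
 * Editor's illustration (not part of the upstream file): the XOR in
 * do_hcr_write() detects whether any MMU-affecting bit actually
 * changed, so the (expensive) TLB flush is skipped for writes that
 * only toggle other bits. A minimal sketch of the same idiom, with a
 * hypothetical name:
 */
static inline bool hcr_demo_needs_tlb_flush(uint64_t old_hcr, uint64_t new_hcr)
{
    const uint64_t mmu_bits = HCR_VM | HCR_PTW | HCR_DC | HCR_DCT |
                              HCR_FWB | HCR_NV | HCR_NV1;

    /* XOR leaves a 1 in every bit position where the two values differ */
    return ((old_hcr ^ new_hcr) & mmu_bits) != 0;
}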
6076  
6077  static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
6078  {
6079      do_hcr_write(env, value, 0);
6080  }
6081  
6082  static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
6083                            uint64_t value)
6084  {
6085      /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
6086      value = deposit64(env->cp15.hcr_el2, 32, 32, value);
6087      do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
6088  }
6089  
6090  static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
6091                           uint64_t value)
6092  {
6093      /* Handle HCR write, i.e. write to low half of HCR_EL2 */
6094      value = deposit64(env->cp15.hcr_el2, 0, 32, value);
6095      do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
6096  }
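
/*
 * Editor's illustration (not part of the upstream file): the AArch32
 * HCR/HCR2 pair is simply the low and high 32-bit halves of hcr_el2.
 * For example, merging an HCR2 write of 0x1 into an existing value of
 * 0x80000000 yields 0x0000000180000000; the untouched half is then
 * reported to do_hcr_write() as already-valid bits. Hypothetical name:
 */
static inline uint64_t hcr2_demo_merge(uint64_t old_hcr_el2, uint32_t hcr2)
{
    /* Replace bits [63:32], keep bits [31:0] */
    return deposit64(old_hcr_el2, 32, 32, hcr2);
}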
6097  
6098  /*
6099   * Return the effective value of HCR_EL2 for the given security space.
6100   * Bits that are not included here:
6101   * RW       (read from SCR_EL3.RW as needed)
6102   */
6103  uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space)
6104  {
6105      uint64_t ret = env->cp15.hcr_el2;
6106  
6107      assert(space != ARMSS_Root);
6108  
6109      if (!arm_is_el2_enabled_secstate(env, space)) {
6110          /*
6111           * "This register has no effect if EL2 is not enabled in the
6112           * current Security state".  This is ARMv8.4-SecEL2 speak for
6113           * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
6114           *
6115           * Prior to that, the language was "In an implementation that
6116           * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
6117           * as if this field is 0 for all purposes other than a direct
6118           * read or write access of HCR_EL2".  With lots of enumeration
6119           * on a per-field basis.  In current QEMU, this condition is
6120           * arm_is_secure_below_el3().
6121           *
6122           * Since the v8.4 language applies to the entire register, and
6123           * appears to be backward compatible, use that.
6124           */
6125          return 0;
6126      }
6127  
6128      /*
6129       * For a cpu that supports both aarch64 and aarch32, we can set bits
6130       * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
6131       * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
6132       */
6133      if (!arm_el_is_aa64(env, 2)) {
6134          uint64_t aa32_valid;
6135  
6136          /*
6137           * These bits are up-to-date as of ARMv8.6.
6138           * For HCR, it's easiest to list just the 2 bits that are invalid.
6139           * For HCR2, list those that are valid.
6140           */
6141          aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
6142          aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
6143                         HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
6144          ret &= aa32_valid;
6145      }
6146  
6147      if (ret & HCR_TGE) {
6148          /* These bits are up-to-date as of ARMv8.6.  */
6149          if (ret & HCR_E2H) {
6150              ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
6151                       HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
6152                       HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
6153                       HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
6154                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
6155                       HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
6156          } else {
6157              ret |= HCR_FMO | HCR_IMO | HCR_AMO;
6158          }
6159          ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
6160                   HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
6161                   HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
6162                   HCR_TLOR);
6163      }
6164  
6165      return ret;
6166  }
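
/*
 * Editor's illustration (not part of the upstream file): a partial
 * mirror of the TGE handling above, restricted to the bits shown and
 * given a hypothetical name. With TGE set and E2H clear, physical
 * interrupts are routed to EL2 (FMO/IMO/AMO behave as 1) and the
 * virtual interrupt pending bits behave as 0.
 */
static inline uint64_t hcr_demo_eff_tge_no_e2h(uint64_t raw_hcr)
{
    if ((raw_hcr & (HCR_TGE | HCR_E2H)) == HCR_TGE) {
        raw_hcr |= HCR_FMO | HCR_IMO | HCR_AMO;  /* forced to 1 */
        raw_hcr &= ~(HCR_VF | HCR_VI | HCR_VSE); /* forced to 0 */
    }
    return raw_hcr;
}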
6167  
6168  uint64_t arm_hcr_el2_eff(CPUARMState *env)
6169  {
6170      if (arm_feature(env, ARM_FEATURE_M)) {
6171          return 0;
6172      }
6173      return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
6174  }
6175  
6176  /*
6177   * Corresponds to ARM pseudocode function ELIsInHost().
6178   */
6179  bool el_is_in_host(CPUARMState *env, int el)
6180  {
6181      uint64_t mask;
6182  
6183      /*
6184       * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff().
6185       * Perform the simplest bit tests first, and validate EL2 afterward.
6186       */
6187      if (el & 1) {
6188          return false; /* EL1 or EL3 */
6189      }
6190  
6191      /*
6192       * Note that hcr_write() checks isar_feature_aa64_vh(),
6193       * aka HaveVirtHostExt(), in allowing HCR_E2H to be set.
6194       */
6195      mask = el ? HCR_E2H : HCR_E2H | HCR_TGE;
6196      if ((env->cp15.hcr_el2 & mask) != mask) {
6197          return false;
6198      }
6199  
6200      /* TGE and/or E2H set: double check those bits are currently legal. */
6201      return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2);
6202  }
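
/*
 * Editor's illustration (not part of the upstream file): the mask
 * selection above encodes the two interesting ELIsInHost() cases.
 * EL2 is "in host" whenever E2H is set; EL0 additionally requires
 * TGE. Odd ELs (1 and 3) have already been rejected. Hypothetical
 * name:
 */
static inline uint64_t demo_el_is_in_host_mask(int el)
{
    assert(el == 0 || el == 2);
    return el ? HCR_E2H : HCR_E2H | HCR_TGE;
}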
6203  
6204  static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
6205                         uint64_t value)
6206  {
6207      ARMCPU *cpu = env_archcpu(env);
6208      uint64_t valid_mask = 0;
6209  
6210      /* FEAT_MOPS adds MSCEn and MCE2 */
6211      if (cpu_isar_feature(aa64_mops, cpu)) {
6212          valid_mask |= HCRX_MSCEN | HCRX_MCE2;
6213      }
6214  
6215      /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
6216      if (cpu_isar_feature(aa64_nmi, cpu)) {
6217          valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
6218      }
6219  
6220      /* Clear RES0 bits.  */
6221      env->cp15.hcrx_el2 = value & valid_mask;
6222  
6223      /*
6224       * Updates to VINMI and VFNMI require us to update the status of
6225       * the virtual NMIs, which are the logical OR of these bits
6226       * and the state of the input lines from the GIC. (This requires
6227       * that we have the BQL, which is done by marking the
6228       * reginfo structs as ARM_CP_IO.)
6229       * Note that if a write to HCRX pends a VINMI or VFNMI it is never
6230       * possible for it to be taken immediately, because VINMI and
6231       * VFNMI are masked unless running at EL0 or EL1, and HCRX
6232       * can only be written at EL2.
6233       */
6234      if (cpu_isar_feature(aa64_nmi, cpu)) {
6235          g_assert(bql_locked());
6236          arm_cpu_update_vinmi(cpu);
6237          arm_cpu_update_vfnmi(cpu);
6238      }
6239  }
6240  
6241  static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
6242                                    bool isread)
6243  {
6244      if (arm_current_el(env) == 2
6245          && arm_feature(env, ARM_FEATURE_EL3)
6246          && !(env->cp15.scr_el3 & SCR_HXEN)) {
6247          return CP_ACCESS_TRAP_EL3;
6248      }
6249      return CP_ACCESS_OK;
6250  }
6251  
6252  static const ARMCPRegInfo hcrx_el2_reginfo = {
6253      .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
6254      .type = ARM_CP_IO,
6255      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
6256      .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
6257      .nv2_redirect_offset = 0xa0,
6258      .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
6259  };
6260  
6261  /* Return the effective value of HCRX_EL2.  */
6262  uint64_t arm_hcrx_el2_eff(CPUARMState *env)
6263  {
6264      /*
6265       * The bits in this register behave as 0 for all purposes other than
6266       * direct reads of the register if SCR_EL3.HXEn is 0.
6267       * If EL2 is not enabled in the current security state, then the
6268       * bit may behave as if 0, or as if 1, depending on the bit.
6269       * For the moment, we treat the EL2-disabled case as taking
6270       * priority over the HXEn-disabled case. This matches the only
6271       * implemented bit for which the two cases give different answers
6272       * (MSCEn for FEAT_MOPS).
6273       * This may need to be revisited for future bits.
6274       */
6275      if (!arm_is_el2_enabled(env)) {
6276          uint64_t hcrx = 0;
6277          if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
6278              /* MSCEn behaves as 1 if EL2 is not enabled */
6279              hcrx |= HCRX_MSCEN;
6280          }
6281          return hcrx;
6282      }
6283      if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
6284          return 0;
6285      }
6286      return env->cp15.hcrx_el2;
6287  }
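
/*
 * Editor's illustration (not part of the upstream file): a consumer of
 * the effective value computed above, with a hypothetical name. It
 * shows why the EL2-disabled case must make MSCEn behave as 1: if it
 * behaved as 0 instead, FEAT_MOPS instructions would spuriously UNDEF
 * below EL2 on systems where EL2 is disabled or absent.
 */
static inline bool hcrx_demo_mops_usable(CPUARMState *env)
{
    return (arm_hcrx_el2_eff(env) & HCRX_MSCEN) != 0;
}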
6288  
6289  static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
6290                             uint64_t value)
6291  {
6292      /*
6293       * For A-profile AArch32 EL3, if NSACR.CP10
6294       * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
6295       */
6296      if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
6297          !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
6298          uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
6299          value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
6300      }
6301      env->cp15.cptr_el[2] = value;
6302  }
6303  
6304  static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
6305  {
6306      /*
6307       * For A-profile AArch32 EL3, if NSACR.CP10
6308       * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
6309       */
6310      uint64_t value = env->cp15.cptr_el[2];
6311  
6312      if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
6313          !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
6314          value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
6315      }
6316      return value;
6317  }
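
/*
 * Editor's illustration (not part of the upstream file): the
 * read-as-one/write-ignore idiom used by the pair of functions above,
 * in isolation and with hypothetical names. Bits in 'mask' keep their
 * stored value on writes and are forced to 1 on reads.
 */
static inline uint64_t rao_wi_demo_write(uint64_t old, uint64_t val,
                                         uint64_t mask)
{
    return (val & ~mask) | (old & mask); /* masked bits ignore the write */
}

static inline uint64_t rao_wi_demo_read(uint64_t stored, uint64_t mask)
{
    return stored | mask;                /* masked bits read as one */
}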
6318  
6319  static const ARMCPRegInfo el2_cp_reginfo[] = {
6320      { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
6321        .type = ARM_CP_IO,
6322        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
6323        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
6324        .nv2_redirect_offset = 0x78,
6325        .writefn = hcr_write, .raw_writefn = raw_write },
6326      { .name = "HCR", .state = ARM_CP_STATE_AA32,
6327        .type = ARM_CP_ALIAS | ARM_CP_IO,
6328        .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
6329        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
6330        .writefn = hcr_writelow },
6331      { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
6332        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
6333        .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
6334      { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
6335        .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
6336        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
6337        .access = PL2_RW,
6338        .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
6339      { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
6340        .type = ARM_CP_NV2_REDIRECT,
6341        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
6342        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
6343      { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
6344        .type = ARM_CP_NV2_REDIRECT,
6345        .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
6346        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
6347      { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
6348        .type = ARM_CP_ALIAS,
6349        .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
6350        .access = PL2_RW,
6351        .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
6352      { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
6353        .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
6354        .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
6355        .access = PL2_RW,
6356        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
6357      { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
6358        .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
6359        .access = PL2_RW, .writefn = vbar_write,
6360        .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
6361        .resetvalue = 0 },
6362      { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
6363        .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
6364        .access = PL3_RW, .type = ARM_CP_ALIAS,
6365        .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
6366      { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
6367        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
6368        .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
6369        .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
6370        .readfn = cptr_el2_read, .writefn = cptr_el2_write },
6371      { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
6372        .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
6373        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
6374        .resetvalue = 0 },
6375      { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
6376        .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
6377        .access = PL2_RW, .type = ARM_CP_ALIAS,
6378        .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
6379      { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
6380        .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
6381        .access = PL2_RW, .type = ARM_CP_CONST,
6382        .resetvalue = 0 },
6383      /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
6384      { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
6385        .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
6386        .access = PL2_RW, .type = ARM_CP_CONST,
6387        .resetvalue = 0 },
6388      { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
6389        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
6390        .access = PL2_RW, .type = ARM_CP_CONST,
6391        .resetvalue = 0 },
6392      { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
6393        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
6394        .access = PL2_RW, .type = ARM_CP_CONST,
6395        .resetvalue = 0 },
6396      { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
6397        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
6398        .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
6399        .raw_writefn = raw_write,
6400        .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
6401      { .name = "VTCR", .state = ARM_CP_STATE_AA32,
6402        .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6403        .type = ARM_CP_ALIAS,
6404        .access = PL2_RW, .accessfn = access_el3_aa32ns,
6405        .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) },
6406      { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
6407        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
6408        .access = PL2_RW,
6409        .nv2_redirect_offset = 0x40,
6410        /* no .writefn needed as this can't cause an ASID change */
6411        .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
6412      { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
6413        .cp = 15, .opc1 = 6, .crm = 2,
6414        .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6415        .access = PL2_RW, .accessfn = access_el3_aa32ns,
6416        .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
6417        .writefn = vttbr_write, .raw_writefn = raw_write },
6418      { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
6419        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
6420        .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write,
6421        .nv2_redirect_offset = 0x20,
6422        .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
6423      { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
6424        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
6425        .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
6426        .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
6427      { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6428        .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
6429        .access = PL2_RW, .resetvalue = 0,
6430        .nv2_redirect_offset = 0x90,
6431        .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
6432      { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
6433        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
6434        .access = PL2_RW, .resetvalue = 0,
6435        .writefn = vmsa_tcr_ttbr_el2_write, .raw_writefn = raw_write,
6436        .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6437      { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
6438        .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
6439        .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
6440      { .name = "TLBIALLNSNH",
6441        .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
6442        .type = ARM_CP_NO_RAW, .access = PL2_W,
6443        .writefn = tlbiall_nsnh_write },
6444      { .name = "TLBIALLNSNHIS",
6445        .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
6446        .type = ARM_CP_NO_RAW, .access = PL2_W,
6447        .writefn = tlbiall_nsnh_is_write },
6448      { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6449        .type = ARM_CP_NO_RAW, .access = PL2_W,
6450        .writefn = tlbiall_hyp_write },
6451      { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6452        .type = ARM_CP_NO_RAW, .access = PL2_W,
6453        .writefn = tlbiall_hyp_is_write },
6454      { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6455        .type = ARM_CP_NO_RAW, .access = PL2_W,
6456        .writefn = tlbimva_hyp_write },
6457      { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6458        .type = ARM_CP_NO_RAW, .access = PL2_W,
6459        .writefn = tlbimva_hyp_is_write },
6460      { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
6461        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
6462        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6463        .writefn = tlbi_aa64_alle2_write },
6464      { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
6465        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
6466        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6467        .writefn = tlbi_aa64_vae2_write },
6468      { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
6469        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
6470        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6471        .writefn = tlbi_aa64_vae2_write },
6472      { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
6473        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
6474        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6475        .writefn = tlbi_aa64_alle2is_write },
6476      { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
6477        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
6478        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6479        .writefn = tlbi_aa64_vae2is_write },
6480      { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
6481        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
6482        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
6483        .writefn = tlbi_aa64_vae2is_write },
6484  #ifndef CONFIG_USER_ONLY
6485      /*
6486       * Unlike the other EL2-related AT operations, these must
6487       * UNDEF from EL3 if EL2 is not implemented, which is why we
6488       * define them here rather than with the rest of the AT ops.
6489       */
6490      { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
6491        .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6492        .access = PL2_W, .accessfn = at_s1e2_access,
6493        .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6494        .writefn = ats_write64 },
6495      { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
6496        .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6497        .access = PL2_W, .accessfn = at_s1e2_access,
6498        .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
6499        .writefn = ats_write64 },
6500      /*
6501       * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
6502       * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
6503       * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
6504       * to behave as if SCR.NS was 1.
6505       */
6506      { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
6507        .access = PL2_W,
6508        .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6509      { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
6510        .access = PL2_W,
6511        .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
6512      { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
6513        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
6514        /*
6515         * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
6516         * reset values as IMPDEF. We choose to reset to 3 to comply with
6517         * both ARMv7 and ARMv8.
6518         */
6519        .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 3,
6520        .writefn = gt_cnthctl_write, .raw_writefn = raw_write,
6521        .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
6522      { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
6523        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
6524        .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
6525        .writefn = gt_cntvoff_write,
6526        .nv2_redirect_offset = 0x60,
6527        .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6528      { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
6529        .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
6530        .writefn = gt_cntvoff_write,
6531        .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
6532      { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
6533        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
6534        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6535        .type = ARM_CP_IO, .access = PL2_RW,
6536        .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6537      { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
6538        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
6539        .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
6540        .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
6541      { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
6542        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
6543        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
6544        .resetfn = gt_hyp_timer_reset,
6545        .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
6546      { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
6547        .type = ARM_CP_IO,
6548        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
6549        .access = PL2_RW,
6550        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
6551        .resetvalue = 0,
6552        .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
6553  #endif
6554      { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
6555        .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6556        .access = PL2_RW, .accessfn = access_el3_aa32ns,
6557        .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6558      { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
6559        .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
6560        .access = PL2_RW,
6561        .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
6562      { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
6563        .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
6564        .access = PL2_RW,
6565        .nv2_redirect_offset = 0x80,
6566        .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
6567  };
6568  
6569  static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
6570      { .name = "HCR2", .state = ARM_CP_STATE_AA32,
6571        .type = ARM_CP_ALIAS | ARM_CP_IO,
6572        .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
6573        .access = PL2_RW,
6574        .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
6575        .writefn = hcr_writehigh },
6576  };
6577  
6578  static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri,
6579                                    bool isread)
6580  {
6581      if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) {
6582          return CP_ACCESS_OK;
6583      }
6584      return CP_ACCESS_TRAP_UNCATEGORIZED;
6585  }
6586  
6587  static const ARMCPRegInfo el2_sec_cp_reginfo[] = {
6588      { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64,
6589        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0,
6590        .access = PL2_RW, .accessfn = sel2_access,
6591        .nv2_redirect_offset = 0x30,
6592        .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) },
6593      { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64,
6594        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2,
6595        .access = PL2_RW, .accessfn = sel2_access,
6596        .nv2_redirect_offset = 0x48,
6597        .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) },
6598  };
6599  
6600  static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
6601                                     bool isread)
6602  {
6603      /*
6604       * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
6605       * At Secure EL1 it traps to EL3 or EL2.
6606       */
6607      if (arm_current_el(env) == 3) {
6608          return CP_ACCESS_OK;
6609      }
6610      if (arm_is_secure_below_el3(env)) {
6611          if (env->cp15.scr_el3 & SCR_EEL2) {
6612              return CP_ACCESS_TRAP_EL2;
6613          }
6614          return CP_ACCESS_TRAP_EL3;
6615      }
6616      /* Writes from NS EL1 and NS EL2 are UNDEF; reads are allowed. */
6617      if (isread) {
6618          return CP_ACCESS_OK;
6619      }
6620      return CP_ACCESS_TRAP_UNCATEGORIZED;
6621  }
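
/*
 * Editor's summary (illustrative, not upstream) of the NSACR access
 * routing implemented above:
 *
 *   EL3                          -> OK (read/write)
 *   Secure EL1, SCR_EL3.EEL2=1   -> trap to EL2
 *   Secure EL1, SCR_EL3.EEL2=0   -> trap to EL3
 *   NS EL1/EL2, read             -> OK
 *   NS EL1/EL2, write            -> UNDEF
 */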
6622  
6623  static const ARMCPRegInfo el3_cp_reginfo[] = {
6624      { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
6625        .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
6626        .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
6627        .resetfn = scr_reset, .writefn = scr_write, .raw_writefn = raw_write },
6628      { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
6629        .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
6630        .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6631        .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
6632        .writefn = scr_write, .raw_writefn = raw_write },
6633      { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
6634        .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
6635        .access = PL3_RW, .resetvalue = 0,
6636        .fieldoffset = offsetof(CPUARMState, cp15.sder) },
6637      { .name = "SDER",
6638        .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
6639        .access = PL3_RW, .resetvalue = 0,
6640        .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
6641      { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6642        .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
6643        .writefn = vbar_write, .resetvalue = 0,
6644        .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
6645      { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
6646        .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
6647        .access = PL3_RW, .resetvalue = 0,
6648        .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
6649      { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
6650        .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
6651        .access = PL3_RW,
6652        /* no .writefn needed as this can't cause an ASID change */
6653        .resetvalue = 0,
6654        .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
6655      { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
6656        .type = ARM_CP_ALIAS,
6657        .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
6658        .access = PL3_RW,
6659        .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
6660      { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
6661        .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
6662        .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
6663      { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
6664        .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
6665        .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
6666      { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
6667        .type = ARM_CP_ALIAS,
6668        .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
6669        .access = PL3_RW,
6670        .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
6671      { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
6672        .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
6673        .access = PL3_RW, .writefn = vbar_write,
6674        .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
6675        .resetvalue = 0 },
6676      { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
6677        .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
6678        .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
6679        .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
6680      { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
6681        .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
6682        .access = PL3_RW, .resetvalue = 0,
6683        .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
6684      { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
6685        .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
6686        .access = PL3_RW, .type = ARM_CP_CONST,
6687        .resetvalue = 0 },
6688      { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
6689        .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
6690        .access = PL3_RW, .type = ARM_CP_CONST,
6691        .resetvalue = 0 },
6692      { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
6693        .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
6694        .access = PL3_RW, .type = ARM_CP_CONST,
6695        .resetvalue = 0 },
6696      { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
6697        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
6698        .access = PL3_W, .type = ARM_CP_NO_RAW,
6699        .writefn = tlbi_aa64_alle3is_write },
6700      { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
6701        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
6702        .access = PL3_W, .type = ARM_CP_NO_RAW,
6703        .writefn = tlbi_aa64_vae3is_write },
6704      { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
6705        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
6706        .access = PL3_W, .type = ARM_CP_NO_RAW,
6707        .writefn = tlbi_aa64_vae3is_write },
6708      { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
6709        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
6710        .access = PL3_W, .type = ARM_CP_NO_RAW,
6711        .writefn = tlbi_aa64_alle3_write },
6712      { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
6713        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
6714        .access = PL3_W, .type = ARM_CP_NO_RAW,
6715        .writefn = tlbi_aa64_vae3_write },
6716      { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
6717        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
6718        .access = PL3_W, .type = ARM_CP_NO_RAW,
6719        .writefn = tlbi_aa64_vae3_write },
6720  };
6721  
6722  #ifndef CONFIG_USER_ONLY
6723  
6724  static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
6725                                   bool isread)
6726  {
6727      if (arm_current_el(env) == 1) {
6728          /* This must be a FEAT_NV access */
6729          return CP_ACCESS_OK;
6730      }
6731      if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
6732          return CP_ACCESS_TRAP_UNCATEGORIZED;
6733      }
6734      return CP_ACCESS_OK;
6735  }
6736  
6737  static CPAccessResult access_el1nvpct(CPUARMState *env, const ARMCPRegInfo *ri,
6738                                        bool isread)
6739  {
6740      if (arm_current_el(env) == 1) {
6741          /* This must be a FEAT_NV access with NVx == 101 */
6742          if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVPCT)) {
6743              return CP_ACCESS_TRAP_EL2;
6744          }
6745      }
6746      return e2h_access(env, ri, isread);
6747  }
6748  
6749  static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
6750                                        bool isread)
6751  {
6752      if (arm_current_el(env) == 1) {
6753          /* This must be a FEAT_NV access with NVx == 101 */
6754          if (FIELD_EX64(env->cp15.cnthctl_el2, CNTHCTL, EL1NVVCT)) {
6755              return CP_ACCESS_TRAP_EL2;
6756          }
6757      }
6758      return e2h_access(env, ri, isread);
6759  }
6760  
6761  /* Test if system register redirection is to occur in the current state.  */
6762  static bool redirect_for_e2h(CPUARMState *env)
6763  {
6764      return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
6765  }
6766  
6767  static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
6768  {
6769      CPReadFn *readfn;
6770  
6771      if (redirect_for_e2h(env)) {
6772          /* Switch to the saved EL2 version of the register.  */
6773          ri = ri->opaque;
6774          readfn = ri->readfn;
6775      } else {
6776          readfn = ri->orig_readfn;
6777      }
6778      if (readfn == NULL) {
6779          readfn = raw_read;
6780      }
6781      return readfn(env, ri);
6782  }
6783  
6784  static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
6785                            uint64_t value)
6786  {
6787      CPWriteFn *writefn;
6788  
6789      if (redirect_for_e2h(env)) {
6790          /* Switch to the saved EL2 version of the register.  */
6791          ri = ri->opaque;
6792          writefn = ri->writefn;
6793      } else {
6794          writefn = ri->orig_writefn;
6795      }
6796      if (writefn == NULL) {
6797          writefn = raw_write;
6798      }
6799      writefn(env, ri, value);
6800  }
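
/*
 * Editor's illustration (not part of the upstream file): the dispatch
 * pattern shared by el2_e2h_read() and el2_e2h_write(), reduced to its
 * core and given a hypothetical name. Under E2H redirection the EL1
 * encoding's reginfo carries its EL2 twin in ri->opaque, so the same
 * opcode transparently reaches EL2 state.
 */
static inline const ARMCPRegInfo *e2h_demo_pick_reg(CPUARMState *env,
                                                    const ARMCPRegInfo *ri)
{
    return redirect_for_e2h(env) ? (const ARMCPRegInfo *)ri->opaque : ri;
}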
6801  
6802  static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
6803  {
6804      /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6805      return ri->orig_readfn(env, ri->opaque);
6806  }
6807  
6808  static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
6809                                uint64_t value)
6810  {
6811      /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
6812      ri->orig_writefn(env, ri->opaque, value);
6813  }
6814  
6815  static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
6816                                           const ARMCPRegInfo *ri,
6817                                           bool isread)
6818  {
6819      if (arm_current_el(env) == 1) {
6820          /*
6821           * This must be a FEAT_NV access (will either trap or redirect
6822           * to memory). None of the registers with _EL12 aliases want to
6823           * apply their trap controls for this kind of access, so don't
6824           * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
6825           */
6826          return CP_ACCESS_OK;
6827      }
6828      /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
6829      if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
6830          return CP_ACCESS_TRAP_UNCATEGORIZED;
6831      }
6832      if (ri->orig_accessfn) {
6833          return ri->orig_accessfn(env, ri->opaque, isread);
6834      }
6835      return CP_ACCESS_OK;
6836  }
6837  
6838  static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
6839  {
6840      struct E2HAlias {
6841          uint32_t src_key, dst_key, new_key;
6842          const char *src_name, *dst_name, *new_name;
6843          bool (*feature)(const ARMISARegisters *id);
6844      };
6845  
6846  #define K(op0, op1, crn, crm, op2) \
6847      ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
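
    /*
     * Editor's note (illustrative, not upstream): K() packs a system
     * register encoding into the same 32-bit key used by the cp_regs
     * hash table. For example, K(3, 0, 1, 0, 0) is op0=3, op1=0,
     * CRn=1, CRm=0, op2=0: the encoding of SCTLR_EL1, the first
     * src_key in the table below.
     */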
6848  
6849      static const struct E2HAlias aliases[] = {
6850          { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
6851            "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
6852          { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
6853            "CPACR", "CPTR_EL2", "CPACR_EL12" },
6854          { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
6855            "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
6856          { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
6857            "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
6858          { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
6859            "TCR_EL1", "TCR_EL2", "TCR_EL12" },
6860          { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
6861            "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
6862          { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
6863            "ELR_EL1", "ELR_EL2", "ELR_EL12" },
6864          { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
6865            "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
6866          { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
6867            "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
6868          { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
6869            "ESR_EL1", "ESR_EL2", "ESR_EL12" },
6870          { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
6871            "FAR_EL1", "FAR_EL2", "FAR_EL12" },
6872          { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
6873            "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
6874          { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
6875            "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
6876          { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
6877            "VBAR", "VBAR_EL2", "VBAR_EL12" },
6878          { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
6879            "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
6880          { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
6881            "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
6882  
6883          /*
6884           * Note that redirection of ZCR is mentioned in the description
6885           * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
6886           * not in the summary table.
6887           */
6888          { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
6889            "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
6890          { K(3, 0,  1, 2, 6), K(3, 4,  1, 2, 6), K(3, 5, 1, 2, 6),
6891            "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
6892  
6893          { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
6894            "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
6895  
6896          { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
6897            "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
6898            isar_feature_aa64_scxtnum },
6899  
6900          /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
6901          /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
6902      };
6903  #undef K
6904  
6905      size_t i;
6906  
6907      for (i = 0; i < ARRAY_SIZE(aliases); i++) {
6908          const struct E2HAlias *a = &aliases[i];
6909          ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
6910          bool ok;
6911  
6912          if (a->feature && !a->feature(&cpu->isar)) {
6913              continue;
6914          }
6915  
6916          src_reg = g_hash_table_lookup(cpu->cp_regs,
6917                                        (gpointer)(uintptr_t)a->src_key);
6918          dst_reg = g_hash_table_lookup(cpu->cp_regs,
6919                                        (gpointer)(uintptr_t)a->dst_key);
6920          g_assert(src_reg != NULL);
6921          g_assert(dst_reg != NULL);
6922  
6923          /* Cross-compare names to detect typos in the keys.  */
6924          g_assert(strcmp(src_reg->name, a->src_name) == 0);
6925          g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
6926  
6927          /* None of the core system registers use opaque; we will.  */
6928          g_assert(src_reg->opaque == NULL);
6929  
6930          /* Create alias before redirection so we dup the right data. */
6931          new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
6932  
6933          new_reg->name = a->new_name;
6934          new_reg->type |= ARM_CP_ALIAS;
6935          /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
6936          new_reg->access &= PL2_RW | PL3_RW;
6937          /* The new_reg op fields are as per new_key, not the target reg */
6938          new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
6939              >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
6940          new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
6941              >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
6942          new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
6943              >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
6944          new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
6945              >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
6946          new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
6947              >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
6948          new_reg->opaque = src_reg;
6949          new_reg->orig_readfn = src_reg->readfn ?: raw_read;
6950          new_reg->orig_writefn = src_reg->writefn ?: raw_write;
6951          new_reg->orig_accessfn = src_reg->accessfn;
6952          if (!new_reg->raw_readfn) {
6953              new_reg->raw_readfn = raw_read;
6954          }
6955          if (!new_reg->raw_writefn) {
6956              new_reg->raw_writefn = raw_write;
6957          }
6958          new_reg->readfn = el2_e2h_e12_read;
6959          new_reg->writefn = el2_e2h_e12_write;
6960          new_reg->accessfn = el2_e2h_e12_access;
6961  
6962          /*
6963           * If the _EL1 register is redirected to memory by FEAT_NV2,
6964           * then it shares the offset with the _EL12 register,
6965           * and which one is redirected depends on HCR_EL2.NV1.
6966           */
6967          if (new_reg->nv2_redirect_offset) {
6968              assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
6969              new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
6970              new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
6971          }
6972  
6973          ok = g_hash_table_insert(cpu->cp_regs,
6974                                   (gpointer)(uintptr_t)a->new_key, new_reg);
6975          g_assert(ok);
6976  
6977          src_reg->opaque = dst_reg;
6978          src_reg->orig_readfn = src_reg->readfn ?: raw_read;
6979          src_reg->orig_writefn = src_reg->writefn ?: raw_write;
6980          if (!src_reg->raw_readfn) {
6981              src_reg->raw_readfn = raw_read;
6982          }
6983          if (!src_reg->raw_writefn) {
6984              src_reg->raw_writefn = raw_write;
6985          }
6986          src_reg->readfn = el2_e2h_read;
6987          src_reg->writefn = el2_e2h_write;
6988      }
6989  }
6990  #endif
6991  
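/*
 * CTR_EL0 is readable from EL0 only when the applicable SCTLR.UCT bit is
 * set; when HCR_EL2.{E2H,TGE} == {1,1} the applicable SCTLR is SCTLR_EL2.
 * HCR_EL2.TID2 additionally traps EL0/EL1 reads to EL2.
 */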
6992  static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
6993                                       bool isread)
6994  {
6995      int cur_el = arm_current_el(env);
6996  
6997      if (cur_el < 2) {
6998          uint64_t hcr = arm_hcr_el2_eff(env);
6999  
7000          if (cur_el == 0) {
7001              if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
7002                  if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
7003                      return CP_ACCESS_TRAP_EL2;
7004                  }
7005              } else {
7006                  if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
7007                      return CP_ACCESS_TRAP;
7008                  }
7009                  if (hcr & HCR_TID2) {
7010                      return CP_ACCESS_TRAP_EL2;
7011                  }
7012              }
7013          } else if (hcr & HCR_TID2) {
7014              return CP_ACCESS_TRAP_EL2;
7015          }
7016      }
7021  
7022      return CP_ACCESS_OK;
7023  }
7024  
7025  /*
7026   * Check for traps to RAS registers, which are controlled
7027   * by HCR_EL2.TERR and SCR_EL3.TERR.
7028   */
7029  static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri,
7030                                    bool isread)
7031  {
7032      int el = arm_current_el(env);
7033  
7034      if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) {
7035          return CP_ACCESS_TRAP_EL2;
7036      }
7037      if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) {
7038          return CP_ACCESS_TRAP_EL3;
7039      }
7040      return CP_ACCESS_OK;
7041  }
7042  
7043  static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri)
7044  {
7045      int el = arm_current_el(env);
7046  
7047      if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
7048          return env->cp15.vdisr_el2;
7049      }
7050      if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
7051          return 0; /* RAZ/WI */
7052      }
7053      return env->cp15.disr_el1;
7054  }
7055  
7056  static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
7057  {
7058      int el = arm_current_el(env);
7059  
7060      if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) {
7061          env->cp15.vdisr_el2 = val;
7062          return;
7063      }
7064      if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) {
7065          return; /* RAZ/WI */
7066      }
7067      env->cp15.disr_el1 = val;
7068  }
7069  
7070  /*
7071   * Minimal RAS implementation with no Error Records.
7072   * Which means that all of the Error Record registers:
7073   *   ERXADDR_EL1
7074   *   ERXCTLR_EL1
7075   *   ERXFR_EL1
7076   *   ERXMISC0_EL1
7077   *   ERXMISC1_EL1
7078   *   ERXMISC2_EL1
7079   *   ERXMISC3_EL1
7080   *   ERXPFGCDN_EL1  (RASv1p1)
7081   *   ERXPFGCTL_EL1  (RASv1p1)
7082   *   ERXPFGF_EL1    (RASv1p1)
7083   *   ERXSTATUS_EL1
7084   * and
7085   *   ERRSELR_EL1
7086   * may generate UNDEFINED, which is the effect we get by not
7087   * listing them at all.
7088   *
7089   * These registers have fine-grained trap bits, but UNDEF-to-EL1
7090   * is higher priority than FGT-to-EL2 so we do not need to list them
7091   * in order to check for an FGT.
7092   */
7093  static const ARMCPRegInfo minimal_ras_reginfo[] = {
7094      { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH,
7095        .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1,
7096        .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1),
7097        .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write },
7098      { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH,
7099        .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0,
7100        .access = PL1_R, .accessfn = access_terr,
7101        .fgt = FGT_ERRIDR_EL1,
7102        .type = ARM_CP_CONST, .resetvalue = 0 },
7103      { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH,
7104        .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1,
7105        .nv2_redirect_offset = 0x500,
7106        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) },
7107      { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH,
7108        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3,
7109        .nv2_redirect_offset = 0x508,
7110        .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) },
7111  };
7112  
7113  /*
7114   * Return the exception level to which exceptions should be taken
7115   * via SVEAccessTrap.  This excludes the check for whether the exception
7116   * should be routed through AArch64.AdvSIMDFPAccessTrap.  That can easily
7117   * be found by testing 0 < fp_exception_el < sve_exception_el.
7118   *
7119   * C.f. the ARM pseudocode function CheckSVEEnabled.  Note that the
7120   * pseudocode does *not* separate out the FP trap checks, but has them
7121   * all in one function.
7122   */
7123  int sve_exception_el(CPUARMState *env, int el)
7124  {
7125  #ifndef CONFIG_USER_ONLY
7126      if (el <= 1 && !el_is_in_host(env, el)) {
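        /*
         * CPACR_EL1.ZEN: 0b01 traps EL0 accesses only; 0b00 and 0b10
         * trap both EL0 and EL1 accesses; 0b11 does not trap.
         */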
7127          switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
7128          case 1:
7129              if (el != 0) {
7130                  break;
7131              }
7132              /* fall through */
7133          case 0:
7134          case 2:
7135              return 1;
7136          }
7137      }
7138  
7139      if (el <= 2 && arm_is_el2_enabled(env)) {
7140          /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
7141          if (env->cp15.hcr_el2 & HCR_E2H) {
7142              switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
7143              case 1:
7144                  if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
7145                      break;
7146                  }
7147                  /* fall through */
7148              case 0:
7149              case 2:
7150                  return 2;
7151              }
7152          } else {
7153              if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
7154                  return 2;
7155              }
7156          }
7157      }
7158  
7159      /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
7160      if (arm_feature(env, ARM_FEATURE_EL3)
7161          && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
7162          return 3;
7163      }
7164  #endif
7165      return 0;
7166  }
7167  
7168  /*
7169   * Return the exception level to which exceptions should be taken for SME.
7170   * C.f. the ARM pseudocode function CheckSMEAccess.
7171   */
7172  int sme_exception_el(CPUARMState *env, int el)
7173  {
7174  #ifndef CONFIG_USER_ONLY
7175      if (el <= 1 && !el_is_in_host(env, el)) {
7176          switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) {
7177          case 1:
7178              if (el != 0) {
7179                  break;
7180              }
7181              /* fall through */
7182          case 0:
7183          case 2:
7184              return 1;
7185          }
7186      }
7187  
7188      if (el <= 2 && arm_is_el2_enabled(env)) {
7189          /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */
7190          if (env->cp15.hcr_el2 & HCR_E2H) {
7191              switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) {
7192              case 1:
7193                  if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) {
7194                      break;
7195                  }
7196                  /* fall through */
7197              case 0:
7198              case 2:
7199                  return 2;
7200              }
7201          } else {
7202              if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) {
7203                  return 2;
7204              }
7205          }
7206      }
7207  
7208      /* CPTR_EL3.  Since ESM is negative we must check for EL3.  */
7209      if (arm_feature(env, ARM_FEATURE_EL3)
7210          && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7211          return 3;
7212      }
7213  #endif
7214      return 0;
7215  }
7216  
7217  /*
7218   * Given that SVE is enabled, return the vector length for EL.
7219   */
7220  uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm)
7221  {
7222      ARMCPU *cpu = env_archcpu(env);
7223      uint64_t *cr = env->vfp.zcr_el;
7224      uint32_t map = cpu->sve_vq.map;
7225      uint32_t len = ARM_MAX_VQ - 1;
7226  
7227      if (sm) {
7228          cr = env->vfp.smcr_el;
7229          map = cpu->sme_vq.map;
7230      }
7231  
7232      if (el <= 1 && !el_is_in_host(env, el)) {
7233          len = MIN(len, 0xf & (uint32_t)cr[1]);
7234      }
7235      if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
7236          len = MIN(len, 0xf & (uint32_t)cr[2]);
7237      }
7238      if (arm_feature(env, ARM_FEATURE_EL3)) {
7239          len = MIN(len, 0xf & (uint32_t)cr[3]);
7240      }
7241  
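    /*
     * Pick the largest supported vector quantum not exceeding the
     * constrained length; bit N of the map means VQ N+1 is supported.
     * E.g. if LEN constrains to VQ 4 and only VQ {1, 2, 4} are
     * supported, the result is vq - 1 == 3.
     */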
7242      map &= MAKE_64BIT_MASK(0, len + 1);
7243      if (map != 0) {
7244          return 31 - clz32(map);
7245      }
7246  
7247      /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */
7248      assert(sm);
7249      return ctz32(cpu->sme_vq.map);
7250  }
7251  
7252  uint32_t sve_vqm1_for_el(CPUARMState *env, int el)
7253  {
7254      return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM));
7255  }
7256  
7257  static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7258                        uint64_t value)
7259  {
7260      int cur_el = arm_current_el(env);
7261      int old_len = sve_vqm1_for_el(env, cur_el);
7262      int new_len;
7263  
7264      /* Bits other than [3:0] are RAZ/WI.  */
7265      QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
7266      raw_write(env, ri, value & 0xf);
7267  
7268      /*
7269       * Because we arrived here, we know both FP and SVE are enabled;
7270       * otherwise we would have trapped access to the ZCR_ELn register.
7271       */
7272      new_len = sve_vqm1_for_el(env, cur_el);
7273      if (new_len < old_len) {
7274          aarch64_sve_narrow_vq(env, new_len + 1);
7275      }
7276  }
7277  
7278  static const ARMCPRegInfo zcr_reginfo[] = {
7279      { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
7280        .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
7281        .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
7282        .access = PL1_RW, .type = ARM_CP_SVE,
7283        .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
7284        .writefn = zcr_write, .raw_writefn = raw_write },
7285      { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
7286        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
7287        .access = PL2_RW, .type = ARM_CP_SVE,
7288        .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
7289        .writefn = zcr_write, .raw_writefn = raw_write },
7290      { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
7291        .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
7292        .access = PL3_RW, .type = ARM_CP_SVE,
7293        .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
7294        .writefn = zcr_write, .raw_writefn = raw_write },
7295  };
7296  
7297  #ifdef TARGET_AARCH64
7298  static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
7299                                      bool isread)
7300  {
7301      int el = arm_current_el(env);
7302  
7303      if (el == 0) {
7304          uint64_t sctlr = arm_sctlr(env, el);
7305          if (!(sctlr & SCTLR_EnTP2)) {
7306              return CP_ACCESS_TRAP;
7307          }
7308      }
7309      /* TODO: FEAT_FGT */
7310      if (el < 3
7311          && arm_feature(env, ARM_FEATURE_EL3)
7312          && !(env->cp15.scr_el3 & SCR_ENTP2)) {
7313          return CP_ACCESS_TRAP_EL3;
7314      }
7315      return CP_ACCESS_OK;
7316  }
7317  
7318  static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri,
7319                                        bool isread)
7320  {
7321      /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */
7322      if (arm_current_el(env) == 2
7323          && arm_feature(env, ARM_FEATURE_EL3)
7324          && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7325          return CP_ACCESS_TRAP_EL3;
7326      }
7327      return CP_ACCESS_OK;
7328  }
7329  
7330  static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri,
7331                                     bool isread)
7332  {
7333      if (arm_current_el(env) < 3
7334          && arm_feature(env, ARM_FEATURE_EL3)
7335          && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) {
7336          return CP_ACCESS_TRAP_EL3;
7337      }
7338      return CP_ACCESS_OK;
7339  }
7340  
7341  /* ResetSVEState */
7342  static void arm_reset_sve_state(CPUARMState *env)
7343  {
7344      memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs));
7345      /* Recall that FFR is stored as pregs[16]. */
7346      memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs));
7347      vfp_set_fpsr(env, 0x0800009f);
7348  }
7349  
7350  void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
7351  {
7352      uint64_t change = (env->svcr ^ new) & mask;
7353  
7354      if (change == 0) {
7355          return;
7356      }
7357      env->svcr ^= change;
7358  
7359      if (change & R_SVCR_SM_MASK) {
7360          arm_reset_sve_state(env);
7361      }
7362  
7363      /*
7364       * ResetSMEState.
7365       *
7366       * SetPSTATE_ZA zeros on enable and disable.  We can zero this only
7367       * on enable: while disabled, the storage is inaccessible and the
7368       * value does not matter.  We're not saving the storage in vmstate
7369       * when disabled either.
7370       */
7371      if (change & new & R_SVCR_ZA_MASK) {
7372          memset(env->zarray, 0, sizeof(env->zarray));
7373      }
7374  
7375      if (tcg_enabled()) {
7376          arm_rebuild_hflags(env);
7377      }
7378  }
7379  
7380  static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7381                         uint64_t value)
7382  {
7383      aarch64_set_svcr(env, value, -1);
7384  }
7385  
7386  static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7387                         uint64_t value)
7388  {
7389      int cur_el = arm_current_el(env);
7390      int old_len = sve_vqm1_for_el(env, cur_el);
7391      int new_len;
7392  
7393      QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
7394      value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
7395      raw_write(env, ri, value);
7396  
7397      /*
7398       * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage
7399       * when SVL is widened (old values kept, or zeros).  Choose to keep the
7400       * current values for simplicity.  But for QEMU internals, we must still
7401       * apply the narrower SVL to the Zregs and Pregs -- see the comment
7402       * above aarch64_sve_narrow_vq.
7403       */
7404      new_len = sve_vqm1_for_el(env, cur_el);
7405      if (new_len < old_len) {
7406          aarch64_sve_narrow_vq(env, new_len + 1);
7407      }
7408  }
7409  
7410  static const ARMCPRegInfo sme_reginfo[] = {
7411      { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64,
7412        .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5,
7413        .access = PL0_RW, .accessfn = access_tpidr2,
7414        .fgt = FGT_NTPIDR2_EL0,
7415        .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) },
7416      { .name = "SVCR", .state = ARM_CP_STATE_AA64,
7417        .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2,
7418        .access = PL0_RW, .type = ARM_CP_SME,
7419        .fieldoffset = offsetof(CPUARMState, svcr),
7420        .writefn = svcr_write, .raw_writefn = raw_write },
7421      { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
7422        .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
7423        .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
7424        .access = PL1_RW, .type = ARM_CP_SME,
7425        .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
7426        .writefn = smcr_write, .raw_writefn = raw_write },
7427      { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64,
7428        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6,
7429        .access = PL2_RW, .type = ARM_CP_SME,
7430        .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]),
7431        .writefn = smcr_write, .raw_writefn = raw_write },
7432      { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64,
7433        .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6,
7434        .access = PL3_RW, .type = ARM_CP_SME,
7435        .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]),
7436        .writefn = smcr_write, .raw_writefn = raw_write },
7437      { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64,
7438        .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6,
7439        .access = PL1_R, .accessfn = access_aa64_tid1,
7440        /*
7441         * IMPLEMENTOR = 0 (software)
7442         * REVISION    = 0 (implementation defined)
7443         * SMPS        = 0 (no streaming execution priority in QEMU)
7444         * AFFINITY    = 0 (streaming sve mode not shared with other PEs)
7445         */
7446        .type = ARM_CP_CONST, .resetvalue = 0, },
7447      /*
7448       * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0.
7449       */
7450      { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64,
7451        .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4,
7452        .access = PL1_RW, .accessfn = access_smpri,
7453        .fgt = FGT_NSMPRI_EL1,
7454        .type = ARM_CP_CONST, .resetvalue = 0 },
7455      { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64,
7456        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5,
7457        .nv2_redirect_offset = 0x1f8,
7458        .access = PL2_RW, .accessfn = access_smprimap,
7459        .type = ARM_CP_CONST, .resetvalue = 0 },
7460  };
7461  
7462  static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri,
7463                                    uint64_t value)
7464  {
7465      CPUState *cs = env_cpu(env);
7466  
7467      tlb_flush(cs);
7468  }
7469  
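/* FEAT_RME: GPCCR_EL3 controls the Granule Protection Checks. */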
7470  static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
7471                          uint64_t value)
7472  {
7473      /* L0GPTSZ is RO; other bits not mentioned are RES0. */
7474      uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK |
7475          R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
7476          R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
7477  
7478      env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
7479  }
7480  
7481  static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
7482  {
7483      env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ,
7484                                       env_archcpu(env)->reset_l0gptsz);
7485  }
7486  
7487  static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri,
7488                                      uint64_t value)
7489  {
7490      CPUState *cs = env_cpu(env);
7491  
7492      tlb_flush_all_cpus_synced(cs);
7493  }
7494  
7495  static const ARMCPRegInfo rme_reginfo[] = {
7496      { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64,
7497        .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6,
7498        .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset,
7499        .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) },
7500      { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64,
7501        .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4,
7502        .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) },
7503      { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64,
7504        .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5,
7505        .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) },
7506      { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64,
7507        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4,
7508        .access = PL3_W, .type = ARM_CP_NO_RAW,
7509        .writefn = tlbi_aa64_paall_write },
7510      { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64,
7511        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4,
7512        .access = PL3_W, .type = ARM_CP_NO_RAW,
7513        .writefn = tlbi_aa64_paallos_write },
7514      /*
7515       * QEMU does not have a way to invalidate by physical address, thus
7516       * invalidating a range of physical addresses is accomplished by
7517       * flushing all tlb entries in the outer shareable domain,
7518       * just like PAALLOS.
7519       */
7520      { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64,
7521        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7,
7522        .access = PL3_W, .type = ARM_CP_NO_RAW,
7523        .writefn = tlbi_aa64_paallos_write },
7524      { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64,
7525        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3,
7526        .access = PL3_W, .type = ARM_CP_NO_RAW,
7527        .writefn = tlbi_aa64_paallos_write },
7528      { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64,
7529        .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1,
7530        .access = PL3_W, .type = ARM_CP_NOP },
7531  };
7532  
7533  static const ARMCPRegInfo rme_mte_reginfo[] = {
7534      { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64,
7535        .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5,
7536        .access = PL3_W, .type = ARM_CP_NOP },
7537  };
7538  
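/* FEAT_NMI: accessors for the PSTATE.ALLINT interrupt mask bit. */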
7539  static void aa64_allint_write(CPUARMState *env, const ARMCPRegInfo *ri,
7540                                uint64_t value)
7541  {
7542      env->pstate = (env->pstate & ~PSTATE_ALLINT) | (value & PSTATE_ALLINT);
7543  }
7544  
7545  static uint64_t aa64_allint_read(CPUARMState *env, const ARMCPRegInfo *ri)
7546  {
7547      return env->pstate & PSTATE_ALLINT;
7548  }
7549  
7550  static CPAccessResult aa64_allint_access(CPUARMState *env,
7551                                           const ARMCPRegInfo *ri, bool isread)
7552  {
7553      if (!isread && arm_current_el(env) == 1 &&
7554          (arm_hcrx_el2_eff(env) & HCRX_TALLINT)) {
7555          return CP_ACCESS_TRAP_EL2;
7556      }
7557      return CP_ACCESS_OK;
7558  }
7559  
7560  static const ARMCPRegInfo nmi_reginfo[] = {
7561      { .name = "ALLINT", .state = ARM_CP_STATE_AA64,
7562        .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 4, .crm = 3,
7563        .type = ARM_CP_NO_RAW,
7564        .access = PL1_RW, .accessfn = aa64_allint_access,
7565        .fieldoffset = offsetof(CPUARMState, pstate),
7566        .writefn = aa64_allint_write, .readfn = aa64_allint_read,
7567        .resetfn = arm_cp_reset_ignore },
7568  };
7569  #endif /* TARGET_AARCH64 */
7570  
7571  static void define_pmu_regs(ARMCPU *cpu)
7572  {
7573      /*
7574       * v7 performance monitor control register: same implementor
7575       * field as main ID register, and we implement four counters in
7576       * addition to the cycle count register.
7577       */
7578      unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
7579      ARMCPRegInfo pmcr = {
7580          .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
7581          .access = PL0_RW,
7582          .fgt = FGT_PMCR_EL0,
7583          .type = ARM_CP_IO | ARM_CP_ALIAS,
7584          .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
7585          .accessfn = pmreg_access,
7586          .readfn = pmcr_read, .raw_readfn = raw_read,
7587          .writefn = pmcr_write, .raw_writefn = raw_write,
7588      };
7589      ARMCPRegInfo pmcr64 = {
7590          .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
7591          .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
7592          .access = PL0_RW, .accessfn = pmreg_access,
7593          .fgt = FGT_PMCR_EL0,
7594          .type = ARM_CP_IO,
7595          .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
7596          .resetvalue = cpu->isar.reset_pmcr_el0,
7597          .readfn = pmcr_read, .raw_readfn = raw_read,
7598          .writefn = pmcr_write, .raw_writefn = raw_write,
7599      };
7600  
7601      define_one_arm_cp_reg(cpu, &pmcr);
7602      define_one_arm_cp_reg(cpu, &pmcr64);
7603      for (i = 0; i < pmcrn; i++) {
7604          char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
7605          char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
7606          char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
7607          char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
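        /*
         * Counter n encodes n[4:3] in CRm[1:0] (base CRm 8 for the
         * counters, 12 for the event type registers) and n[2:0] in opc2;
         * e.g. PMEVCNTR10 uses CRm = 8 | (10 >> 3) = 9 and opc2 = 10 & 7 = 2.
         */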
7608          ARMCPRegInfo pmev_regs[] = {
7609              { .name = pmevcntr_name, .cp = 15, .crn = 14,
7610                .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7611                .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7612                .fgt = FGT_PMEVCNTRN_EL0,
7613                .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7614                .accessfn = pmreg_access_xevcntr },
7615              { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
7616                .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
7617                .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
7618                .type = ARM_CP_IO,
7619                .fgt = FGT_PMEVCNTRN_EL0,
7620                .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
7621                .raw_readfn = pmevcntr_rawread,
7622                .raw_writefn = pmevcntr_rawwrite },
7623              { .name = pmevtyper_name, .cp = 15, .crn = 14,
7624                .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
7625                .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
7626                .fgt = FGT_PMEVTYPERN_EL0,
7627                .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7628                .accessfn = pmreg_access },
7629              { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
7630                .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
7631                .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
7632                .fgt = FGT_PMEVTYPERN_EL0,
7633                .type = ARM_CP_IO,
7634                .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
7635                .raw_writefn = pmevtyper_rawwrite },
7636          };
7637          define_arm_cp_regs(cpu, pmev_regs);
7638          g_free(pmevcntr_name);
7639          g_free(pmevcntr_el0_name);
7640          g_free(pmevtyper_name);
7641          g_free(pmevtyper_el0_name);
7642      }
7643      if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
7644          ARMCPRegInfo v81_pmu_regs[] = {
7645              { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
7646                .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
7647                .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7648                .fgt = FGT_PMCEIDN_EL0,
7649                .resetvalue = extract64(cpu->pmceid0, 32, 32) },
7650              { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
7651                .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
7652                .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7653                .fgt = FGT_PMCEIDN_EL0,
7654                .resetvalue = extract64(cpu->pmceid1, 32, 32) },
7655          };
7656          define_arm_cp_regs(cpu, v81_pmu_regs);
7657      }
7658      if (cpu_isar_feature(any_pmuv3p4, cpu)) {
7659          static const ARMCPRegInfo v84_pmmir = {
7660              .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
7661              .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
7662              .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7663              .fgt = FGT_PMMIR_EL1,
7664              .resetvalue = 0
7665          };
7666          define_one_arm_cp_reg(cpu, &v84_pmmir);
7667      }
7668  }
7669  
7670  #ifndef CONFIG_USER_ONLY
7671  /*
7672   * We don't know until after realize whether there's a GICv3
7673   * attached, and that is what registers the gicv3 sysregs.
7674   * So we have to fill in the GIC fields in ID_PFR1 and ID_AA64PFR0_EL1
7675   * at runtime.
7676   */
7677  static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
7678  {
7679      ARMCPU *cpu = env_archcpu(env);
7680      uint64_t pfr1 = cpu->isar.id_pfr1;
7681  
7682      if (env->gicv3state) {
7683          pfr1 |= 1 << 28;
7684      }
7685      return pfr1;
7686  }
7687  
7688  static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
7689  {
7690      ARMCPU *cpu = env_archcpu(env);
7691      uint64_t pfr0 = cpu->isar.id_aa64pfr0;
7692  
7693      if (env->gicv3state) {
7694          pfr0 |= 1 << 24;
7695      }
7696      return pfr0;
7697  }
7698  #endif
7699  
7700  /*
7701   * Shared logic between LORID and the rest of the LOR* registers.
7702   * Secure state exclusion has already been dealt with.
7703   */
7704  static CPAccessResult access_lor_ns(CPUARMState *env,
7705                                      const ARMCPRegInfo *ri, bool isread)
7706  {
7707      int el = arm_current_el(env);
7708  
7709      if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
7710          return CP_ACCESS_TRAP_EL2;
7711      }
7712      if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
7713          return CP_ACCESS_TRAP_EL3;
7714      }
7715      return CP_ACCESS_OK;
7716  }
7717  
7718  static CPAccessResult access_lor_other(CPUARMState *env,
7719                                         const ARMCPRegInfo *ri, bool isread)
7720  {
7721      if (arm_is_secure_below_el3(env)) {
7722          /* Access denied in secure mode.  */
7723          return CP_ACCESS_TRAP;
7724      }
7725      return access_lor_ns(env, ri, isread);
7726  }
7727  
7728  /*
7729   * A trivial implementation of ARMv8.1-LOR leaves all of these
7730   * registers fixed at 0, which indicates that there are zero
7731   * supported Limited Ordering regions.
7732   */
7733  static const ARMCPRegInfo lor_reginfo[] = {
7734      { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
7735        .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
7736        .access = PL1_RW, .accessfn = access_lor_other,
7737        .fgt = FGT_LORSA_EL1,
7738        .type = ARM_CP_CONST, .resetvalue = 0 },
7739      { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
7740        .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
7741        .access = PL1_RW, .accessfn = access_lor_other,
7742        .fgt = FGT_LOREA_EL1,
7743        .type = ARM_CP_CONST, .resetvalue = 0 },
7744      { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
7745        .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
7746        .access = PL1_RW, .accessfn = access_lor_other,
7747        .fgt = FGT_LORN_EL1,
7748        .type = ARM_CP_CONST, .resetvalue = 0 },
7749      { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
7750        .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
7751        .access = PL1_RW, .accessfn = access_lor_other,
7752        .fgt = FGT_LORC_EL1,
7753        .type = ARM_CP_CONST, .resetvalue = 0 },
7754      { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
7755        .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
7756        .access = PL1_R, .accessfn = access_lor_ns,
7757        .fgt = FGT_LORID_EL1,
7758        .type = ARM_CP_CONST, .resetvalue = 0 },
7759  };
7760  
7761  #ifdef TARGET_AARCH64
7762  static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
7763                                     bool isread)
7764  {
7765      int el = arm_current_el(env);
7766  
7767      if (el < 2 &&
7768          arm_is_el2_enabled(env) &&
7769          !(arm_hcr_el2_eff(env) & HCR_APK)) {
7770          return CP_ACCESS_TRAP_EL2;
7771      }
7772      if (el < 3 &&
7773          arm_feature(env, ARM_FEATURE_EL3) &&
7774          !(env->cp15.scr_el3 & SCR_APK)) {
7775          return CP_ACCESS_TRAP_EL3;
7776      }
7777      return CP_ACCESS_OK;
7778  }
7779  
7780  static const ARMCPRegInfo pauth_reginfo[] = {
7781      { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7782        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
7783        .access = PL1_RW, .accessfn = access_pauth,
7784        .fgt = FGT_APDAKEY,
7785        .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
7786      { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7787        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
7788        .access = PL1_RW, .accessfn = access_pauth,
7789        .fgt = FGT_APDAKEY,
7790        .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
7791      { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7792        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
7793        .access = PL1_RW, .accessfn = access_pauth,
7794        .fgt = FGT_APDBKEY,
7795        .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
7796      { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7797        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
7798        .access = PL1_RW, .accessfn = access_pauth,
7799        .fgt = FGT_APDBKEY,
7800        .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
7801      { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7802        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
7803        .access = PL1_RW, .accessfn = access_pauth,
7804        .fgt = FGT_APGAKEY,
7805        .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
7806      { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7807        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
7808        .access = PL1_RW, .accessfn = access_pauth,
7809        .fgt = FGT_APGAKEY,
7810        .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
7811      { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7812        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
7813        .access = PL1_RW, .accessfn = access_pauth,
7814        .fgt = FGT_APIAKEY,
7815        .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
7816      { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7817        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
7818        .access = PL1_RW, .accessfn = access_pauth,
7819        .fgt = FGT_APIAKEY,
7820        .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
7821      { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
7822        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
7823        .access = PL1_RW, .accessfn = access_pauth,
7824        .fgt = FGT_APIBKEY,
7825        .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
7826      { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
7827        .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
7828        .access = PL1_RW, .accessfn = access_pauth,
7829        .fgt = FGT_APIBKEY,
7830        .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
7831  };
7832  
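/*
 * FEAT_TLBIRANGE: range-based TLB invalidation.  The writefns decode the
 * TG/SCALE/NUM/BaseADDR payload in the written value to compute the
 * affected VA range.
 */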
7833  static const ARMCPRegInfo tlbirange_reginfo[] = {
7834      { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64,
7835        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1,
7836        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7837        .fgt = FGT_TLBIRVAE1IS,
7838        .writefn = tlbi_aa64_rvae1is_write },
7839      { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64,
7840        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3,
7841        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7842        .fgt = FGT_TLBIRVAAE1IS,
7843        .writefn = tlbi_aa64_rvae1is_write },
7844     { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64,
7845        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5,
7846        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7847        .fgt = FGT_TLBIRVALE1IS,
7848        .writefn = tlbi_aa64_rvae1is_write },
7849      { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64,
7850        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7,
7851        .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW,
7852        .fgt = FGT_TLBIRVAALE1IS,
7853        .writefn = tlbi_aa64_rvae1is_write },
7854      { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64,
7855        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
7856        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7857        .fgt = FGT_TLBIRVAE1OS,
7858        .writefn = tlbi_aa64_rvae1is_write },
7859      { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64,
7860        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3,
7861        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7862        .fgt = FGT_TLBIRVAAE1OS,
7863        .writefn = tlbi_aa64_rvae1is_write },
7864     { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64,
7865        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5,
7866        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7867        .fgt = FGT_TLBIRVALE1OS,
7868        .writefn = tlbi_aa64_rvae1is_write },
7869      { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64,
7870        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7,
7871        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7872        .fgt = FGT_TLBIRVAALE1OS,
7873        .writefn = tlbi_aa64_rvae1is_write },
7874      { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64,
7875        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
7876        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7877        .fgt = FGT_TLBIRVAE1,
7878        .writefn = tlbi_aa64_rvae1_write },
7879      { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64,
7880        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3,
7881        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7882        .fgt = FGT_TLBIRVAAE1,
7883        .writefn = tlbi_aa64_rvae1_write },
7884     { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64,
7885        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5,
7886        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7887        .fgt = FGT_TLBIRVALE1,
7888        .writefn = tlbi_aa64_rvae1_write },
7889      { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64,
7890        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7,
7891        .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
7892        .fgt = FGT_TLBIRVAALE1,
7893        .writefn = tlbi_aa64_rvae1_write },
7894      { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64,
7895        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2,
7896        .access = PL2_W, .type = ARM_CP_NO_RAW,
7897        .writefn = tlbi_aa64_ripas2e1is_write },
7898      { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64,
7899        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6,
7900        .access = PL2_W, .type = ARM_CP_NO_RAW,
7901        .writefn = tlbi_aa64_ripas2e1is_write },
7902      { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64,
7903        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1,
7904        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7905        .writefn = tlbi_aa64_rvae2is_write },
7906     { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64,
7907        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5,
7908        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7909        .writefn = tlbi_aa64_rvae2is_write },
7910      { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64,
7911        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2,
7912        .access = PL2_W, .type = ARM_CP_NO_RAW,
7913        .writefn = tlbi_aa64_ripas2e1_write },
7914      { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64,
7915        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6,
7916        .access = PL2_W, .type = ARM_CP_NO_RAW,
7917        .writefn = tlbi_aa64_ripas2e1_write },
7918     { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64,
7919        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1,
7920        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7921        .writefn = tlbi_aa64_rvae2is_write },
7922     { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64,
7923        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5,
7924        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7925        .writefn = tlbi_aa64_rvae2is_write },
7926      { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64,
7927        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1,
7928        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7929        .writefn = tlbi_aa64_rvae2_write },
7930     { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64,
7931        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5,
7932        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7933        .writefn = tlbi_aa64_rvae2_write },
7934     { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64,
7935        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1,
7936        .access = PL3_W, .type = ARM_CP_NO_RAW,
7937        .writefn = tlbi_aa64_rvae3is_write },
7938     { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64,
7939        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5,
7940        .access = PL3_W, .type = ARM_CP_NO_RAW,
7941        .writefn = tlbi_aa64_rvae3is_write },
7942     { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64,
7943        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1,
7944        .access = PL3_W, .type = ARM_CP_NO_RAW,
7945        .writefn = tlbi_aa64_rvae3is_write },
7946     { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64,
7947        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5,
7948        .access = PL3_W, .type = ARM_CP_NO_RAW,
7949        .writefn = tlbi_aa64_rvae3is_write },
7950     { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64,
7951        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1,
7952        .access = PL3_W, .type = ARM_CP_NO_RAW,
7953        .writefn = tlbi_aa64_rvae3_write },
7954     { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64,
7955        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5,
7956        .access = PL3_W, .type = ARM_CP_NO_RAW,
7957        .writefn = tlbi_aa64_rvae3_write },
7958  };
7959  
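/*
 * FEAT_TLBIOS: outer-shareable TLB maintenance.  QEMU does not model
 * shareability domains for TLB flushes, so these reuse the
 * inner-shareable implementations.
 */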
7960  static const ARMCPRegInfo tlbios_reginfo[] = {
7961      { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64,
7962        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
7963        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7964        .fgt = FGT_TLBIVMALLE1OS,
7965        .writefn = tlbi_aa64_vmalle1is_write },
7966      { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
7967        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
7968        .fgt = FGT_TLBIVAE1OS,
7969        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7970        .writefn = tlbi_aa64_vae1is_write },
7971      { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
7972        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
7973        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7974        .fgt = FGT_TLBIASIDE1OS,
7975        .writefn = tlbi_aa64_vmalle1is_write },
7976      { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
7977        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
7978        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7979        .fgt = FGT_TLBIVAAE1OS,
7980        .writefn = tlbi_aa64_vae1is_write },
7981      { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
7982        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
7983        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7984        .fgt = FGT_TLBIVALE1OS,
7985        .writefn = tlbi_aa64_vae1is_write },
7986      { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
7987        .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
7988        .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW,
7989        .fgt = FGT_TLBIVAALE1OS,
7990        .writefn = tlbi_aa64_vae1is_write },
7991      { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
7992        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
7993        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7994        .writefn = tlbi_aa64_alle2is_write },
7995      { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
7996        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
7997        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
7998        .writefn = tlbi_aa64_vae2is_write },
7999      { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
8000        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
8001        .access = PL2_W, .type = ARM_CP_NO_RAW,
8002        .writefn = tlbi_aa64_alle1is_write },
8003      { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
8004        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
8005        .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF,
8006        .writefn = tlbi_aa64_vae2is_write },
8007      { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
8008        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
8009        .access = PL2_W, .type = ARM_CP_NO_RAW,
8010        .writefn = tlbi_aa64_alle1is_write },
8011      { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
8012        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
8013        .access = PL2_W, .type = ARM_CP_NOP },
8014      { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
8015        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
8016        .access = PL2_W, .type = ARM_CP_NOP },
8017      { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
8018        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
8019        .access = PL2_W, .type = ARM_CP_NOP },
8020      { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
8021        .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
8022        .access = PL2_W, .type = ARM_CP_NOP },
8023      { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
8024        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
8025        .access = PL3_W, .type = ARM_CP_NO_RAW,
8026        .writefn = tlbi_aa64_alle3is_write },
8027      { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
8028        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
8029        .access = PL3_W, .type = ARM_CP_NO_RAW,
8030        .writefn = tlbi_aa64_vae3is_write },
8031      { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
8032        .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
8033        .access = PL3_W, .type = ARM_CP_NO_RAW,
8034        .writefn = tlbi_aa64_vae3is_write },
8035  };
8036  
8037  static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
8038  {
8039      Error *err = NULL;
8040      uint64_t ret;
8041  
8042      /* Success sets NZCV = 0000.  */
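    /* QEMU stores Z inverted: env->ZF != 0 means the Z flag is clear. */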
8043      env->NF = env->CF = env->VF = 0, env->ZF = 1;
8044  
8045      if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
8046          /*
8047           * ??? Failed, for unknown reasons in the crypto subsystem.
8048           * The best we can do is log the reason and return the
8049           * timed-out indication to the guest.  There is no reason
8050           * we know to expect this failure to be transitory, so the
8051           * guest may well hang retrying the operation.
8052           */
8053          qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
8054                        ri->name, error_get_pretty(err));
8055          error_free(err);
8056  
8057          env->ZF = 0; /* NZCV = 0100 */
8058          return 0;
8059      }
8060      return ret;
8061  }
8062  
8063  /* We do not support re-seeding, so the two registers operate the same.  */
8064  static const ARMCPRegInfo rndr_reginfo[] = {
8065      { .name = "RNDR", .state = ARM_CP_STATE_AA64,
8066        .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
8067        .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
8068        .access = PL0_R, .readfn = rndr_readfn },
8069      { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
8070        .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
8071        .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
8072        .access = PL0_R, .readfn = rndr_readfn },
8073  };
8074  
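/*
 * DC CVAP/CVADP: clean data cache by VA to the point of persistence.
 * Under TCG this is emulated by writing the cache line's backing host
 * memory back to its underlying store.
 */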
8075  static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
8076                            uint64_t value)
8077  {
8078  #ifdef CONFIG_TCG
8079      ARMCPU *cpu = env_archcpu(env);
8080      /* CTR_EL0 System register -> DminLine, bits [19:16] */
8081      uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
8082      uint64_t vaddr_in = (uint64_t) value;
8083      uint64_t vaddr = vaddr_in & ~(dline_size - 1);
8084      void *haddr;
8085      int mem_idx = arm_env_mmu_index(env);
8086  
8087      /* This won't be crossing page boundaries */
8088      haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
8089      if (haddr) {
8090  #ifndef CONFIG_USER_ONLY
8091  
8092          ram_addr_t offset;
8093          MemoryRegion *mr;
8094  
8095          /* RCU lock is already being held */
8096          mr = memory_region_from_host(haddr, &offset);
8097  
8098          if (mr) {
8099              memory_region_writeback(mr, offset, dline_size);
8100          }
8101  #endif /* !CONFIG_USER_ONLY */
8102      }
8103  #else
8104      /* Handled by hardware accelerator. */
8105      g_assert_not_reached();
8106  #endif /* CONFIG_TCG */
8107  }
8108  
8109  static const ARMCPRegInfo dcpop_reg[] = {
8110      { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
8111        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
8112        .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
8113        .fgt = FGT_DCCVAP,
8114        .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
8115  };
8116  
8117  static const ARMCPRegInfo dcpodp_reg[] = {
8118      { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
8119        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
8120        .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
8121        .fgt = FGT_DCCVADP,
8122        .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
8123  };
8124  
8125  static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
8126                                         bool isread)
8127  {
8128      if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
8129          return CP_ACCESS_TRAP_EL2;
8130      }
8131  
8132      return CP_ACCESS_OK;
8133  }
8134  
8135  static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
8136                                   bool isread)
8137  {
8138      int el = arm_current_el(env);
8139      if (el < 2 && arm_is_el2_enabled(env)) {
8140          uint64_t hcr = arm_hcr_el2_eff(env);
8141          if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
8142              return CP_ACCESS_TRAP_EL2;
8143          }
8144      }
8145      if (el < 3 &&
8146          arm_feature(env, ARM_FEATURE_EL3) &&
8147          !(env->cp15.scr_el3 & SCR_ATA)) {
8148          return CP_ACCESS_TRAP_EL3;
8149      }
8150      return CP_ACCESS_OK;
8151  }
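
/*
 * Editorial sketch: the EL2 leg of access_mte() written as a standalone
 * predicate.  Tag accesses from EL0/EL1 trap to EL2 unless HCR_EL2.ATA
 * is set, except that with HCR_EL2.{E2H,TGE} == {1,1} there is no EL1
 * regime and this particular trap does not apply.  Uses the HCR_* masks
 * already available to this file; the function name is illustrative.
 */
static inline bool example_mte_traps_to_el2(uint64_t hcr)
{
    return !(hcr & HCR_ATA) && !((hcr & HCR_E2H) && (hcr & HCR_TGE));
}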
8152  
8153  static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri,
8154                                        bool isread)
8155  {
8156      CPAccessResult nv1 = access_nv1(env, ri, isread);
8157  
8158      if (nv1 != CP_ACCESS_OK) {
8159          return nv1;
8160      }
8161      return access_mte(env, ri, isread);
8162  }
8163  
8164  static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri,
8165                                        bool isread)
8166  {
8167      /*
8168       * TFSR_EL2: similar to generic access_mte(), but we need to
8169       * account for FEAT_NV. At EL1 this must be a FEAT_NV access;
8170       * if NV2 is enabled then we will redirect this to TFSR_EL1
8171       * after doing the HCR and SCR ATA traps; otherwise this will
8172       * be a trap to EL2 and the HCR/SCR traps do not apply.
8173       */
8174      int el = arm_current_el(env);
8175  
8176      if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) {
8177          return CP_ACCESS_OK;
8178      }
8179      if (el < 2 && arm_is_el2_enabled(env)) {
8180          uint64_t hcr = arm_hcr_el2_eff(env);
8181          if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
8182              return CP_ACCESS_TRAP_EL2;
8183          }
8184      }
8185      if (el < 3 &&
8186          arm_feature(env, ARM_FEATURE_EL3) &&
8187          !(env->cp15.scr_el3 & SCR_ATA)) {
8188          return CP_ACCESS_TRAP_EL3;
8189      }
8190      return CP_ACCESS_OK;
8191  }
8192  
8193  static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
8194  {
8195      return env->pstate & PSTATE_TCO;
8196  }
8197  
8198  static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
8199  {
8200      env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
8201  }
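
/*
 * Editorial sketch: PSTATE.TCO is a single bit carried in env->pstate,
 * and the write accessor above is a plain masked merge.  A guest toggles
 * it with "MSR TCO, #imm" to suspend tag checking; the helper below is
 * an illustrative restatement, not a QEMU function.
 */
static inline uint64_t example_set_tco(uint64_t pstate, bool tco)
{
    return (pstate & ~PSTATE_TCO) | (tco ? PSTATE_TCO : 0);
}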
8202  
8203  static const ARMCPRegInfo mte_reginfo[] = {
8204      { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
8205        .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
8206        .access = PL1_RW, .accessfn = access_mte,
8207        .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
8208      { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
8209        .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
8210        .access = PL1_RW, .accessfn = access_tfsr_el1,
8211        .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
8212        .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
8213      { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
8214        .type = ARM_CP_NV2_REDIRECT,
8215        .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
8216        .access = PL2_RW, .accessfn = access_tfsr_el2,
8217        .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
8218      { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
8219        .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
8220        .access = PL3_RW,
8221        .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
8222      { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
8223        .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
8224        .access = PL1_RW, .accessfn = access_mte,
8225        .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
8226      { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
8227        .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
8228        .access = PL1_RW, .accessfn = access_mte,
8229        .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
8230      { .name = "TCO", .state = ARM_CP_STATE_AA64,
8231        .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
8232        .type = ARM_CP_NO_RAW,
8233        .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
8234      { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
8235        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
8236        .type = ARM_CP_NOP, .access = PL1_W,
8237        .fgt = FGT_DCIVAC,
8238        .accessfn = aa64_cacheop_poc_access },
8239      { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
8240        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
8241        .fgt = FGT_DCISW,
8242        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8243      { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
8244        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
8245        .type = ARM_CP_NOP, .access = PL1_W,
8246        .fgt = FGT_DCIVAC,
8247        .accessfn = aa64_cacheop_poc_access },
8248      { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
8249        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
8250        .fgt = FGT_DCISW,
8251        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8252      { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
8253        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
8254        .fgt = FGT_DCCSW,
8255        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8256      { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
8257        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
8258        .fgt = FGT_DCCSW,
8259        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8260      { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
8261        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
8262        .fgt = FGT_DCCISW,
8263        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8264      { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
8265        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
8266        .fgt = FGT_DCCISW,
8267        .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
8268  };
8269  
8270  static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
8271      { .name = "TCO", .state = ARM_CP_STATE_AA64,
8272        .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
8273        .type = ARM_CP_CONST, .access = PL0_RW },
8274  };
8275  
8276  static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
8277      { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
8278        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
8279        .type = ARM_CP_NOP, .access = PL0_W,
8280        .fgt = FGT_DCCVAC,
8281        .accessfn = aa64_cacheop_poc_access },
8282      { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
8283        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
8284        .type = ARM_CP_NOP, .access = PL0_W,
8285        .fgt = FGT_DCCVAC,
8286        .accessfn = aa64_cacheop_poc_access },
8287      { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
8288        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
8289        .type = ARM_CP_NOP, .access = PL0_W,
8290        .fgt = FGT_DCCVAP,
8291        .accessfn = aa64_cacheop_poc_access },
8292      { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
8293        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
8294        .type = ARM_CP_NOP, .access = PL0_W,
8295        .fgt = FGT_DCCVAP,
8296        .accessfn = aa64_cacheop_poc_access },
8297      { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
8298        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
8299        .type = ARM_CP_NOP, .access = PL0_W,
8300        .fgt = FGT_DCCVADP,
8301        .accessfn = aa64_cacheop_poc_access },
8302      { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
8303        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
8304        .type = ARM_CP_NOP, .access = PL0_W,
8305        .fgt = FGT_DCCVADP,
8306        .accessfn = aa64_cacheop_poc_access },
8307      { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
8308        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
8309        .type = ARM_CP_NOP, .access = PL0_W,
8310        .fgt = FGT_DCCIVAC,
8311        .accessfn = aa64_cacheop_poc_access },
8312      { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
8313        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
8314        .type = ARM_CP_NOP, .access = PL0_W,
8315        .fgt = FGT_DCCIVAC,
8316        .accessfn = aa64_cacheop_poc_access },
8317      { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
8318        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
8319        .access = PL0_W, .type = ARM_CP_DC_GVA,
8320  #ifndef CONFIG_USER_ONLY
8321        /* Avoid overhead of an access check that always passes in user-mode */
8322        .accessfn = aa64_zva_access,
8323        .fgt = FGT_DCZVA,
8324  #endif
8325      },
8326      { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
8327        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
8328        .access = PL0_W, .type = ARM_CP_DC_GZVA,
8329  #ifndef CONFIG_USER_ONLY
8330        /* Avoid overhead of an access check that always passes in user-mode */
8331        .accessfn = aa64_zva_access,
8332        .fgt = FGT_DCZVA,
8333  #endif
8334      },
8335  };
8336  
8337  static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri,
8338                                       bool isread)
8339  {
8340      uint64_t hcr = arm_hcr_el2_eff(env);
8341      int el = arm_current_el(env);
8342  
8343      if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) {
8344          if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) {
8345              if (hcr & HCR_TGE) {
8346                  return CP_ACCESS_TRAP_EL2;
8347              }
8348              return CP_ACCESS_TRAP;
8349          }
8350      } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) {
8351          return CP_ACCESS_TRAP_EL2;
8352      }
8353      if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) {
8354          return CP_ACCESS_TRAP_EL2;
8355      }
8356      if (el < 3
8357          && arm_feature(env, ARM_FEATURE_EL3)
8358          && !(env->cp15.scr_el3 & SCR_ENSCXT)) {
8359          return CP_ACCESS_TRAP_EL3;
8360      }
8361      return CP_ACCESS_OK;
8362  }
8363  
8364  static CPAccessResult access_scxtnum_el1(CPUARMState *env,
8365                                           const ARMCPRegInfo *ri,
8366                                           bool isread)
8367  {
8368      CPAccessResult nv1 = access_nv1(env, ri, isread);
8369  
8370      if (nv1 != CP_ACCESS_OK) {
8371          return nv1;
8372      }
8373      return access_scxtnum(env, ri, isread);
8374  }
8375  
8376  static const ARMCPRegInfo scxtnum_reginfo[] = {
8377      { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64,
8378        .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7,
8379        .access = PL0_RW, .accessfn = access_scxtnum,
8380        .fgt = FGT_SCXTNUM_EL0,
8381        .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) },
8382      { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64,
8383        .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7,
8384        .access = PL1_RW, .accessfn = access_scxtnum_el1,
8385        .fgt = FGT_SCXTNUM_EL1,
8386        .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
8387        .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
8388      { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
8389        .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
8390        .access = PL2_RW, .accessfn = access_scxtnum,
8391        .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) },
8392      { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64,
8393        .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7,
8394        .access = PL3_RW,
8395        .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) },
8396  };
8397  
8398  static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri,
8399                                   bool isread)
8400  {
8401      if (arm_current_el(env) == 2 &&
8402          arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) {
8403          return CP_ACCESS_TRAP_EL3;
8404      }
8405      return CP_ACCESS_OK;
8406  }
8407  
8408  static const ARMCPRegInfo fgt_reginfo[] = {
8409      { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64,
8410        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
8411        .nv2_redirect_offset = 0x1b8,
8412        .access = PL2_RW, .accessfn = access_fgt,
8413        .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) },
8414      { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64,
8415        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5,
8416        .nv2_redirect_offset = 0x1c0,
8417        .access = PL2_RW, .accessfn = access_fgt,
8418        .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) },
8419      { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64,
8420        .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4,
8421        .nv2_redirect_offset = 0x1d0,
8422        .access = PL2_RW, .accessfn = access_fgt,
8423        .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) },
8424      { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64,
8425        .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5,
8426        .nv2_redirect_offset = 0x1d8,
8427        .access = PL2_RW, .accessfn = access_fgt,
8428        .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) },
8429      { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64,
8430        .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6,
8431        .nv2_redirect_offset = 0x1c8,
8432        .access = PL2_RW, .accessfn = access_fgt,
8433        .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) },
8434  };
8435  
8436  static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri,
8437                         uint64_t value)
8438  {
8439      /*
8440       * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee
8441       * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything
8442       * about the RESS bits at the top -- we choose the "generate an EL2
8443       * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let
8444       * the ptw.c code detect the resulting invalid address).
8445       */
8446      env->cp15.vncr_el2 = value & ~0xfffULL;
8447  }
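
/*
 * Editorial sketch: how the masked base is consumed.  An NV2-redirected
 * register access becomes a memory access at VNCR_EL2 plus the regdef's
 * nv2_redirect_offset (e.g. 0x188 for SCXTNUM_EL1 above); because the
 * low 12 bits of the base are cleared, base + offset stays 8-byte
 * aligned for the 8-byte-aligned offsets used in this file.  Name and
 * shape are illustrative only.
 */
static inline uint64_t example_vncr_redirect_addr(uint64_t vncr_el2,
                                                  unsigned offset)
{
    return (vncr_el2 & ~0xfffULL) + offset;
}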
8448  
8449  static const ARMCPRegInfo nv2_reginfo[] = {
8450      { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64,
8451        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0,
8452        .access = PL2_RW,
8453        .writefn = vncr_write,
8454        .nv2_redirect_offset = 0xb0,
8455        .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
8456  };
8457  
8458  #endif /* TARGET_AARCH64 */
8459  
8460  static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
8461                                       bool isread)
8462  {
8463      int el = arm_current_el(env);
8464  
8465      if (el == 0) {
8466          uint64_t sctlr = arm_sctlr(env, el);
8467          if (!(sctlr & SCTLR_EnRCTX)) {
8468              return CP_ACCESS_TRAP;
8469          }
8470      } else if (el == 1) {
8471          uint64_t hcr = arm_hcr_el2_eff(env);
8472          if (hcr & HCR_NV) {
8473              return CP_ACCESS_TRAP_EL2;
8474          }
8475      }
8476      return CP_ACCESS_OK;
8477  }
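
/*
 * Editorial sketch: the EL0 leg of access_predinv().  The RCTX
 * prediction-restriction operations are permitted from EL0 only when the
 * EnRCTX enable bit in the effective SCTLR is set; otherwise they trap.
 * Illustrative predicate, not a QEMU function.
 */
static inline bool example_el0_rctx_allowed(uint64_t sctlr)
{
    return (sctlr & SCTLR_EnRCTX) != 0;
}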
8478  
8479  static const ARMCPRegInfo predinv_reginfo[] = {
8480      { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
8481        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
8482        .fgt = FGT_CFPRCTX,
8483        .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8484      { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
8485        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
8486        .fgt = FGT_DVPRCTX,
8487        .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8488      { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
8489        .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
8490        .fgt = FGT_CPPRCTX,
8491        .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8492      /* Note that the AArch32 opcodes have a different OPC1. */
8495      { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
8496        .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
8497        .fgt = FGT_CFPRCTX,
8498        .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8499      { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
8500        .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
8501        .fgt = FGT_DVPRCTX,
8502        .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8503      { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
8504        .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
8505        .fgt = FGT_CPPRCTX,
8506        .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
8507  };
8508  
8509  static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
8510  {
8511      /* Read the high 32 bits of the current CCSIDR */
8512      return extract64(ccsidr_read(env, ri), 32, 32);
8513  }
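
/*
 * Editorial sketch: extract64(x, 32, 32) is simply the upper word, i.e.
 * CCSIDR2 is architecturally the top half of the 64-bit CCSIDR value.
 * Illustrative helper only.
 */
static inline uint32_t example_upper_word(uint64_t ccsidr)
{
    return (uint32_t)(ccsidr >> 32);
}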
8514  
8515  static const ARMCPRegInfo ccsidr2_reginfo[] = {
8516      { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
8517        .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
8518        .access = PL1_R,
8519        .accessfn = access_tid4,
8520        .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
8521  };
8522  
8523  static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
8524                                         bool isread)
8525  {
8526      if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
8527          return CP_ACCESS_TRAP_EL2;
8528      }
8529  
8530      return CP_ACCESS_OK;
8531  }
8532  
8533  static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
8534                                         bool isread)
8535  {
8536      if (arm_feature(env, ARM_FEATURE_V8)) {
8537          return access_aa64_tid3(env, ri, isread);
8538      }
8539  
8540      return CP_ACCESS_OK;
8541  }
8542  
8543  static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
8544                                       bool isread)
8545  {
8546      if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
8547          return CP_ACCESS_TRAP_EL2;
8548      }
8549  
8550      return CP_ACCESS_OK;
8551  }
8552  
8553  static CPAccessResult access_joscr_jmcr(CPUARMState *env,
8554                                          const ARMCPRegInfo *ri, bool isread)
8555  {
8556      /*
8557       * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only
8558       * in v7A, not in v8A.
8559       */
8560      if (!arm_feature(env, ARM_FEATURE_V8) &&
8561          arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) &&
8562          (env->cp15.hstr_el2 & HSTR_TJDBX)) {
8563          return CP_ACCESS_TRAP_EL2;
8564      }
8565      return CP_ACCESS_OK;
8566  }
8567  
8568  static const ARMCPRegInfo jazelle_regs[] = {
8569      { .name = "JIDR",
8570        .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
8571        .access = PL1_R, .accessfn = access_jazelle,
8572        .type = ARM_CP_CONST, .resetvalue = 0 },
8573      { .name = "JOSCR",
8574        .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
8575        .accessfn = access_joscr_jmcr,
8576        .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
8577      { .name = "JMCR",
8578        .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
8579        .accessfn = access_joscr_jmcr,
8580        .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
8581  };
8582  
8583  static const ARMCPRegInfo contextidr_el2 = {
8584      .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
8585      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
8586      .access = PL2_RW,
8587      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2])
8588  };
8589  
8590  static const ARMCPRegInfo vhe_reginfo[] = {
8591      { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
8592        .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
8593        .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
8594        .raw_writefn = raw_write,
8595        .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
8596  #ifndef CONFIG_USER_ONLY
8597      { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
8598        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
8599        .fieldoffset =
8600          offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
8601        .type = ARM_CP_IO, .access = PL2_RW,
8602        .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
8603      { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
8604        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
8605        .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
8606        .resetfn = gt_hv_timer_reset,
8607        .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
8608      { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
8609        .type = ARM_CP_IO,
8610        .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
8611        .access = PL2_RW,
8612        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
8613        .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
8614      { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
8615        .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
8616        .type = ARM_CP_IO | ARM_CP_ALIAS,
8617        .access = PL2_RW, .accessfn = access_el1nvpct,
8618        .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1,
8619        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
8620        .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
8621      { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
8622        .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
8623        .type = ARM_CP_IO | ARM_CP_ALIAS,
8624        .access = PL2_RW, .accessfn = access_el1nvvct,
8625        .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1,
8626        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
8627        .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
8628      { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
8629        .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
8630        .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
8631        .access = PL2_RW, .accessfn = e2h_access,
8632        .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
8633      { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
8634        .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
8635        .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
8636        .access = PL2_RW, .accessfn = e2h_access,
8637        .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
8638      { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
8639        .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
8640        .type = ARM_CP_IO | ARM_CP_ALIAS,
8641        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
8642        .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1,
8643        .access = PL2_RW, .accessfn = access_el1nvpct,
8644        .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
8645      { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
8646        .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
8647        .type = ARM_CP_IO | ARM_CP_ALIAS,
8648        .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1,
8649        .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
8650        .access = PL2_RW, .accessfn = access_el1nvvct,
8651        .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
8652  #endif
8653  };
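
/*
 * Editorial sketch: the TVAL accessors referenced above live earlier in
 * this file; architecturally TVAL is a signed 32-bit downcounter derived
 * from the 64-bit compare value CVAL, roughly as below.  Illustrative
 * helpers, not the QEMU implementations.
 */
static inline int32_t example_tval_from_cval(uint64_t cval, uint64_t count)
{
    return (int32_t)(cval - count);         /* TVAL = CVAL - CNT */
}

static inline uint64_t example_cval_from_tval(int32_t tval, uint64_t count)
{
    return count + (int64_t)tval;           /* writing TVAL reprograms CVAL */
}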
8654  
8655  #ifndef CONFIG_USER_ONLY
8656  static const ARMCPRegInfo ats1e1_reginfo[] = {
8657      { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
8658        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
8659        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8660        .fgt = FGT_ATS1E1RP,
8661        .accessfn = at_s1e01_access, .writefn = ats_write64 },
8662      { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
8663        .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
8664        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8665        .fgt = FGT_ATS1E1WP,
8666        .accessfn = at_s1e01_access, .writefn = ats_write64 },
8667  };
8668  
8669  static const ARMCPRegInfo ats1cp_reginfo[] = {
8670      { .name = "ATS1CPRP",
8671        .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
8672        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8673        .writefn = ats_write },
8674      { .name = "ATS1CPWP",
8675        .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
8676        .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
8677        .writefn = ats_write },
8678  };
8679  #endif
8680  
8681  /*
8682   * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
8683   * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
8684   * is non-zero, which is never the case for ARMv7, optional in
8685   * ARMv8, and mandatory for ARMv8.2 and later.
8686   * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
8687   * implementation is RAZ/WI we can ignore this detail, as we
8688   * do for ACTLR.
8689   */
8690  static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
8691      { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
8692        .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
8693        .access = PL1_RW, .accessfn = access_tacr,
8694        .type = ARM_CP_CONST, .resetvalue = 0 },
8695      { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
8696        .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
8697        .access = PL2_RW, .type = ARM_CP_CONST,
8698        .resetvalue = 0 },
8699  };
8700  
8701  void register_cp_regs_for_features(ARMCPU *cpu)
8702  {
8703      /* Register all the coprocessor registers based on feature bits */
8704      CPUARMState *env = &cpu->env;
8705      if (arm_feature(env, ARM_FEATURE_M)) {
8706          /* M profile has no coprocessor registers */
8707          return;
8708      }
8709  
8710      define_arm_cp_regs(cpu, cp_reginfo);
8711      if (!arm_feature(env, ARM_FEATURE_V8)) {
8712          /*
8713           * Must go early as it is full of wildcards that may be
8714           * overridden by later definitions.
8715           */
8716          define_arm_cp_regs(cpu, not_v8_cp_reginfo);
8717      }
8718  
8719      if (arm_feature(env, ARM_FEATURE_V6)) {
8720          /* The ID registers all have impdef reset values */
8721          ARMCPRegInfo v6_idregs[] = {
8722              { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
8723                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
8724                .access = PL1_R, .type = ARM_CP_CONST,
8725                .accessfn = access_aa32_tid3,
8726                .resetvalue = cpu->isar.id_pfr0 },
8727              /*
8728               * ID_PFR1 is not a plain ARM_CP_CONST because we don't know
8729               * the value of the GIC field until after we define these regs.
8730               */
8731              { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
8732                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
8733              .access = PL1_R,
8735  #ifdef CONFIG_USER_ONLY
8736                .type = ARM_CP_CONST,
8737                .resetvalue = cpu->isar.id_pfr1,
8738  #else
8739                .type = ARM_CP_NO_RAW,
8740                .accessfn = access_aa32_tid3,
8741                .readfn = id_pfr1_read,
8742                .writefn = arm_cp_write_ignore
8743  #endif
8744              },
8745              { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
8746                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
8747                .access = PL1_R, .type = ARM_CP_CONST,
8748                .accessfn = access_aa32_tid3,
8749                .resetvalue = cpu->isar.id_dfr0 },
8750              { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
8751                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
8752                .access = PL1_R, .type = ARM_CP_CONST,
8753                .accessfn = access_aa32_tid3,
8754                .resetvalue = cpu->id_afr0 },
8755              { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
8756                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
8757                .access = PL1_R, .type = ARM_CP_CONST,
8758                .accessfn = access_aa32_tid3,
8759                .resetvalue = cpu->isar.id_mmfr0 },
8760              { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
8761                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
8762                .access = PL1_R, .type = ARM_CP_CONST,
8763                .accessfn = access_aa32_tid3,
8764                .resetvalue = cpu->isar.id_mmfr1 },
8765              { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
8766                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
8767                .access = PL1_R, .type = ARM_CP_CONST,
8768                .accessfn = access_aa32_tid3,
8769                .resetvalue = cpu->isar.id_mmfr2 },
8770              { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
8771                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
8772                .access = PL1_R, .type = ARM_CP_CONST,
8773                .accessfn = access_aa32_tid3,
8774                .resetvalue = cpu->isar.id_mmfr3 },
8775              { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
8776                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
8777                .access = PL1_R, .type = ARM_CP_CONST,
8778                .accessfn = access_aa32_tid3,
8779                .resetvalue = cpu->isar.id_isar0 },
8780              { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
8781                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
8782                .access = PL1_R, .type = ARM_CP_CONST,
8783                .accessfn = access_aa32_tid3,
8784                .resetvalue = cpu->isar.id_isar1 },
8785              { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
8786                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
8787                .access = PL1_R, .type = ARM_CP_CONST,
8788                .accessfn = access_aa32_tid3,
8789                .resetvalue = cpu->isar.id_isar2 },
8790              { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
8791                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
8792                .access = PL1_R, .type = ARM_CP_CONST,
8793                .accessfn = access_aa32_tid3,
8794                .resetvalue = cpu->isar.id_isar3 },
8795              { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
8796                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
8797                .access = PL1_R, .type = ARM_CP_CONST,
8798                .accessfn = access_aa32_tid3,
8799                .resetvalue = cpu->isar.id_isar4 },
8800              { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
8801                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
8802                .access = PL1_R, .type = ARM_CP_CONST,
8803                .accessfn = access_aa32_tid3,
8804                .resetvalue = cpu->isar.id_isar5 },
8805              { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
8806                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
8807                .access = PL1_R, .type = ARM_CP_CONST,
8808                .accessfn = access_aa32_tid3,
8809                .resetvalue = cpu->isar.id_mmfr4 },
8810              { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
8811                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
8812                .access = PL1_R, .type = ARM_CP_CONST,
8813                .accessfn = access_aa32_tid3,
8814                .resetvalue = cpu->isar.id_isar6 },
8815          };
8816          define_arm_cp_regs(cpu, v6_idregs);
8817          define_arm_cp_regs(cpu, v6_cp_reginfo);
8818      } else {
8819          define_arm_cp_regs(cpu, not_v6_cp_reginfo);
8820      }
8821      if (arm_feature(env, ARM_FEATURE_V6K)) {
8822          define_arm_cp_regs(cpu, v6k_cp_reginfo);
8823      }
8824      if (arm_feature(env, ARM_FEATURE_V7MP) &&
8825          !arm_feature(env, ARM_FEATURE_PMSA)) {
8826          define_arm_cp_regs(cpu, v7mp_cp_reginfo);
8827      }
8828      if (arm_feature(env, ARM_FEATURE_V7VE)) {
8829          define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
8830      }
8831      if (arm_feature(env, ARM_FEATURE_V7)) {
8832          ARMCPRegInfo clidr = {
8833              .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
8834              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
8835              .access = PL1_R, .type = ARM_CP_CONST,
8836              .accessfn = access_tid4,
8837              .fgt = FGT_CLIDR_EL1,
8838              .resetvalue = cpu->clidr
8839          };
8840          define_one_arm_cp_reg(cpu, &clidr);
8841          define_arm_cp_regs(cpu, v7_cp_reginfo);
8842          define_debug_regs(cpu);
8843          define_pmu_regs(cpu);
8844      } else {
8845          define_arm_cp_regs(cpu, not_v7_cp_reginfo);
8846      }
8847      if (arm_feature(env, ARM_FEATURE_V8)) {
8848          /*
8849           * v8 ID registers, which all have impdef reset values.
8850           * Note that within the ID register ranges the unused slots
8851           * must all be RAZ, not UNDEF; future architecture versions may
8852           * define new registers here.
8853           * ID registers which are AArch64 views of the AArch32 ID registers
8854           * which already existed in v6 and v7 are handled elsewhere,
8855           * in v6_idregs[].
8856           */
8857          int i;
8858          ARMCPRegInfo v8_idregs[] = {
8859              /*
8860               * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
8861               * emulation because we don't know the right value for the
8862               * GIC field until after we define these regs.
8863               */
8864              { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
8865                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
8866                .access = PL1_R,
8867  #ifdef CONFIG_USER_ONLY
8868                .type = ARM_CP_CONST,
8869                .resetvalue = cpu->isar.id_aa64pfr0
8870  #else
8871                .type = ARM_CP_NO_RAW,
8872                .accessfn = access_aa64_tid3,
8873                .readfn = id_aa64pfr0_read,
8874                .writefn = arm_cp_write_ignore
8875  #endif
8876              },
8877              { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
8878                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
8879                .access = PL1_R, .type = ARM_CP_CONST,
8880                .accessfn = access_aa64_tid3,
8881              .resetvalue = cpu->isar.id_aa64pfr1 },
8882              { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8883                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
8884                .access = PL1_R, .type = ARM_CP_CONST,
8885                .accessfn = access_aa64_tid3,
8886                .resetvalue = 0 },
8887              { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8888                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
8889                .access = PL1_R, .type = ARM_CP_CONST,
8890                .accessfn = access_aa64_tid3,
8891                .resetvalue = 0 },
8892              { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
8893                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
8894                .access = PL1_R, .type = ARM_CP_CONST,
8895                .accessfn = access_aa64_tid3,
8896                .resetvalue = cpu->isar.id_aa64zfr0 },
8897              { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
8898                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
8899                .access = PL1_R, .type = ARM_CP_CONST,
8900                .accessfn = access_aa64_tid3,
8901                .resetvalue = cpu->isar.id_aa64smfr0 },
8902              { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8903                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
8904                .access = PL1_R, .type = ARM_CP_CONST,
8905                .accessfn = access_aa64_tid3,
8906                .resetvalue = 0 },
8907              { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8908                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
8909                .access = PL1_R, .type = ARM_CP_CONST,
8910                .accessfn = access_aa64_tid3,
8911                .resetvalue = 0 },
8912              { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
8913                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
8914                .access = PL1_R, .type = ARM_CP_CONST,
8915                .accessfn = access_aa64_tid3,
8916                .resetvalue = cpu->isar.id_aa64dfr0 },
8917              { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
8918                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
8919                .access = PL1_R, .type = ARM_CP_CONST,
8920                .accessfn = access_aa64_tid3,
8921                .resetvalue = cpu->isar.id_aa64dfr1 },
8922              { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8923                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
8924                .access = PL1_R, .type = ARM_CP_CONST,
8925                .accessfn = access_aa64_tid3,
8926                .resetvalue = 0 },
8927              { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8928                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
8929                .access = PL1_R, .type = ARM_CP_CONST,
8930                .accessfn = access_aa64_tid3,
8931                .resetvalue = 0 },
8932              { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
8933                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
8934                .access = PL1_R, .type = ARM_CP_CONST,
8935                .accessfn = access_aa64_tid3,
8936                .resetvalue = cpu->id_aa64afr0 },
8937              { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
8938                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
8939                .access = PL1_R, .type = ARM_CP_CONST,
8940                .accessfn = access_aa64_tid3,
8941                .resetvalue = cpu->id_aa64afr1 },
8942              { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8943                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
8944                .access = PL1_R, .type = ARM_CP_CONST,
8945                .accessfn = access_aa64_tid3,
8946                .resetvalue = 0 },
8947              { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8948                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
8949                .access = PL1_R, .type = ARM_CP_CONST,
8950                .accessfn = access_aa64_tid3,
8951                .resetvalue = 0 },
8952              { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
8953                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
8954                .access = PL1_R, .type = ARM_CP_CONST,
8955                .accessfn = access_aa64_tid3,
8956                .resetvalue = cpu->isar.id_aa64isar0 },
8957              { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
8958                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
8959                .access = PL1_R, .type = ARM_CP_CONST,
8960                .accessfn = access_aa64_tid3,
8961                .resetvalue = cpu->isar.id_aa64isar1 },
8962              { .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
8963                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
8964                .access = PL1_R, .type = ARM_CP_CONST,
8965                .accessfn = access_aa64_tid3,
8966                .resetvalue = cpu->isar.id_aa64isar2 },
8967              { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8968                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
8969                .access = PL1_R, .type = ARM_CP_CONST,
8970                .accessfn = access_aa64_tid3,
8971                .resetvalue = 0 },
8972              { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8973                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
8974                .access = PL1_R, .type = ARM_CP_CONST,
8975                .accessfn = access_aa64_tid3,
8976                .resetvalue = 0 },
8977              { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8978                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
8979                .access = PL1_R, .type = ARM_CP_CONST,
8980                .accessfn = access_aa64_tid3,
8981                .resetvalue = 0 },
8982              { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8983                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
8984                .access = PL1_R, .type = ARM_CP_CONST,
8985                .accessfn = access_aa64_tid3,
8986                .resetvalue = 0 },
8987              { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
8988                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
8989                .access = PL1_R, .type = ARM_CP_CONST,
8990                .accessfn = access_aa64_tid3,
8991                .resetvalue = 0 },
8992              { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
8993                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
8994                .access = PL1_R, .type = ARM_CP_CONST,
8995                .accessfn = access_aa64_tid3,
8996                .resetvalue = cpu->isar.id_aa64mmfr0 },
8997              { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
8998                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
8999                .access = PL1_R, .type = ARM_CP_CONST,
9000                .accessfn = access_aa64_tid3,
9001                .resetvalue = cpu->isar.id_aa64mmfr1 },
9002              { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
9003                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
9004                .access = PL1_R, .type = ARM_CP_CONST,
9005                .accessfn = access_aa64_tid3,
9006                .resetvalue = cpu->isar.id_aa64mmfr2 },
9007              { .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
9008                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
9009                .access = PL1_R, .type = ARM_CP_CONST,
9010                .accessfn = access_aa64_tid3,
9011                .resetvalue = cpu->isar.id_aa64mmfr3 },
9012              { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
9013                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
9014                .access = PL1_R, .type = ARM_CP_CONST,
9015                .accessfn = access_aa64_tid3,
9016                .resetvalue = 0 },
9017              { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
9018                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
9019                .access = PL1_R, .type = ARM_CP_CONST,
9020                .accessfn = access_aa64_tid3,
9021                .resetvalue = 0 },
9022              { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
9023                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
9024                .access = PL1_R, .type = ARM_CP_CONST,
9025                .accessfn = access_aa64_tid3,
9026                .resetvalue = 0 },
9027              { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
9028                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
9029                .access = PL1_R, .type = ARM_CP_CONST,
9030                .accessfn = access_aa64_tid3,
9031                .resetvalue = 0 },
9032              { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
9033                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
9034                .access = PL1_R, .type = ARM_CP_CONST,
9035                .accessfn = access_aa64_tid3,
9036                .resetvalue = cpu->isar.mvfr0 },
9037              { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
9038                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
9039                .access = PL1_R, .type = ARM_CP_CONST,
9040                .accessfn = access_aa64_tid3,
9041                .resetvalue = cpu->isar.mvfr1 },
9042              { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
9043                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
9044                .access = PL1_R, .type = ARM_CP_CONST,
9045                .accessfn = access_aa64_tid3,
9046                .resetvalue = cpu->isar.mvfr2 },
9047              /*
9048               * "0, c0, c3, {0,1,2}" are the encodings corresponding to
9049               * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
9050               * as RAZ, since it is in the "reserved for future ID
9051               * registers, RAZ" part of the AArch32 encoding space.
9052               */
9053              { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
9054                .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
9055                .access = PL1_R, .type = ARM_CP_CONST,
9056                .accessfn = access_aa64_tid3,
9057                .resetvalue = 0 },
9058              { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
9059                .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
9060                .access = PL1_R, .type = ARM_CP_CONST,
9061                .accessfn = access_aa64_tid3,
9062                .resetvalue = 0 },
9063              { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
9064                .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
9065                .access = PL1_R, .type = ARM_CP_CONST,
9066                .accessfn = access_aa64_tid3,
9067                .resetvalue = 0 },
9068              /*
9069               * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
9070               * they're also RAZ for AArch64, and in v8 these slots are
9071               * gradually being filled in with AArch64 views of the new
9072               * AArch32 ID registers.
9073               */
9074              { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
9075                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
9076                .access = PL1_R, .type = ARM_CP_CONST,
9077                .accessfn = access_aa64_tid3,
9078                .resetvalue = 0 },
9079              { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH,
9080                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
9081                .access = PL1_R, .type = ARM_CP_CONST,
9082                .accessfn = access_aa64_tid3,
9083                .resetvalue = cpu->isar.id_pfr2 },
9084              { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
9085                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
9086                .access = PL1_R, .type = ARM_CP_CONST,
9087                .accessfn = access_aa64_tid3,
9088                .resetvalue = cpu->isar.id_dfr1 },
9089              { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
9090                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
9091                .access = PL1_R, .type = ARM_CP_CONST,
9092                .accessfn = access_aa64_tid3,
9093                .resetvalue = cpu->isar.id_mmfr5 },
9094              { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
9095                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
9096                .access = PL1_R, .type = ARM_CP_CONST,
9097                .accessfn = access_aa64_tid3,
9098                .resetvalue = 0 },
9099              { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
9100                .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
9101                .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9102                .fgt = FGT_PMCEIDN_EL0,
9103                .resetvalue = extract64(cpu->pmceid0, 0, 32) },
9104              { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
9105                .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
9106                .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9107                .fgt = FGT_PMCEIDN_EL0,
9108                .resetvalue = cpu->pmceid0 },
9109              { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
9110                .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
9111                .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9112                .fgt = FGT_PMCEIDN_EL0,
9113                .resetvalue = extract64(cpu->pmceid1, 0, 32) },
9114              { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
9115                .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
9116                .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
9117                .fgt = FGT_PMCEIDN_EL0,
9118                .resetvalue = cpu->pmceid1 },
9119          };
9120  #ifdef CONFIG_USER_ONLY
9121          static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
9122              { .name = "ID_AA64PFR0_EL1",
9123                .exported_bits = R_ID_AA64PFR0_FP_MASK |
9124                                 R_ID_AA64PFR0_ADVSIMD_MASK |
9125                                 R_ID_AA64PFR0_SVE_MASK |
9126                                 R_ID_AA64PFR0_DIT_MASK,
9127                .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) |
9128                              (0x1u << R_ID_AA64PFR0_EL1_SHIFT) },
9129              { .name = "ID_AA64PFR1_EL1",
9130                .exported_bits = R_ID_AA64PFR1_BT_MASK |
9131                                 R_ID_AA64PFR1_SSBS_MASK |
9132                                 R_ID_AA64PFR1_MTE_MASK |
9133                                 R_ID_AA64PFR1_SME_MASK },
9134              { .name = "ID_AA64PFR*_EL1_RESERVED",
9135                .is_glob = true },
9136              { .name = "ID_AA64ZFR0_EL1",
9137                .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK |
9138                                 R_ID_AA64ZFR0_AES_MASK |
9139                                 R_ID_AA64ZFR0_BITPERM_MASK |
9140                                 R_ID_AA64ZFR0_BFLOAT16_MASK |
9141                                 R_ID_AA64ZFR0_B16B16_MASK |
9142                                 R_ID_AA64ZFR0_SHA3_MASK |
9143                                 R_ID_AA64ZFR0_SM4_MASK |
9144                                 R_ID_AA64ZFR0_I8MM_MASK |
9145                                 R_ID_AA64ZFR0_F32MM_MASK |
9146                                 R_ID_AA64ZFR0_F64MM_MASK },
9147              { .name = "ID_AA64SMFR0_EL1",
9148                .exported_bits = R_ID_AA64SMFR0_F32F32_MASK |
9149                                 R_ID_AA64SMFR0_BI32I32_MASK |
9150                                 R_ID_AA64SMFR0_B16F32_MASK |
9151                                 R_ID_AA64SMFR0_F16F32_MASK |
9152                                 R_ID_AA64SMFR0_I8I32_MASK |
9153                                 R_ID_AA64SMFR0_F16F16_MASK |
9154                                 R_ID_AA64SMFR0_B16B16_MASK |
9155                                 R_ID_AA64SMFR0_I16I32_MASK |
9156                                 R_ID_AA64SMFR0_F64F64_MASK |
9157                                 R_ID_AA64SMFR0_I16I64_MASK |
9158                                 R_ID_AA64SMFR0_SMEVER_MASK |
9159                                 R_ID_AA64SMFR0_FA64_MASK },
9160              { .name = "ID_AA64MMFR0_EL1",
9161                .exported_bits = R_ID_AA64MMFR0_ECV_MASK,
9162                .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) |
9163                              (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) },
9164              { .name = "ID_AA64MMFR1_EL1",
9165                .exported_bits = R_ID_AA64MMFR1_AFP_MASK },
9166              { .name = "ID_AA64MMFR2_EL1",
9167                .exported_bits = R_ID_AA64MMFR2_AT_MASK },
9168              { .name = "ID_AA64MMFR3_EL1",
9169                .exported_bits = 0 },
9170              { .name = "ID_AA64MMFR*_EL1_RESERVED",
9171                .is_glob = true },
9172              { .name = "ID_AA64DFR0_EL1",
9173                .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) },
9174              { .name = "ID_AA64DFR1_EL1" },
9175              { .name = "ID_AA64DFR*_EL1_RESERVED",
9176                .is_glob = true },
9177              { .name = "ID_AA64AFR*",
9178                .is_glob = true },
9179              { .name = "ID_AA64ISAR0_EL1",
9180                .exported_bits = R_ID_AA64ISAR0_AES_MASK |
9181                                 R_ID_AA64ISAR0_SHA1_MASK |
9182                                 R_ID_AA64ISAR0_SHA2_MASK |
9183                                 R_ID_AA64ISAR0_CRC32_MASK |
9184                                 R_ID_AA64ISAR0_ATOMIC_MASK |
9185                                 R_ID_AA64ISAR0_RDM_MASK |
9186                                 R_ID_AA64ISAR0_SHA3_MASK |
9187                                 R_ID_AA64ISAR0_SM3_MASK |
9188                                 R_ID_AA64ISAR0_SM4_MASK |
9189                                 R_ID_AA64ISAR0_DP_MASK |
9190                                 R_ID_AA64ISAR0_FHM_MASK |
9191                                 R_ID_AA64ISAR0_TS_MASK |
9192                                 R_ID_AA64ISAR0_RNDR_MASK },
9193              { .name = "ID_AA64ISAR1_EL1",
9194                .exported_bits = R_ID_AA64ISAR1_DPB_MASK |
9195                                 R_ID_AA64ISAR1_APA_MASK |
9196                                 R_ID_AA64ISAR1_API_MASK |
9197                                 R_ID_AA64ISAR1_JSCVT_MASK |
9198                                 R_ID_AA64ISAR1_FCMA_MASK |
9199                                 R_ID_AA64ISAR1_LRCPC_MASK |
9200                                 R_ID_AA64ISAR1_GPA_MASK |
9201                                 R_ID_AA64ISAR1_GPI_MASK |
9202                                 R_ID_AA64ISAR1_FRINTTS_MASK |
9203                                 R_ID_AA64ISAR1_SB_MASK |
9204                                 R_ID_AA64ISAR1_BF16_MASK |
9205                                 R_ID_AA64ISAR1_DGH_MASK |
9206                                 R_ID_AA64ISAR1_I8MM_MASK },
9207              { .name = "ID_AA64ISAR2_EL1",
9208                .exported_bits = R_ID_AA64ISAR2_WFXT_MASK |
9209                                 R_ID_AA64ISAR2_RPRES_MASK |
9210                                 R_ID_AA64ISAR2_GPA3_MASK |
9211                                 R_ID_AA64ISAR2_APA3_MASK |
9212                                 R_ID_AA64ISAR2_MOPS_MASK |
9213                                 R_ID_AA64ISAR2_BC_MASK |
9214                                 R_ID_AA64ISAR2_RPRFM_MASK |
9215                                 R_ID_AA64ISAR2_CSSC_MASK },
9216              { .name = "ID_AA64ISAR*_EL1_RESERVED",
9217                .is_glob = true },
9218          };
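        /*
         * The modify_arm_cp_regs() call below applies these mods for
         * the user-only build: each register named above keeps only
         * its .exported_bits in the reset value (with .fixed_bits
         * forced on), while the *_EL1_RESERVED glob entries read as
         * zero.
         */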
9219          modify_arm_cp_regs(v8_idregs, v8_user_idregs);
9220  #endif
9221          /*
9222           * RVBAR_EL1 and RMR_EL1 only implemented if EL1 is the highest EL.
9223           * TODO: For RMR, a write with bit 1 set should do something with
9224           * cpu_reset(). In the meantime, "the bit is strictly a request",
9225           * so we stay within spec by simply ignoring writes.
9226           */
9227          if (!arm_feature(env, ARM_FEATURE_EL3) &&
9228              !arm_feature(env, ARM_FEATURE_EL2)) {
9229              ARMCPRegInfo el1_reset_regs[] = {
9230                  { .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH,
9231                    .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
9232                    .access = PL1_R,
9233                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9234                  { .name = "RMR_EL1", .state = ARM_CP_STATE_BOTH,
9235                    .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
9236                    .access = PL1_RW, .type = ARM_CP_CONST,
9237                    .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) }
9238              };
9239              define_arm_cp_regs(cpu, el1_reset_regs);
9240          }
9241          define_arm_cp_regs(cpu, v8_idregs);
9242          define_arm_cp_regs(cpu, v8_cp_reginfo);
9243          if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
9244              define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
9245          }
9246  
9247          for (i = 4; i < 16; i++) {
9248              /*
9249               * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
9250               * For pre-v8 cores there are RAZ patterns for these in
9251               * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
9252               * v8 extends the "must RAZ" part of the ID register space
9253               * to also cover c0, 0, c{8-15}, {0-7}.
9254               * These are STATE_AA32 because in the AArch64 sysreg space
9255               * c4-c7 is where the AArch64 ID registers live (and we've
9256               * already defined those in v8_idregs[]), and c8-c15 are not
9257               * "must RAZ" for AArch64.
9258               */
9259              g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
9260              ARMCPRegInfo v8_aa32_raz_idregs = {
9261                  .name = name,
9262                  .state = ARM_CP_STATE_AA32,
9263                  .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
9264                  .access = PL1_R, .type = ARM_CP_CONST,
9265                  .accessfn = access_aa64_tid3,
9266                  .resetvalue = 0 };
9267              define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
9268          }
9269      }
9270  
9271      /*
9272       * Register the base EL2 cpregs.
9273       * Pre v8, these registers are implemented only as part of the
9274       * Virtualization Extensions (EL2 present).  Beginning with v8,
9275       * if EL2 is missing but EL3 is enabled, mostly these become
9276       * RES0 from EL3, with some specific exceptions.
9277       */
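    /*
     * The RES0-from-EL3 case is handled centrally in
     * add_cpreg_to_hashtable(): when EL3 is present without EL2, a
     * register whose minimum EL is 2 is turned into ARM_CP_CONST
     * unless it is flagged ARM_CP_EL3_NO_EL2_UNDEF (omitted entirely)
     * or ARM_CP_EL3_NO_EL2_KEEP (kept as-is).
     */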
9278      if (arm_feature(env, ARM_FEATURE_EL2)
9279          || (arm_feature(env, ARM_FEATURE_EL3)
9280              && arm_feature(env, ARM_FEATURE_V8))) {
9281          uint64_t vmpidr_def = mpidr_read_val(env);
9282          ARMCPRegInfo vpidr_regs[] = {
9283              { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
9284                .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
9285                .access = PL2_RW, .accessfn = access_el3_aa32ns,
9286                .resetvalue = cpu->midr,
9287                .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
9288                .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
9289              { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
9290                .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
9291                .access = PL2_RW, .resetvalue = cpu->midr,
9292                .type = ARM_CP_EL3_NO_EL2_C_NZ,
9293                .nv2_redirect_offset = 0x88,
9294                .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
9295              { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
9296                .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
9297                .access = PL2_RW, .accessfn = access_el3_aa32ns,
9298                .resetvalue = vmpidr_def,
9299                .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ,
9300                .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
9301              { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
9302                .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
9303                .access = PL2_RW, .resetvalue = vmpidr_def,
9304                .type = ARM_CP_EL3_NO_EL2_C_NZ,
9305                .nv2_redirect_offset = 0x50,
9306                .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
9307          };
9308          /*
9309           * The only field of MDCR_EL2 that has a defined architectural reset
9310           * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
9311           */
9312          ARMCPRegInfo mdcr_el2 = {
9313              .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO,
9314              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
9315              .writefn = mdcr_el2_write,
9316              .access = PL2_RW, .resetvalue = pmu_num_counters(env),
9317              .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
9318          };
9319          define_one_arm_cp_reg(cpu, &mdcr_el2);
9320          define_arm_cp_regs(cpu, vpidr_regs);
9321          define_arm_cp_regs(cpu, el2_cp_reginfo);
9322          if (arm_feature(env, ARM_FEATURE_V8)) {
9323              define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
9324          }
9325          if (cpu_isar_feature(aa64_sel2, cpu)) {
9326              define_arm_cp_regs(cpu, el2_sec_cp_reginfo);
9327          }
9328          /*
9329           * RVBAR_EL2 and RMR_EL2 only implemented if EL2 is the highest EL.
9330           * See commentary near RMR_EL1.
9331           */
9332          if (!arm_feature(env, ARM_FEATURE_EL3)) {
9333              static const ARMCPRegInfo el2_reset_regs[] = {
9334                  { .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
9335                    .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
9336                    .access = PL2_R,
9337                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9338                  { .name = "RVBAR", .type = ARM_CP_ALIAS,
9339                    .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
9340                    .access = PL2_R,
9341                    .fieldoffset = offsetof(CPUARMState, cp15.rvbar) },
9342                  { .name = "RMR_EL2", .state = ARM_CP_STATE_AA64,
9343                    .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 2,
9344                    .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
9345              };
9346              define_arm_cp_regs(cpu, el2_reset_regs);
9347          }
9348      }
9349  
9350      /* Register the base EL3 cpregs. */
9351      if (arm_feature(env, ARM_FEATURE_EL3)) {
9352          define_arm_cp_regs(cpu, el3_cp_reginfo);
9353          ARMCPRegInfo el3_regs[] = {
9354              { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
9355                .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
9356                .access = PL3_R,
9357                .fieldoffset = offsetof(CPUARMState, cp15.rvbar), },
9358              { .name = "RMR_EL3", .state = ARM_CP_STATE_AA64,
9359                .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 2,
9360                .access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 1 },
9361              { .name = "RMR", .state = ARM_CP_STATE_AA32,
9362                .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 2,
9363                .access = PL3_RW, .type = ARM_CP_CONST,
9364                .resetvalue = arm_feature(env, ARM_FEATURE_AARCH64) },
9365              { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
9366                .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
9367                .access = PL3_RW,
9368                .raw_writefn = raw_write, .writefn = sctlr_write,
9369                .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
9370                .resetvalue = cpu->reset_sctlr },
9371          };
9372  
9373          define_arm_cp_regs(cpu, el3_regs);
9374      }
9375      /*
9376       * The behaviour of NSACR is sufficiently various that we don't
9377       * try to describe it in a single reginfo:
9378       *  if EL3 is 64 bit, then trap to EL3 from S EL1,
9379       *     reads as constant 0xc00 from NS EL1 and NS EL2
9380       *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
9381       *  if v7 without EL3, register doesn't exist
9382       *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
9383       */
9384      if (arm_feature(env, ARM_FEATURE_EL3)) {
9385          if (arm_feature(env, ARM_FEATURE_AARCH64)) {
9386              static const ARMCPRegInfo nsacr = {
9387                  .name = "NSACR", .type = ARM_CP_CONST,
9388                  .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9389                  .access = PL1_RW, .accessfn = nsacr_access,
9390                  .resetvalue = 0xc00
9391              };
9392              define_one_arm_cp_reg(cpu, &nsacr);
9393          } else {
9394              static const ARMCPRegInfo nsacr = {
9395                  .name = "NSACR",
9396                  .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9397                  .access = PL3_RW | PL1_R,
9398                  .resetvalue = 0,
9399                  .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
9400              };
9401              define_one_arm_cp_reg(cpu, &nsacr);
9402          }
9403      } else {
9404          if (arm_feature(env, ARM_FEATURE_V8)) {
9405              static const ARMCPRegInfo nsacr = {
9406                  .name = "NSACR", .type = ARM_CP_CONST,
9407                  .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
9408                  .access = PL1_R,
9409                  .resetvalue = 0xc00
9410              };
9411              define_one_arm_cp_reg(cpu, &nsacr);
9412          }
9413      }
9414  
9415      if (arm_feature(env, ARM_FEATURE_PMSA)) {
9416          if (arm_feature(env, ARM_FEATURE_V6)) {
9417              /* PMSAv6 not implemented */
9418              assert(arm_feature(env, ARM_FEATURE_V7));
9419              define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
9420              define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
9421          } else {
9422              define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
9423          }
9424      } else {
9425          define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
9426          define_arm_cp_regs(cpu, vmsa_cp_reginfo);
9427          /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
9428          if (cpu_isar_feature(aa32_hpd, cpu)) {
9429              define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
9430          }
9431      }
9432      if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
9433          define_arm_cp_regs(cpu, t2ee_cp_reginfo);
9434      }
9435      if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
9436          define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
9437      }
9438      if (cpu_isar_feature(aa64_ecv_traps, cpu)) {
9439          define_arm_cp_regs(cpu, gen_timer_ecv_cp_reginfo);
9440      }
9441  #ifndef CONFIG_USER_ONLY
9442      if (cpu_isar_feature(aa64_ecv, cpu)) {
9443          define_one_arm_cp_reg(cpu, &gen_timer_cntpoff_reginfo);
9444      }
9445  #endif
9446      if (arm_feature(env, ARM_FEATURE_VAPA)) {
9447          ARMCPRegInfo vapa_cp_reginfo[] = {
9448              { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
9449                .access = PL1_RW, .resetvalue = 0,
9450                .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
9451                                       offsetoflow32(CPUARMState, cp15.par_ns) },
9452                .writefn = par_write},
9453  #ifndef CONFIG_USER_ONLY
9454              /* This underdecoding is safe because the reginfo is NO_RAW. */
9455              { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
9456                .access = PL1_W, .accessfn = ats_access,
9457                .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
9458  #endif
9459          };
9460  
9461          /*
9462           * When LPAE exists this 32-bit PAR register is an alias of the
9463           * 64-bit AArch32 PAR register defined in lpae_cp_reginfo[]
9464           */
9465          if (arm_feature(env, ARM_FEATURE_LPAE)) {
9466              vapa_cp_reginfo[0].type = ARM_CP_ALIAS | ARM_CP_NO_GDB;
9467          }
9468          define_arm_cp_regs(cpu, vapa_cp_reginfo);
9469      }
9470      if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
9471          define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
9472      }
9473      if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
9474          define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
9475      }
9476      if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
9477          define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
9478      }
9479      if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
9480          define_arm_cp_regs(cpu, omap_cp_reginfo);
9481      }
9482      if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
9483          define_arm_cp_regs(cpu, strongarm_cp_reginfo);
9484      }
9485      if (arm_feature(env, ARM_FEATURE_XSCALE)) {
9486          define_arm_cp_regs(cpu, xscale_cp_reginfo);
9487      }
9488      if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
9489          define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
9490      }
9491      if (arm_feature(env, ARM_FEATURE_LPAE)) {
9492          define_arm_cp_regs(cpu, lpae_cp_reginfo);
9493      }
9494      if (cpu_isar_feature(aa32_jazelle, cpu)) {
9495          define_arm_cp_regs(cpu, jazelle_regs);
9496      }
9497      /*
9498       * Slightly awkwardly, the OMAP and StrongARM cores need all of
9499       * cp15 crn=0 to be writes-ignored, whereas for other cores they should
9500       * be read-only (ie write causes UNDEF exception).
9501       */
9502      {
9503          ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
9504              /*
9505               * Pre-v8 MIDR space.
9506               * Note that the MIDR isn't a simple constant register because
9507               * of the TI925 behaviour where writes to another register can
9508               * cause the MIDR value to change.
9509               *
9510               * Unimplemented registers in the c15 0 0 0 space default to
9511               * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
9512               * and friends override accordingly.
9513               */
9514              { .name = "MIDR",
9515                .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
9516                .access = PL1_R, .resetvalue = cpu->midr,
9517                .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
9518                .readfn = midr_read,
9519                .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
9520                .type = ARM_CP_OVERRIDE },
9521              /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
9522              { .name = "DUMMY",
9523                .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
9524                .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9525              { .name = "DUMMY",
9526                .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
9527                .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9528              { .name = "DUMMY",
9529                .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
9530                .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9531              { .name = "DUMMY",
9532                .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
9533                .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9534              { .name = "DUMMY",
9535                .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
9536                .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
9537          };
9538          ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
9539              { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
9540                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
9541                .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
9542                .fgt = FGT_MIDR_EL1,
9543                .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
9544                .readfn = midr_read },
9545              /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */
9546              { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
9547                .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
9548                .access = PL1_R, .resetvalue = cpu->midr },
9549              { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
9550                .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
9551                .access = PL1_R,
9552                .accessfn = access_aa64_tid1,
9553                .fgt = FGT_REVIDR_EL1,
9554                .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
9555          };
9556          ARMCPRegInfo id_v8_midr_alias_cp_reginfo = {
9557              .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST | ARM_CP_NO_GDB,
9558              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
9559              .access = PL1_R, .resetvalue = cpu->midr
9560          };
9561          ARMCPRegInfo id_cp_reginfo[] = {
9562              /* These are common to v8 and pre-v8 */
9563              { .name = "CTR",
9564                .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
9565                .access = PL1_R, .accessfn = ctr_el0_access,
9566                .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
9567              { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
9568                .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
9569                .access = PL0_R, .accessfn = ctr_el0_access,
9570                .fgt = FGT_CTR_EL0,
9571                .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
9572              /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
9573              { .name = "TCMTR",
9574                .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
9575                .access = PL1_R,
9576                .accessfn = access_aa32_tid1,
9577                .type = ARM_CP_CONST, .resetvalue = 0 },
9578          };
9579          /* TLBTR is specific to VMSA */
9580          ARMCPRegInfo id_tlbtr_reginfo = {
9581                .name = "TLBTR",
9582                .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
9583                .access = PL1_R,
9584                .accessfn = access_aa32_tid1,
9585                .type = ARM_CP_CONST, .resetvalue = 0,
9586          };
9587          /* MPUIR is specific to PMSA V6+ */
9588          ARMCPRegInfo id_mpuir_reginfo = {
9589                .name = "MPUIR",
9590                .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
9591                .access = PL1_R, .type = ARM_CP_CONST,
9592                .resetvalue = cpu->pmsav7_dregion << 8
9593          };
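        /* The << 8 places the region count in MPUIR.DREGION, bits [15:8]. */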
9594          /* HMPUIR is specific to PMSA V8 */
9595          ARMCPRegInfo id_hmpuir_reginfo = {
9596              .name = "HMPUIR",
9597              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4,
9598              .access = PL2_R, .type = ARM_CP_CONST,
9599              .resetvalue = cpu->pmsav8r_hdregion
9600          };
9601          static const ARMCPRegInfo crn0_wi_reginfo = {
9602              .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
9603              .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
9604              .type = ARM_CP_NOP | ARM_CP_OVERRIDE
9605          };
9606  #ifdef CONFIG_USER_ONLY
9607          static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
9608              { .name = "MIDR_EL1",
9609                .exported_bits = R_MIDR_EL1_REVISION_MASK |
9610                                 R_MIDR_EL1_PARTNUM_MASK |
9611                                 R_MIDR_EL1_ARCHITECTURE_MASK |
9612                                 R_MIDR_EL1_VARIANT_MASK |
9613                                 R_MIDR_EL1_IMPLEMENTER_MASK },
9614              { .name = "REVIDR_EL1" },
9615          };
9616          modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
9617  #endif
9618          if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
9619              arm_feature(env, ARM_FEATURE_STRONGARM)) {
9620              size_t i;
9621              /*
9622               * Register the blanket "writes ignored" value first to cover the
9623               * whole space. Then update the specific ID registers to allow write
9624               * access, so that they ignore writes rather than causing them to
9625               * UNDEF.
9626               */
9627              define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
9628              for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) {
9629                  id_pre_v8_midr_cp_reginfo[i].access = PL1_RW;
9630              }
9631              for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) {
9632                  id_cp_reginfo[i].access = PL1_RW;
9633              }
9634              id_mpuir_reginfo.access = PL1_RW;
9635              id_tlbtr_reginfo.access = PL1_RW;
9636          }
9637          if (arm_feature(env, ARM_FEATURE_V8)) {
9638              define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
9639              if (!arm_feature(env, ARM_FEATURE_PMSA)) {
9640                  define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo);
9641              }
9642          } else {
9643              define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
9644          }
9645          define_arm_cp_regs(cpu, id_cp_reginfo);
9646          if (!arm_feature(env, ARM_FEATURE_PMSA)) {
9647              define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
9648          } else if (arm_feature(env, ARM_FEATURE_PMSA) &&
9649                     arm_feature(env, ARM_FEATURE_V8)) {
9650              uint32_t i = 0;
9651              char *tmp_string;
9652  
9653              define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
9654              define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo);
9655              define_arm_cp_regs(cpu, pmsav8r_cp_reginfo);
9656  
9657              /* The register aliases are only valid for the first 32 indexes */
9658              for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) {
9659                  uint8_t crm = 0b1000 | extract32(i, 1, 3);
9660                  uint8_t opc1 = extract32(i, 4, 1);
9661                  uint8_t opc2 = extract32(i, 0, 1) << 2;
9662  
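                /*
                 * Worked example: for i = 5, crm = 0b1000 | 0b010 = 10,
                 * opc1 = 0 and opc2 = 0b100, so PRBAR5 is encoded as
                 * p15, 0, c6, c10, 4 (and PRLAR5 as p15, 0, c6, c10, 5).
                 */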
9663                  tmp_string = g_strdup_printf("PRBAR%u", i);
9664                  ARMCPRegInfo tmp_prbarn_reginfo = {
9665                      .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
9666                      .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9667                      .access = PL1_RW, .resetvalue = 0,
9668                      .accessfn = access_tvm_trvm,
9669                      .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9670                  };
9671                  define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo);
9672                  g_free(tmp_string);
9673  
9674                  opc2 = extract32(i, 0, 1) << 2 | 0x1;
9675                  tmp_string = g_strdup_printf("PRLAR%u", i);
9676                  ARMCPRegInfo tmp_prlarn_reginfo = {
9677                      .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW,
9678                      .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9679                      .access = PL1_RW, .resetvalue = 0,
9680                      .accessfn = access_tvm_trvm,
9681                      .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9682                  };
9683                  define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo);
9684                  g_free(tmp_string);
9685              }
9686  
9687              /* The register aliases are only valid for the first 32 indexes */
9688              for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) {
9689                  uint8_t crm = 0b1000 | extract32(i, 1, 3);
9690                  uint8_t opc1 = 0b100 | extract32(i, 4, 1);
9691                  uint8_t opc2 = extract32(i, 0, 1) << 2;
9692  
9693                  tmp_string = g_strdup_printf("HPRBAR%u", i);
9694                  ARMCPRegInfo tmp_hprbarn_reginfo = {
9695                      .name = tmp_string,
9696                      .type = ARM_CP_NO_RAW,
9697                      .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9698                      .access = PL2_RW, .resetvalue = 0,
9699                      .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9700                  };
9701                  define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo);
9702                  g_free(tmp_string);
9703  
9704                  opc2 = extract32(i, 0, 1) << 2 | 0x1;
9705                  tmp_string = g_strdup_printf("HPRLAR%u", i);
9706                  ARMCPRegInfo tmp_hprlarn_reginfo = {
9707                      .name = tmp_string,
9708                      .type = ARM_CP_NO_RAW,
9709                      .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2,
9710                      .access = PL2_RW, .resetvalue = 0,
9711                      .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read
9712                  };
9713                  define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo);
9714                  g_free(tmp_string);
9715              }
9716          } else if (arm_feature(env, ARM_FEATURE_V7)) {
9717              define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
9718          }
9719      }
9720  
9721      if (arm_feature(env, ARM_FEATURE_MPIDR)) {
9722          ARMCPRegInfo mpidr_cp_reginfo[] = {
9723              { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
9724                .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
9725                .fgt = FGT_MPIDR_EL1,
9726                .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
9727          };
9728  #ifdef CONFIG_USER_ONLY
9729          static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
9730              { .name = "MPIDR_EL1",
9731                .fixed_bits = 0x0000000080000000 },
9732          };
9733          modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
9734  #endif
9735          define_arm_cp_regs(cpu, mpidr_cp_reginfo);
9736      }
9737  
9738      if (arm_feature(env, ARM_FEATURE_AUXCR)) {
9739          ARMCPRegInfo auxcr_reginfo[] = {
9740              { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
9741                .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
9742                .access = PL1_RW, .accessfn = access_tacr,
9743                .nv2_redirect_offset = 0x118,
9744                .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
9745              { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
9746                .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
9747                .access = PL2_RW, .type = ARM_CP_CONST,
9748                .resetvalue = 0 },
9749              { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
9750                .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
9751                .access = PL3_RW, .type = ARM_CP_CONST,
9752                .resetvalue = 0 },
9753          };
9754          define_arm_cp_regs(cpu, auxcr_reginfo);
9755          if (cpu_isar_feature(aa32_ac2, cpu)) {
9756              define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
9757          }
9758      }
9759  
9760      if (arm_feature(env, ARM_FEATURE_CBAR)) {
9761          /*
9762           * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
9763           * There are two flavours:
9764           *  (1) older 32-bit only cores have a simple 32-bit CBAR
9765           *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
9766           *      32-bit register visible to AArch32 at a different encoding
9767           *      to the "flavour 1" register and with the bits rearranged to
9768           *      be able to squash a 64-bit address into the 32-bit view.
9769           * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
9770           * in future if we support AArch32-only configs of some of the
9771           * AArch64 cores we might need to add a specific feature flag
9772           * to indicate cores with "flavour 2" CBAR.
9773           */
9774          if (arm_feature(env, ARM_FEATURE_V8)) {
9775              /* 32 bit view is [31:18] 0...0 [43:32]. */
9776              uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
9777                  | extract64(cpu->reset_cbar, 32, 12);
9778              ARMCPRegInfo cbar_reginfo[] = {
9779                  { .name = "CBAR",
9780                    .type = ARM_CP_CONST,
9781                    .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
9782                    .access = PL1_R, .resetvalue = cbar32 },
9783                  { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
9784                    .type = ARM_CP_CONST,
9785                    .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
9786                    .access = PL1_R, .resetvalue = cpu->reset_cbar },
9787              };
9788              /* We don't currently implement a read/write 64-bit CBAR */
9789              assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
9790              define_arm_cp_regs(cpu, cbar_reginfo);
9791          } else {
9792              ARMCPRegInfo cbar = {
9793                  .name = "CBAR",
9794                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
9795                  .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar,
9796                  .fieldoffset = offsetof(CPUARMState,
9797                                          cp15.c15_config_base_address)
9798              };
9799              if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
9800                  cbar.access = PL1_R;
9801                  cbar.fieldoffset = 0;
9802                  cbar.type = ARM_CP_CONST;
9803              }
9804              define_one_arm_cp_reg(cpu, &cbar);
9805          }
9806      }
9807  
9808      if (arm_feature(env, ARM_FEATURE_VBAR)) {
9809          static const ARMCPRegInfo vbar_cp_reginfo[] = {
9810              { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
9811                .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
9812                .access = PL1_RW, .writefn = vbar_write,
9813                .accessfn = access_nv1,
9814                .fgt = FGT_VBAR_EL1,
9815                .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
9816                .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
9817                                       offsetof(CPUARMState, cp15.vbar_ns) },
9818                .resetvalue = 0 },
9819          };
9820          define_arm_cp_regs(cpu, vbar_cp_reginfo);
9821      }
9822  
9823      /* Generic registers whose values depend on the implementation */
9824      {
9825          ARMCPRegInfo sctlr = {
9826              .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
9827              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
9828              .access = PL1_RW, .accessfn = access_tvm_trvm,
9829              .fgt = FGT_SCTLR_EL1,
9830              .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
9831              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
9832                                     offsetof(CPUARMState, cp15.sctlr_ns) },
9833              .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
9834              .raw_writefn = raw_write,
9835          };
9836          if (arm_feature(env, ARM_FEATURE_XSCALE)) {
9837              /*
9838               * Normally we would always end the TB on an SCTLR write, but Linux
9839               * arch/arm/mach-pxa/sleep.S expects two instructions following
9840               * an MMU enable to execute from cache.  Imitate this behaviour.
9841               */
9842              sctlr.type |= ARM_CP_SUPPRESS_TB_END;
9843          }
9844          define_one_arm_cp_reg(cpu, &sctlr);
9845  
9846          if (arm_feature(env, ARM_FEATURE_PMSA) &&
9847              arm_feature(env, ARM_FEATURE_V8)) {
9848              ARMCPRegInfo vsctlr = {
9849                  .name = "VSCTLR", .state = ARM_CP_STATE_AA32,
9850                  .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
9851                  .access = PL2_RW, .resetvalue = 0x0,
9852                  .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr),
9853              };
9854              define_one_arm_cp_reg(cpu, &vsctlr);
9855          }
9856      }
9857  
9858      if (cpu_isar_feature(aa64_lor, cpu)) {
9859          define_arm_cp_regs(cpu, lor_reginfo);
9860      }
9861      if (cpu_isar_feature(aa64_pan, cpu)) {
9862          define_one_arm_cp_reg(cpu, &pan_reginfo);
9863      }
9864  #ifndef CONFIG_USER_ONLY
9865      if (cpu_isar_feature(aa64_ats1e1, cpu)) {
9866          define_arm_cp_regs(cpu, ats1e1_reginfo);
9867      }
9868      if (cpu_isar_feature(aa32_ats1e1, cpu)) {
9869          define_arm_cp_regs(cpu, ats1cp_reginfo);
9870      }
9871  #endif
9872      if (cpu_isar_feature(aa64_uao, cpu)) {
9873          define_one_arm_cp_reg(cpu, &uao_reginfo);
9874      }
9875  
9876      if (cpu_isar_feature(aa64_dit, cpu)) {
9877          define_one_arm_cp_reg(cpu, &dit_reginfo);
9878      }
9879      if (cpu_isar_feature(aa64_ssbs, cpu)) {
9880          define_one_arm_cp_reg(cpu, &ssbs_reginfo);
9881      }
9882      if (cpu_isar_feature(any_ras, cpu)) {
9883          define_arm_cp_regs(cpu, minimal_ras_reginfo);
9884      }
9885  
9886      if (cpu_isar_feature(aa64_vh, cpu) ||
9887          cpu_isar_feature(aa64_debugv8p2, cpu)) {
9888          define_one_arm_cp_reg(cpu, &contextidr_el2);
9889      }
9890      if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
9891          define_arm_cp_regs(cpu, vhe_reginfo);
9892      }
9893  
9894      if (cpu_isar_feature(aa64_sve, cpu)) {
9895          define_arm_cp_regs(cpu, zcr_reginfo);
9896      }
9897  
9898      if (cpu_isar_feature(aa64_hcx, cpu)) {
9899          define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
9900      }
9901  
9902  #ifdef TARGET_AARCH64
9903      if (cpu_isar_feature(aa64_sme, cpu)) {
9904          define_arm_cp_regs(cpu, sme_reginfo);
9905      }
9906      if (cpu_isar_feature(aa64_pauth, cpu)) {
9907          define_arm_cp_regs(cpu, pauth_reginfo);
9908      }
9909      if (cpu_isar_feature(aa64_rndr, cpu)) {
9910          define_arm_cp_regs(cpu, rndr_reginfo);
9911      }
9912      if (cpu_isar_feature(aa64_tlbirange, cpu)) {
9913          define_arm_cp_regs(cpu, tlbirange_reginfo);
9914      }
9915      if (cpu_isar_feature(aa64_tlbios, cpu)) {
9916          define_arm_cp_regs(cpu, tlbios_reginfo);
9917      }
9918      /* Data Cache clean instructions up to PoP */
9919      if (cpu_isar_feature(aa64_dcpop, cpu)) {
9920          define_one_arm_cp_reg(cpu, dcpop_reg);
9921  
9922          if (cpu_isar_feature(aa64_dcpodp, cpu)) {
9923              define_one_arm_cp_reg(cpu, dcpodp_reg);
9924          }
9925      }
9926  
9927      /*
9928       * If full MTE is enabled, add all of the system registers.
9929       * If only "instructions available at EL0" are enabled,
9930       * then define only a RAZ/WI version of PSTATE.TCO.
9931       */
9932      if (cpu_isar_feature(aa64_mte, cpu)) {
9933          ARMCPRegInfo gmid_reginfo = {
9934              .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
9935              .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
9936              .access = PL1_R, .accessfn = access_aa64_tid5,
9937              .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
9938          };
9939          define_one_arm_cp_reg(cpu, &gmid_reginfo);
9940          define_arm_cp_regs(cpu, mte_reginfo);
9941          define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
9942      } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
9943          define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
9944          define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
9945      }
9946  
9947      if (cpu_isar_feature(aa64_scxtnum, cpu)) {
9948          define_arm_cp_regs(cpu, scxtnum_reginfo);
9949      }
9950  
9951      if (cpu_isar_feature(aa64_fgt, cpu)) {
9952          define_arm_cp_regs(cpu, fgt_reginfo);
9953      }
9954  
9955      if (cpu_isar_feature(aa64_rme, cpu)) {
9956          define_arm_cp_regs(cpu, rme_reginfo);
9957          if (cpu_isar_feature(aa64_mte, cpu)) {
9958              define_arm_cp_regs(cpu, rme_mte_reginfo);
9959          }
9960      }
9961  
9962      if (cpu_isar_feature(aa64_nv2, cpu)) {
9963          define_arm_cp_regs(cpu, nv2_reginfo);
9964      }
9965  
9966      if (cpu_isar_feature(aa64_nmi, cpu)) {
9967          define_arm_cp_regs(cpu, nmi_reginfo);
9968      }
9969  #endif
9970  
9971      if (cpu_isar_feature(any_predinv, cpu)) {
9972          define_arm_cp_regs(cpu, predinv_reginfo);
9973      }
9974  
9975      if (cpu_isar_feature(any_ccidx, cpu)) {
9976          define_arm_cp_regs(cpu, ccsidr2_reginfo);
9977      }
9978  
9979  #ifndef CONFIG_USER_ONLY
9980      /*
9981       * Register redirections and aliases must be done last,
9982       * after the registers from the other extensions have been defined.
9983       */
9984      if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
9985          define_arm_vh_e2h_redirects_aliases(cpu);
9986      }
9987  #endif
9988  }
9989  
9990  /*
9991   * Private utility function for define_one_arm_cp_reg_with_opaque():
9992   * add a single reginfo struct to the hash table.
9993   */
9994  static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
9995                                     void *opaque, CPState state,
9996                                     CPSecureState secstate,
9997                                     int crm, int opc1, int opc2,
9998                                     const char *name)
9999  {
10000      CPUARMState *env = &cpu->env;
10001      uint32_t key;
10002      ARMCPRegInfo *r2;
10003      bool is64 = r->type & ARM_CP_64BIT;
10004      bool ns = secstate & ARM_CP_SECSTATE_NS;
10005      int cp = r->cp;
10006      size_t name_len;
10007      bool make_const;
10008  
10009      switch (state) {
10010      case ARM_CP_STATE_AA32:
10011          /* We assume it is a cp15 register if the .cp field is left unset. */
10012          if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
10013              cp = 15;
10014          }
10015          key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
10016          break;
10017      case ARM_CP_STATE_AA64:
10018          /*
10019           * To allow abbreviation of ARMCPRegInfo definitions, we treat
10020           * cp == 0 as equivalent to the value for "standard guest-visible
10021           * sysreg".  STATE_BOTH definitions are also always "standard sysreg"
10022           * in their AArch64 view (the .cp value may be non-zero for the
10023           * benefit of the AArch32 view).
10024           */
10025          if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
10026              cp = CP_REG_ARM64_SYSREG_CP;
10027          }
10028          key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
10029          break;
10030      default:
10031          g_assert_not_reached();
10032      }
10033  
10034      /* Overriding of an existing definition must be explicitly requested. */
10035      if (!(r->type & ARM_CP_OVERRIDE)) {
10036          const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key);
10037          if (oldreg) {
10038              assert(oldreg->type & ARM_CP_OVERRIDE);
10039          }
10040      }
10041  
10042      /*
10043       * Eliminate registers that are not present because the EL is missing.
10044       * Doing this here makes it easier to put all registers for a given
10045       * feature into the same ARMCPRegInfo array and define them all at once.
10046       */
10047      make_const = false;
10048      if (arm_feature(env, ARM_FEATURE_EL3)) {
10049          /*
10050           * An EL2 register without EL2 but with EL3 is (usually) RES0.
10051           * See rule RJFFP in section D1.1.3 of DDI0487H.a.
10052           */
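        /*
         * .access holds a read/write bit pair per EL, with the lowest
         * EL in the least-significant bits, so the lowest set bit
         * identifies the minimum EL: e.g. an .access of PL2_RW gives
         * min_el == 2 below.
         */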
10053          int min_el = ctz32(r->access) / 2;
10054          if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
10055              if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
10056                  return;
10057              }
10058              make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
10059          }
10060      } else {
10061          CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
10062                                   ? PL2_RW : PL1_RW);
10063          if ((r->access & max_el) == 0) {
10064              return;
10065          }
10066      }
10067  
10068      /* Combine cpreg and name into one allocation. */
10069      name_len = strlen(name) + 1;
10070      r2 = g_malloc(sizeof(*r2) + name_len);
10071      *r2 = *r;
10072      r2->name = memcpy(r2 + 1, name, name_len);
10073  
10074      /*
10075       * Update fields to match the instantiation, overwriting wildcards
10076       * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
10077       */
10078      r2->cp = cp;
10079      r2->crm = crm;
10080      r2->opc1 = opc1;
10081      r2->opc2 = opc2;
10082      r2->state = state;
10083      r2->secure = secstate;
10084      if (opaque) {
10085          r2->opaque = opaque;
10086      }
10087  
10088      if (make_const) {
10089          /* This should not have been a very special register to begin with. */
10090          int old_special = r2->type & ARM_CP_SPECIAL_MASK;
10091          assert(old_special == 0 || old_special == ARM_CP_NOP);
10092          /*
10093           * Set the special function to CONST, retaining the other flags.
10094           * This is important for e.g. ARM_CP_SVE so that we still
10095           * take the SVE trap if CPTR_EL3.EZ == 0.
10096           */
10097          r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
10098          /*
10099           * Usually, these registers become RES0, but there are a few
10100           * special cases like VPIDR_EL2 which have a constant non-zero
10101           * value with writes ignored.
10102           */
10103          if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
10104              r2->resetvalue = 0;
10105          }
10106          /*
10107           * ARM_CP_CONST has precedence, so removing the callbacks and
10108           * offsets is not strictly necessary, but it makes later
10109           * debugging less confusing.
10110           */
10111          r2->readfn = NULL;
10112          r2->writefn = NULL;
10113          r2->raw_readfn = NULL;
10114          r2->raw_writefn = NULL;
10115          r2->resetfn = NULL;
10116          r2->fieldoffset = 0;
10117          r2->bank_fieldoffsets[0] = 0;
10118          r2->bank_fieldoffsets[1] = 0;
10119      } else {
10120          bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
10121  
10122          if (isbanked) {
10123              /*
10124               * Register is banked (using both entries in array).
10125               * Overwriting fieldoffset as the array is only used to define
10126               * banked registers but later only fieldoffset is used.
10127               */
10128              r2->fieldoffset = r->bank_fieldoffsets[ns];
10129          }
10130          if (state == ARM_CP_STATE_AA32) {
10131              if (isbanked) {
10132                  /*
10133                   * If the register is banked then we don't need to migrate or
10134                   * reset the 32-bit instance in certain cases:
10135                   *
10136                   * 1) If the register has both 32-bit and 64-bit instances
10137                   *    then we can count on the 64-bit instance taking care
10138                   *    of the non-secure bank.
10139                   * 2) If ARMv8 is enabled then we can count on a 64-bit
10140                   *    version taking care of the secure bank.  This requires
10141                   *    that separate 32 and 64-bit definitions are provided.
10142                   */
10143                  if ((r->state == ARM_CP_STATE_BOTH && ns) ||
10144                      (arm_feature(env, ARM_FEATURE_V8) && !ns)) {
10145                      r2->type |= ARM_CP_ALIAS;
10146                  }
10147              } else if ((secstate != r->secure) && !ns) {
10148                  /*
10149                   * The register is not banked so we only want to allow
10150                   * migration of the non-secure instance.
10151                   */
10152                  r2->type |= ARM_CP_ALIAS;
10153              }
10154  
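            /*
             * A STATE_BOTH register is backed by a uint64_t field; on
             * a big-endian host the low 32 bits seen by the AArch32
             * view live at fieldoffset + 4.
             */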
10155              if (HOST_BIG_ENDIAN &&
10156                  r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
10157                  r2->fieldoffset += sizeof(uint32_t);
10158              }
10159          }
10160      }
10161  
10162      /*
10163       * By convention, for wildcarded registers only the first
10164       * entry is used for migration; the others are marked as
10165       * ALIAS so we don't try to transfer the register
10166       * multiple times. Special registers (ie NOP/WFI) are
10167       * never migratable and not even raw-accessible.
10168       */
10169      if (r2->type & ARM_CP_SPECIAL_MASK) {
10170          r2->type |= ARM_CP_NO_RAW;
10171      }
10172      if (((r->crm == CP_ANY) && crm != 0) ||
10173          ((r->opc1 == CP_ANY) && opc1 != 0) ||
10174          ((r->opc2 == CP_ANY) && opc2 != 0)) {
10175          r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
10176      }
10177  
10178      /*
10179       * Check that raw accesses are either forbidden or handled. Note that
10180       * we can't assert this earlier because the setup of fieldoffset for
10181       * banked registers has to be done first.
10182       */
10183      if (!(r2->type & ARM_CP_NO_RAW)) {
10184          assert(!raw_accessors_invalid(r2));
10185      }
10186  
10187      g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
10188  }
10189  
10190  
10191  void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
10192                                         const ARMCPRegInfo *r, void *opaque)
10193  {
10194      /*
10195       * Define implementations of coprocessor registers.
10196       * We store these in a hashtable because typically
10197       * there are fewer than 150 registers in a space which
10198       * is 16*16*16*8*8 = 262144 in size.
10199       * Wildcarding is supported for the crm, opc1 and opc2 fields.
10200       * If a register is defined twice then the second definition is
10201       * used, so this can be used to define some generic registers and
10202       * then override them with implementation specific variations.
10203       * At least one of the original and the second definition should
10204       * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
10205       * against accidental use.
10206       *
10207       * The state field defines whether the register is to be
10208       * visible in the AArch32 or AArch64 execution state. If the
10209       * state is set to ARM_CP_STATE_BOTH then we synthesise a
10210       * reginfo structure for the AArch32 view, which sees the lower
10211       * 32 bits of the 64 bit register.
10212       *
10213       * Only registers visible in AArch64 may set r->opc0; opc0 cannot
10214       * be wildcarded. AArch64 registers are always considered to be 64
10215       * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
10216       * the register, if any.
10217       */
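    /*
     * For example, a hypothetical definition such as
     *   { .name = "FOO", .cp = 15, .crn = 0, .crm = 3,
     *     .opc1 = 0, .opc2 = CP_ANY, ... }
     * expands to eight entries with opc2 = 0..7; all but the opc2 = 0
     * entry are marked ARM_CP_ALIAS | ARM_CP_NO_GDB by
     * add_cpreg_to_hashtable(), so the value is migrated only once.
     */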
10218      int crm, opc1, opc2;
10219      int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
10220      int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
10221      int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
10222      int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
10223      int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
10224      int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
10225      CPState state;
10226  
10227      /* 64 bit registers have only CRm and Opc1 fields */
10228      assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
10229      /* op0 only exists in the AArch64 encodings */
10230      assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
10231      /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
10232      assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
10233      /*
10234       * This API is only for Arm's system coprocessors (14 and 15) or
10235       * (M-profile or v7A-and-earlier only) for implementation defined
10236       * coprocessors in the range 0..7.  Our decode assumes this, since
10237       * 8..13 can be used for other insns including VFP and Neon. See
10238       * valid_cp() in translate.c.  Assert here that we haven't tried
10239       * to use an invalid coprocessor number.
10240       */
10241      switch (r->state) {
10242      case ARM_CP_STATE_BOTH:
10243          /* 0 has a special meaning, but otherwise the same rules as AA32. */
10244          if (r->cp == 0) {
10245              break;
10246          }
10247          /* fall through */
10248      case ARM_CP_STATE_AA32:
10249          if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
10250              !arm_feature(&cpu->env, ARM_FEATURE_M)) {
10251              assert(r->cp >= 14 && r->cp <= 15);
10252          } else {
10253              assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
10254          }
10255          break;
10256      case ARM_CP_STATE_AA64:
10257          assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
10258          break;
10259      default:
10260          g_assert_not_reached();
10261      }
10262      /*
10263       * The AArch64 pseudocode CheckSystemAccess() specifies that op1
10264       * encodes a minimum access level for the register. We roll this
10265       * runtime check into our general permission check code, so check
10266       * here that the reginfo's specified permissions are strict enough
10267       * to encompass the generic architectural permission check.
10268       */
10269      if (r->state != ARM_CP_STATE_AA32) {
10270          CPAccessRights mask;
10271          switch (r->opc1) {
10272          case 0:
10273              /* min_EL EL1, but some accessible to EL0 via kernel ABI */
10274              mask = PL0U_R | PL1_RW;
10275              break;
10276          case 1: case 2:
10277              /* min_EL EL1 */
10278              mask = PL1_RW;
10279              break;
10280          case 3:
10281              /* min_EL EL0 */
10282              mask = PL0_RW;
10283              break;
10284          case 4:
10285          case 5:
10286              /* min_EL EL2 */
10287              mask = PL2_RW;
10288              break;
10289          case 6:
10290              /* min_EL EL3 */
10291              mask = PL3_RW;
10292              break;
10293          case 7:
10294              /* min_EL EL1, secure mode only (we don't check the latter) */
10295              mask = PL1_RW;
10296              break;
10297          default:
10298              /* broken reginfo with out-of-range opc1 */
10299              g_assert_not_reached();
10300          }
10301          /* assert our permissions are not too lax (stricter is fine) */
10302          assert((r->access & ~mask) == 0);
10303      }
10304  
10305      /*
10306       * Check that the register definition has enough info to handle
10307       * reads and writes if they are permitted.
10308       */
10309      if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) {
10310          if (r->access & PL3_R) {
10311              assert((r->fieldoffset ||
10312                     (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
10313                     r->readfn);
10314          }
10315          if (r->access & PL3_W) {
10316              assert((r->fieldoffset ||
10317                     (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
10318                     r->writefn);
10319          }
10320      }
10321  
10322      for (crm = crmmin; crm <= crmmax; crm++) {
10323          for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
10324              for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
10325                  for (state = ARM_CP_STATE_AA32;
10326                       state <= ARM_CP_STATE_AA64; state++) {
10327                      if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
10328                          continue;
10329                      }
10330                      if (state == ARM_CP_STATE_AA32) {
10331                          /*
10332                           * Under AArch32 CP registers can be common
10333                           * (same for secure and non-secure world) or banked.
10334                           */
10335                          char *name;
10336  
10337                          switch (r->secure) {
10338                          case ARM_CP_SECSTATE_S:
10339                          case ARM_CP_SECSTATE_NS:
10340                              add_cpreg_to_hashtable(cpu, r, opaque, state,
10341                                                     r->secure, crm, opc1, opc2,
10342                                                     r->name);
10343                              break;
10344                          case ARM_CP_SECSTATE_BOTH:
10345                              name = g_strdup_printf("%s_S", r->name);
10346                              add_cpreg_to_hashtable(cpu, r, opaque, state,
10347                                                     ARM_CP_SECSTATE_S,
10348                                                     crm, opc1, opc2, name);
10349                              g_free(name);
10350                              add_cpreg_to_hashtable(cpu, r, opaque, state,
10351                                                     ARM_CP_SECSTATE_NS,
10352                                                     crm, opc1, opc2, r->name);
10353                              break;
10354                          default:
10355                              g_assert_not_reached();
10356                          }
10357                      } else {
10358                          /*
10359                           * AArch64 registers get mapped to the
10360                           * non-secure instance of AArch32
10361                           */
10362                          add_cpreg_to_hashtable(cpu, r, opaque, state,
10363                                                 ARM_CP_SECSTATE_NS,
10364                                                 crm, opc1, opc2, r->name);
10365                      }
10366                  }
10367              }
10368          }
10369      }
10370  }
10371  
10372  /* Define a whole list of registers */
10373  void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
10374                                          void *opaque, size_t len)
10375  {
10376      size_t i;
10377      for (i = 0; i < len; ++i) {
10378          define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
10379      }
10380  }
10381  
10382  /*
10383   * Modify ARMCPRegInfo for access from userspace.
10384   *
10385   * This is a data driven modification directed by
10386   * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
10387   * user-space cannot alter any values and dynamic values pertaining to
10388   * execution state are hidden from user space view anyway.
10389   */
10390  void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len,
10391                                   const ARMCPRegUserSpaceInfo *mods,
10392                                   size_t mods_len)
10393  {
10394      for (size_t mi = 0; mi < mods_len; ++mi) {
10395          const ARMCPRegUserSpaceInfo *m = mods + mi;
10396          GPatternSpec *pat = NULL;
10397  
10398          if (m->is_glob) {
10399              pat = g_pattern_spec_new(m->name);
10400          }
10401          for (size_t ri = 0; ri < regs_len; ++ri) {
10402              ARMCPRegInfo *r = regs + ri;
10403  
10404              if (pat && g_pattern_match_string(pat, r->name)) {
10405                  r->type = ARM_CP_CONST;
10406                  r->access = PL0U_R;
10407                  r->resetvalue = 0;
10408                  /* continue */
10409              } else if (strcmp(r->name, m->name) == 0) {
10410                  r->type = ARM_CP_CONST;
10411                  r->access = PL0U_R;
10412                  r->resetvalue &= m->exported_bits;
10413                  r->resetvalue |= m->fixed_bits;
10414                  break;
10415              }
10416          }
10417          if (pat) {
10418              g_pattern_spec_free(pat);
10419          }
10420      }
10421  }
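
       /*
        * A minimal usage sketch (the mods list here is illustrative only):
        *
        *     static const ARMCPRegUserSpaceInfo mods[] = {
        *         { .name = "ID_AA64PFR0_EL1",
        *           .exported_bits = R_ID_AA64PFR0_FP_MASK },
        *         { .name = "ID_AA64PFR*_EL1_RESERVED", .is_glob = true },
        *     };
        *     modify_arm_cp_regs_with_len(regs, regs_len,
        *                                 mods, ARRAY_SIZE(mods));
        *
        * This would export only the FP field of ID_AA64PFR0_EL1 and turn
        * each matching reserved ID register into a constant RAZ register.
        * Note that a glob entry marks all matches and keeps scanning,
        * while an exact name stops at the first match.
        */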
10422  
10423  const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
10424  {
10425      return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp);
10426  }
10427  
10428  void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
10429                           uint64_t value)
10430  {
10431      /* Helper coprocessor write function for write-ignore registers */
10432  }
10433  
10434  uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
10435  {
10436      /* Helper coprocessor read function for read-as-zero registers */
10437      return 0;
10438  }
10439  
10440  void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
10441  {
10442      /* Helper coprocessor reset function for do-nothing-on-reset registers */
10443  }
10444  
10445  static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
10446  {
10447      /*
10448       * Return true if it is not valid for us to switch to
10449       * this CPU mode (ie all the UNPREDICTABLE cases in
10450       * the ARM ARM CPSRWriteByInstr pseudocode).
10451       */
10452  
10453      /* Changes to or from Hyp via MSR and CPS are illegal. */
10454      if (write_type == CPSRWriteByInstr &&
10455          ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
10456           mode == ARM_CPU_MODE_HYP)) {
10457          return 1;
10458      }
10459  
10460      switch (mode) {
10461      case ARM_CPU_MODE_USR:
10462          return 0;
10463      case ARM_CPU_MODE_SYS:
10464      case ARM_CPU_MODE_SVC:
10465      case ARM_CPU_MODE_ABT:
10466      case ARM_CPU_MODE_UND:
10467      case ARM_CPU_MODE_IRQ:
10468      case ARM_CPU_MODE_FIQ:
10469          /*
10470           * Note that we don't implement the IMPDEF NSACR.RFR which in v7
10471           * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
10472           */
10473          /*
10474           * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
10475           * and CPS are treated as illegal mode changes.
10476           */
10477          if (write_type == CPSRWriteByInstr &&
10478              (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
10479              (arm_hcr_el2_eff(env) & HCR_TGE)) {
10480              return 1;
10481          }
10482          return 0;
10483      case ARM_CPU_MODE_HYP:
10484          return !arm_is_el2_enabled(env) || arm_current_el(env) < 2;
10485      case ARM_CPU_MODE_MON:
10486          return arm_current_el(env) < 3;
10487      default:
10488          return 1;
10489      }
10490  }
10491  
10492  uint32_t cpsr_read(CPUARMState *env)
10493  {
10494      int ZF;
10495      ZF = (env->ZF == 0);
10496      return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
10497          (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
10498          | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
10499          | ((env->condexec_bits & 0xfc) << 8)
10500          | (env->GE << 16) | (env->daif & CPSR_AIF);
10501  }
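
       /*
        * For reference, the packing above assembles the architectural CPSR
        * from the cached fields:
        *   N = bit 31, Z = 30, C = 29, V = 28, Q = 27, IT[1:0] = 26:25,
        *   GE[3:0] = 19:16, IT[7:2] = 15:10, A/I/F = 8:6, T = 5;
        * every other bit (M[4:0], E, IL, PAN, ...) comes directly from
        * env->uncached_cpsr.
        */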
10502  
10503  void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
10504                  CPSRWriteType write_type)
10505  {
10506      uint32_t changed_daif;
10507      bool rebuild_hflags = (write_type != CPSRWriteRaw) &&
10508          (mask & (CPSR_M | CPSR_E | CPSR_IL));
10509  
10510      if (mask & CPSR_NZCV) {
10511          env->ZF = (~val) & CPSR_Z;
10512          env->NF = val;
10513          env->CF = (val >> 29) & 1;
10514          env->VF = (val << 3) & 0x80000000;
10515      }
10516      if (mask & CPSR_Q) {
10517          env->QF = ((val & CPSR_Q) != 0);
10518      }
10519      if (mask & CPSR_T) {
10520          env->thumb = ((val & CPSR_T) != 0);
10521      }
10522      if (mask & CPSR_IT_0_1) {
10523          env->condexec_bits &= ~3;
10524          env->condexec_bits |= (val >> 25) & 3;
10525      }
10526      if (mask & CPSR_IT_2_7) {
10527          env->condexec_bits &= 3;
10528          env->condexec_bits |= (val >> 8) & 0xfc;
10529      }
10530      if (mask & CPSR_GE) {
10531          env->GE = (val >> 16) & 0xf;
10532      }
10533  
10534      /*
10535       * In a V7 implementation that includes the security extensions but does
10536       * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
10537       * whether non-secure software is allowed to change the CPSR_F and CPSR_A
10538       * bits respectively.
10539       *
10540       * In a V8 implementation, it is permitted for privileged software to
10541       * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
10542       */
10543      if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
10544          arm_feature(env, ARM_FEATURE_EL3) &&
10545          !arm_feature(env, ARM_FEATURE_EL2) &&
10546          !arm_is_secure(env)) {
10547  
10548          changed_daif = (env->daif ^ val) & mask;
10549  
10550          if (changed_daif & CPSR_A) {
10551              /*
10552               * Check to see if we are allowed to change the masking of async
10553               * abort exceptions from a non-secure state.
10554               */
10555              if (!(env->cp15.scr_el3 & SCR_AW)) {
10556                  qemu_log_mask(LOG_GUEST_ERROR,
10557                                "Ignoring attempt to switch CPSR_A flag from "
10558                                "non-secure world with SCR.AW bit clear\n");
10559                  mask &= ~CPSR_A;
10560              }
10561          }
10562  
10563          if (changed_daif & CPSR_F) {
10564              /*
10565               * Check to see if we are allowed to change the masking of FIQ
10566               * exceptions from a non-secure state.
10567               */
10568              if (!(env->cp15.scr_el3 & SCR_FW)) {
10569                  qemu_log_mask(LOG_GUEST_ERROR,
10570                                "Ignoring attempt to switch CPSR_F flag from "
10571                                "non-secure world with SCR.FW bit clear\n");
10572                  mask &= ~CPSR_F;
10573              }
10574  
10575              /*
10576               * Check whether non-maskable FIQ (NMFI) support is enabled.
10577               * If this bit is set software is not allowed to mask
10578               * FIQs, but is allowed to set CPSR_F to 0.
10579               */
10580              if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
10581                  (val & CPSR_F)) {
10582                  qemu_log_mask(LOG_GUEST_ERROR,
10583                                "Ignoring attempt to enable CPSR_F flag "
10584                                "(non-maskable FIQ [NMFI] support enabled)\n");
10585                  mask &= ~CPSR_F;
10586              }
10587          }
10588      }
10589  
10590      env->daif &= ~(CPSR_AIF & mask);
10591      env->daif |= val & CPSR_AIF & mask;
10592  
10593      if (write_type != CPSRWriteRaw &&
10594          ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
10595          if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
10596              /*
10597               * Note that we can only get here in USR mode if this is a
10598               * gdb stub write; for this case we follow the architectural
10599               * behaviour for guest writes in USR mode of ignoring an attempt
10600               * to switch mode. (Those are caught by translate.c for writes
10601               * triggered by guest instructions.)
10602               */
10603              mask &= ~CPSR_M;
10604          } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
10605              /*
10606               * Attempt to switch to an invalid mode: this is UNPREDICTABLE in
10607               * v7, and has defined behaviour in v8:
10608               *  + leave CPSR.M untouched
10609               *  + allow changes to the other CPSR fields
10610               *  + set PSTATE.IL
10611               * For user changes via the GDB stub, we don't set PSTATE.IL,
10612               * as this would be unnecessarily harsh for a user error.
10613               */
10614              mask &= ~CPSR_M;
10615              if (write_type != CPSRWriteByGDBStub &&
10616                  arm_feature(env, ARM_FEATURE_V8)) {
10617                  mask |= CPSR_IL;
10618                  val |= CPSR_IL;
10619              }
10620              qemu_log_mask(LOG_GUEST_ERROR,
10621                            "Illegal AArch32 mode switch attempt from %s to %s\n",
10622                            aarch32_mode_name(env->uncached_cpsr),
10623                            aarch32_mode_name(val));
10624          } else {
10625              qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
10626                            write_type == CPSRWriteExceptionReturn ?
10627                            "Exception return from AArch32" :
10628                            "AArch32 mode switch from",
10629                            aarch32_mode_name(env->uncached_cpsr),
10630                            aarch32_mode_name(val), env->regs[15]);
10631              switch_mode(env, val & CPSR_M);
10632          }
10633      }
10634      mask &= ~CACHED_CPSR_BITS;
10635      env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
10636      if (tcg_enabled() && rebuild_hflags) {
10637          arm_rebuild_hflags(env);
10638      }
10639  }
10640  
10641  #ifdef CONFIG_USER_ONLY
10642  
10643  static void switch_mode(CPUARMState *env, int mode)
10644  {
10645      ARMCPU *cpu = env_archcpu(env);
10646  
10647      if (mode != ARM_CPU_MODE_USR) {
10648          cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
10649      }
10650  }
10651  
10652  uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10653                                   uint32_t cur_el, bool secure)
10654  {
10655      return 1;
10656  }
10657  
10658  void aarch64_sync_64_to_32(CPUARMState *env)
10659  {
10660      g_assert_not_reached();
10661  }
10662  
10663  #else
10664  
10665  static void switch_mode(CPUARMState *env, int mode)
10666  {
10667      int old_mode;
10668      int i;
10669  
10670      old_mode = env->uncached_cpsr & CPSR_M;
10671      if (mode == old_mode) {
10672          return;
10673      }
10674  
10675      if (old_mode == ARM_CPU_MODE_FIQ) {
10676          memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
10677          memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
10678      } else if (mode == ARM_CPU_MODE_FIQ) {
10679          memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
10680          memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
10681      }
10682  
10683      i = bank_number(old_mode);
10684      env->banked_r13[i] = env->regs[13];
10685      env->banked_spsr[i] = env->spsr;
10686  
10687      i = bank_number(mode);
10688      env->regs[13] = env->banked_r13[i];
10689      env->spsr = env->banked_spsr[i];
10690  
10691      env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
10692      env->regs[14] = env->banked_r14[r14_bank_number(mode)];
10693  }
10694  
10695  /*
10696   * Physical Interrupt Target EL Lookup Table
10697   *
10698   * [ From ARM ARM section G1.13.4 (Table G1-15) ]
10699   *
10700   * The below multi-dimensional table is used for looking up the target
10701   * exception level given numerous condition criteria.  Specifically, the
10702   * target EL is based on SCR and HCR routing controls as well as the
10703   * currently executing EL and secure state.
10704   *
10705   *    Dimensions:
10706   *    target_el_table[2][2][2][2][2][4]
10707   *                    |  |  |  |  |  +--- Current EL
10708   *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
10709   *                    |  |  |  +--------- HCR mask override
10710   *                    |  |  +------------ SCR exec state control
10711   *                    |  +--------------- SCR mask override
10712   *                    +------------------ 32-bit(0)/64-bit(1) EL3
10713   *
10714   *    The table values are as such:
10715   *    0-3 = EL0-EL3
10716   *     -1 = Cannot occur
10717   *
10718   * The ARM ARM target EL table includes entries indicating that an "exception
10719   * is not taken".  The two cases where this is applicable are:
10720   *    1) An exception is taken from EL3 but the SCR does not have the exception
10721   *    routed to EL3.
10722   *    2) An exception is taken from EL2 but the HCR does not have the exception
10723   *    routed to EL2.
10724   * In these two cases, the below table contains a target of EL1.  This value is
10725   * returned as it is expected that the consumer of the table data will check
10726   * for "target EL >= current EL" to ensure the exception is not taken.
10727   *
10728   *            SCR     HCR
10729   *         64  EA     AMO                 From
10730   *        BIT IRQ     IMO      Non-secure         Secure
10731   *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
10732   */
10733  static const int8_t target_el_table[2][2][2][2][2][4] = {
10734      {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10735         {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
10736        {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
10737         {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
10738       {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10739         {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
10740        {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
10741         {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
10742      {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
10743         {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 2,  2, -1,  1 },},},
10744        {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1,  1,  1 },},
10745         {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 2,  2,  2,  1 },},},},
10746       {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
10747         {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
10748        {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},
10749         {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3,  3,  3 },},},},},
10750  };
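
       /*
        * Worked example: a physical IRQ taken from non-secure EL0, with an
        * AArch64 EL3 (is64 = 1), SCR.IRQ = 0, SCR.RW = 1 and HCR.IMO = 0,
        * indexes target_el_table[1][0][1][0][0][0] == 1, so the interrupt
        * is taken to EL1.  Setting HCR.IMO flips the hcr index and the
        * same lookup yields EL2 instead.
        */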
10751  
10752  /*
10753   * Determine the target EL for physical exceptions
10754   */
10755  uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
10756                                   uint32_t cur_el, bool secure)
10757  {
10758      CPUARMState *env = cpu_env(cs);
10759      bool rw;
10760      bool scr;
10761      bool hcr;
10762      int target_el;
10763      /* Is the highest EL AArch64? */
10764      bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
10765      uint64_t hcr_el2;
10766  
10767      if (arm_feature(env, ARM_FEATURE_EL3)) {
10768          rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
10769      } else {
10770          /*
10771           * Either EL2 is the highest EL (and so the EL2 register width
10772           * is given by is64); or there is no EL2 or EL3, in which case
10773           * the value of 'rw' does not affect the table lookup anyway.
10774           */
10775          rw = is64;
10776      }
10777  
10778      hcr_el2 = arm_hcr_el2_eff(env);
10779      switch (excp_idx) {
10780      case EXCP_IRQ:
10781      case EXCP_NMI:
10782          scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
10783          hcr = hcr_el2 & HCR_IMO;
10784          break;
10785      case EXCP_FIQ:
10786          scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
10787          hcr = hcr_el2 & HCR_FMO;
10788          break;
10789      default:
10790          scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
10791          hcr = hcr_el2 & HCR_AMO;
10792          break;
10793      }
10794  
10795      /*
10796       * For these purposes, TGE and AMO/IMO/FMO both force the
10797       * interrupt to EL2.  Fold TGE into the bit extracted above.
10798       */
10799      hcr |= (hcr_el2 & HCR_TGE) != 0;
10800  
10801      /* Perform a table-lookup for the target EL given the current state */
10802      target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
10803  
10804      assert(target_el > 0);
10805  
10806      return target_el;
10807  }
10808  
10809  void arm_log_exception(CPUState *cs)
10810  {
10811      int idx = cs->exception_index;
10812  
10813      if (qemu_loglevel_mask(CPU_LOG_INT)) {
10814          const char *exc = NULL;
10815          static const char * const excnames[] = {
10816              [EXCP_UDEF] = "Undefined Instruction",
10817              [EXCP_SWI] = "SVC",
10818              [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
10819              [EXCP_DATA_ABORT] = "Data Abort",
10820              [EXCP_IRQ] = "IRQ",
10821              [EXCP_FIQ] = "FIQ",
10822              [EXCP_BKPT] = "Breakpoint",
10823              [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
10824              [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
10825              [EXCP_HVC] = "Hypervisor Call",
10826              [EXCP_HYP_TRAP] = "Hypervisor Trap",
10827              [EXCP_SMC] = "Secure Monitor Call",
10828              [EXCP_VIRQ] = "Virtual IRQ",
10829              [EXCP_VFIQ] = "Virtual FIQ",
10830              [EXCP_SEMIHOST] = "Semihosting call",
10831              [EXCP_NOCP] = "v7M NOCP UsageFault",
10832              [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
10833              [EXCP_STKOF] = "v8M STKOF UsageFault",
10834              [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
10835              [EXCP_LSERR] = "v8M LSERR UsageFault",
10836              [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
10837              [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
10838              [EXCP_VSERR] = "Virtual SERR",
10839              [EXCP_GPC] = "Granule Protection Check",
10840              [EXCP_NMI] = "NMI",
10841              [EXCP_VINMI] = "Virtual IRQ NMI",
10842              [EXCP_VFNMI] = "Virtual FIQ NMI",
10843          };
10844  
10845          if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
10846              exc = excnames[idx];
10847          }
10848          if (!exc) {
10849              exc = "unknown";
10850          }
10851          qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n",
10852                        idx, exc, cs->cpu_index);
10853      }
10854  }
10855  
10856  /*
10857   * Function used to synchronize QEMU's AArch64 register set with AArch32
10858   * register set.  This is necessary when switching between AArch32 and AArch64
10859   * execution state.
10860   */
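       /*
        * For reference, the architectural mapping of the AArch32 registers
        * onto the AArch64 X registers, as used by both sync functions:
        *   x0-x7   : r0-r7          x8-x12  : r8-r12 (usr)
        *   x13 : SP_usr   x14 : LR_usr   x15 : SP_hyp
        *   x16 : LR_irq   x17 : SP_irq   x18 : LR_svc   x19 : SP_svc
        *   x20 : LR_abt   x21 : SP_abt   x22 : LR_und   x23 : SP_und
        *   x24-x30 : r8_fiq-r14_fiq
        */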
10861  void aarch64_sync_32_to_64(CPUARMState *env)
10862  {
10863      int i;
10864      uint32_t mode = env->uncached_cpsr & CPSR_M;
10865  
10866      /* We can blanket copy R[0:7] to X[0:7] */
10867      for (i = 0; i < 8; i++) {
10868          env->xregs[i] = env->regs[i];
10869      }
10870  
10871      /*
10872       * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
10873       * Otherwise, they come from the banked user regs.
10874       */
10875      if (mode == ARM_CPU_MODE_FIQ) {
10876          for (i = 8; i < 13; i++) {
10877              env->xregs[i] = env->usr_regs[i - 8];
10878          }
10879      } else {
10880          for (i = 8; i < 13; i++) {
10881              env->xregs[i] = env->regs[i];
10882          }
10883      }
10884  
10885      /*
10886       * Registers x13-x23 are the various mode SP and LR registers. Registers
10887       * r13 and r14 are only copied if we are in that mode; otherwise we copy
10888       * from the mode banked register.
10889       */
10890      if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10891          env->xregs[13] = env->regs[13];
10892          env->xregs[14] = env->regs[14];
10893      } else {
10894          env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
10895           /* HYP is an exception: it shares the USR r14, so copy the live r14 */
10896          if (mode == ARM_CPU_MODE_HYP) {
10897              env->xregs[14] = env->regs[14];
10898          } else {
10899              env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
10900          }
10901      }
10902  
10903      if (mode == ARM_CPU_MODE_HYP) {
10904          env->xregs[15] = env->regs[13];
10905      } else {
10906          env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
10907      }
10908  
10909      if (mode == ARM_CPU_MODE_IRQ) {
10910          env->xregs[16] = env->regs[14];
10911          env->xregs[17] = env->regs[13];
10912      } else {
10913          env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
10914          env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
10915      }
10916  
10917      if (mode == ARM_CPU_MODE_SVC) {
10918          env->xregs[18] = env->regs[14];
10919          env->xregs[19] = env->regs[13];
10920      } else {
10921          env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
10922          env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
10923      }
10924  
10925      if (mode == ARM_CPU_MODE_ABT) {
10926          env->xregs[20] = env->regs[14];
10927          env->xregs[21] = env->regs[13];
10928      } else {
10929          env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
10930          env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
10931      }
10932  
10933      if (mode == ARM_CPU_MODE_UND) {
10934          env->xregs[22] = env->regs[14];
10935          env->xregs[23] = env->regs[13];
10936      } else {
10937          env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
10938          env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
10939      }
10940  
10941      /*
10942       * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
10943       * mode, then we can copy from r8-r14.  Otherwise, we copy from the
10944       * FIQ bank for r8-r14.
10945       */
10946      if (mode == ARM_CPU_MODE_FIQ) {
10947          for (i = 24; i < 31; i++) {
10948              env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
10949          }
10950      } else {
10951          for (i = 24; i < 29; i++) {
10952              env->xregs[i] = env->fiq_regs[i - 24];
10953          }
10954          env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
10955          env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
10956      }
10957  
10958      env->pc = env->regs[15];
10959  }
10960  
10961  /*
10962   * Function used to synchronize QEMU's AArch32 register set with AArch64
10963   * register set.  This is necessary when switching between AArch32 and AArch64
10964   * execution state.
10965   */
10966  void aarch64_sync_64_to_32(CPUARMState *env)
10967  {
10968      int i;
10969      uint32_t mode = env->uncached_cpsr & CPSR_M;
10970  
10971      /* We can blanket copy X[0:7] to R[0:7] */
10972      for (i = 0; i < 8; i++) {
10973          env->regs[i] = env->xregs[i];
10974      }
10975  
10976      /*
10977       * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
10978       * Otherwise, we copy x8-x12 into the banked user regs.
10979       */
10980      if (mode == ARM_CPU_MODE_FIQ) {
10981          for (i = 8; i < 13; i++) {
10982              env->usr_regs[i - 8] = env->xregs[i];
10983          }
10984      } else {
10985          for (i = 8; i < 13; i++) {
10986              env->regs[i] = env->xregs[i];
10987          }
10988      }
10989  
10990      /*
10991       * Registers r13 & r14 depend on the current mode.
10992       * If we are in a given mode, we copy the corresponding x registers to r13
10993       * and r14.  Otherwise, we copy the x register to the banked r13 and r14
10994       * for the mode.
10995       */
10996      if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
10997          env->regs[13] = env->xregs[13];
10998          env->regs[14] = env->xregs[14];
10999      } else {
11000          env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
11001  
11002          /*
11003           * HYP is an exception in that it does not have its own banked r14 but
11004           * shares the USR r14
11005           */
11006          if (mode == ARM_CPU_MODE_HYP) {
11007              env->regs[14] = env->xregs[14];
11008          } else {
11009              env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
11010          }
11011      }
11012  
11013      if (mode == ARM_CPU_MODE_HYP) {
11014          env->regs[13] = env->xregs[15];
11015      } else {
11016          env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
11017      }
11018  
11019      if (mode == ARM_CPU_MODE_IRQ) {
11020          env->regs[14] = env->xregs[16];
11021          env->regs[13] = env->xregs[17];
11022      } else {
11023          env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
11024          env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
11025      }
11026  
11027      if (mode == ARM_CPU_MODE_SVC) {
11028          env->regs[14] = env->xregs[18];
11029          env->regs[13] = env->xregs[19];
11030      } else {
11031          env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
11032          env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
11033      }
11034  
11035      if (mode == ARM_CPU_MODE_ABT) {
11036          env->regs[14] = env->xregs[20];
11037          env->regs[13] = env->xregs[21];
11038      } else {
11039          env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
11040          env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
11041      }
11042  
11043      if (mode == ARM_CPU_MODE_UND) {
11044          env->regs[14] = env->xregs[22];
11045          env->regs[13] = env->xregs[23];
11046      } else {
11047          env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
11048          env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
11049      }
11050  
11051      /*
11052       * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
11053       * mode, then we can copy to r8-r14.  Otherwise, we copy to the
11054       * FIQ bank for r8-r14.
11055       */
11056      if (mode == ARM_CPU_MODE_FIQ) {
11057          for (i = 24; i < 31; i++) {
11058              env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
11059          }
11060      } else {
11061          for (i = 24; i < 29; i++) {
11062              env->fiq_regs[i - 24] = env->xregs[i];
11063          }
11064          env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
11065          env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
11066      }
11067  
11068      env->regs[15] = env->pc;
11069  }
11070  
11071  static void take_aarch32_exception(CPUARMState *env, int new_mode,
11072                                     uint32_t mask, uint32_t offset,
11073                                     uint32_t newpc)
11074  {
11075      int new_el;
11076  
11077      /* Change the CPU state so as to actually take the exception. */
11078      switch_mode(env, new_mode);
11079  
11080      /*
11081       * For exceptions taken to AArch32 we must clear the SS bit in both
11082       * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
11083       */
11084      env->pstate &= ~PSTATE_SS;
11085      env->spsr = cpsr_read(env);
11086      /* Clear IT bits.  */
11087      env->condexec_bits = 0;
11088      /* Switch to the new mode, and to the correct instruction set.  */
11089      env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
11090  
11091      /* This must be after mode switching. */
11092      new_el = arm_current_el(env);
11093  
11094      /* Set new mode endianness */
11095      env->uncached_cpsr &= ~CPSR_E;
11096      if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
11097          env->uncached_cpsr |= CPSR_E;
11098      }
11099      /* J and IL must always be cleared for exception entry */
11100      env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
11101      env->daif |= mask;
11102  
11103      if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) {
11104          if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) {
11105              env->uncached_cpsr |= CPSR_SSBS;
11106          } else {
11107              env->uncached_cpsr &= ~CPSR_SSBS;
11108          }
11109      }
11110  
11111      if (new_mode == ARM_CPU_MODE_HYP) {
11112          env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
11113          env->elr_el[2] = env->regs[15];
11114      } else {
11115          /* CPSR.PAN is normally preserved unless...  */
11116          if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
11117              switch (new_el) {
11118              case 3:
11119                  if (!arm_is_secure_below_el3(env)) {
11120                      /* ... the target is EL3, from non-secure state.  */
11121                      env->uncached_cpsr &= ~CPSR_PAN;
11122                      break;
11123                  }
11124                  /* ... the target is EL3, from secure state ... */
11125                  /* fall through */
11126              case 1:
11127                  /* ... the target is EL1 and SCTLR.SPAN is 0.  */
11128                  if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
11129                      env->uncached_cpsr |= CPSR_PAN;
11130                  }
11131                  break;
11132              }
11133          }
11134          /*
11135           * This is a lie, as there was no c1_sys on V4T/V5; but who cares,
11136           * we should just guard the thumb mode on V4.
11137           */
11138          if (arm_feature(env, ARM_FEATURE_V4T)) {
11139              env->thumb =
11140                  (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
11141          }
11142          env->regs[14] = env->regs[15] + offset;
11143      }
11144      env->regs[15] = newpc;
11145  
11146      if (tcg_enabled()) {
11147          arm_rebuild_hflags(env);
11148      }
11149  }
11150  
11151  static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
11152  {
11153      /*
11154       * Handle exception entry to Hyp mode; this is sufficiently
11155       * different to entry to other AArch32 modes that we handle it
11156       * separately here.
11157       *
11158       * The vector table entry used is always the 0x14 Hyp mode entry point,
11159       * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp.
11160       * The offset applied to the preferred return address is always zero
11161       * (see DDI0487C.a section G1.12.3).
11162       * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
11163       */
11164      uint32_t addr, mask;
11165      ARMCPU *cpu = ARM_CPU(cs);
11166      CPUARMState *env = &cpu->env;
11167  
11168      switch (cs->exception_index) {
11169      case EXCP_UDEF:
11170          addr = 0x04;
11171          break;
11172      case EXCP_SWI:
11173          addr = 0x08;
11174          break;
11175      case EXCP_BKPT:
11176          /* Fall through to prefetch abort.  */
11177      case EXCP_PREFETCH_ABORT:
11178          env->cp15.ifar_s = env->exception.vaddress;
11179          qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
11180                        (uint32_t)env->exception.vaddress);
11181          addr = 0x0c;
11182          break;
11183      case EXCP_DATA_ABORT:
11184          env->cp15.dfar_s = env->exception.vaddress;
11185          qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
11186                        (uint32_t)env->exception.vaddress);
11187          addr = 0x10;
11188          break;
11189      case EXCP_IRQ:
11190          addr = 0x18;
11191          break;
11192      case EXCP_FIQ:
11193          addr = 0x1c;
11194          break;
11195      case EXCP_HVC:
11196          addr = 0x08;
11197          break;
11198      case EXCP_HYP_TRAP:
11199          addr = 0x14;
11200          break;
11201      default:
11202          cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11203      }
11204  
11205      if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
11206          if (!arm_feature(env, ARM_FEATURE_V8)) {
11207              /*
11208               * QEMU syndrome values are v8-style. v7 has the IL bit
11209               * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
11210               * If this is a v7 CPU, squash the IL bit in those cases.
11211               */
11212              if (cs->exception_index == EXCP_PREFETCH_ABORT ||
11213                  (cs->exception_index == EXCP_DATA_ABORT &&
11214                   !(env->exception.syndrome & ARM_EL_ISV)) ||
11215                  syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
11216                  env->exception.syndrome &= ~ARM_EL_IL;
11217              }
11218          }
11219          env->cp15.esr_el[2] = env->exception.syndrome;
11220      }
11221  
11222      if (arm_current_el(env) != 2 && addr < 0x14) {
11223          addr = 0x14;
11224      }
11225  
11226      mask = 0;
11227      if (!(env->cp15.scr_el3 & SCR_EA)) {
11228          mask |= CPSR_A;
11229      }
11230      if (!(env->cp15.scr_el3 & SCR_IRQ)) {
11231          mask |= CPSR_I;
11232      }
11233      if (!(env->cp15.scr_el3 & SCR_FIQ)) {
11234          mask |= CPSR_F;
11235      }
11236  
11237      addr += env->cp15.hvbar;
11238  
11239      take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
11240  }
11241  
11242  static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
11243  {
11244      ARMCPU *cpu = ARM_CPU(cs);
11245      CPUARMState *env = &cpu->env;
11246      uint32_t addr;
11247      uint32_t mask;
11248      int new_mode;
11249      uint32_t offset;
11250      uint32_t moe;
11251  
11252      /* If this is a debug exception we must update the DBGDSCR.MOE bits */
11253      switch (syn_get_ec(env->exception.syndrome)) {
11254      case EC_BREAKPOINT:
11255      case EC_BREAKPOINT_SAME_EL:
11256          moe = 1;
11257          break;
11258      case EC_WATCHPOINT:
11259      case EC_WATCHPOINT_SAME_EL:
11260          moe = 10;
11261          break;
11262      case EC_AA32_BKPT:
11263          moe = 3;
11264          break;
11265      case EC_VECTORCATCH:
11266          moe = 5;
11267          break;
11268      default:
11269          moe = 0;
11270          break;
11271      }
11272  
11273      if (moe) {
11274          env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
11275      }
11276  
11277      if (env->exception.target_el == 2) {
11278          /* Debug exceptions are reported differently on AArch32 */
11279          switch (syn_get_ec(env->exception.syndrome)) {
11280          case EC_BREAKPOINT:
11281          case EC_BREAKPOINT_SAME_EL:
11282          case EC_AA32_BKPT:
11283          case EC_VECTORCATCH:
11284              env->exception.syndrome = syn_insn_abort(arm_current_el(env) == 2,
11285                                                       0, 0, 0x22);
11286              break;
11287          case EC_WATCHPOINT:
11288              env->exception.syndrome = syn_set_ec(env->exception.syndrome,
11289                                                   EC_DATAABORT);
11290              break;
11291          case EC_WATCHPOINT_SAME_EL:
11292              env->exception.syndrome = syn_set_ec(env->exception.syndrome,
11293                                                   EC_DATAABORT_SAME_EL);
11294              break;
11295          }
11296          arm_cpu_do_interrupt_aarch32_hyp(cs);
11297          return;
11298      }
11299  
11300      switch (cs->exception_index) {
11301      case EXCP_UDEF:
11302          new_mode = ARM_CPU_MODE_UND;
11303          addr = 0x04;
11304          mask = CPSR_I;
11305          if (env->thumb) {
11306              offset = 2;
11307          } else {
11308              offset = 4;
11309          }
11310          break;
11311      case EXCP_SWI:
11312          new_mode = ARM_CPU_MODE_SVC;
11313          addr = 0x08;
11314          mask = CPSR_I;
11315          /* The PC already points to the next instruction.  */
11316          offset = 0;
11317          break;
11318      case EXCP_BKPT:
11319          /* Fall through to prefetch abort.  */
11320      case EXCP_PREFETCH_ABORT:
11321          A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
11322          A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
11323          qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
11324                        env->exception.fsr, (uint32_t)env->exception.vaddress);
11325          new_mode = ARM_CPU_MODE_ABT;
11326          addr = 0x0c;
11327          mask = CPSR_A | CPSR_I;
11328          offset = 4;
11329          break;
11330      case EXCP_DATA_ABORT:
11331          A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
11332          A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
11333          qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
11334                        env->exception.fsr,
11335                        (uint32_t)env->exception.vaddress);
11336          new_mode = ARM_CPU_MODE_ABT;
11337          addr = 0x10;
11338          mask = CPSR_A | CPSR_I;
11339          offset = 8;
11340          break;
11341      case EXCP_IRQ:
11342          new_mode = ARM_CPU_MODE_IRQ;
11343          addr = 0x18;
11344          /* Disable IRQ and imprecise data aborts.  */
11345          mask = CPSR_A | CPSR_I;
11346          offset = 4;
11347          if (env->cp15.scr_el3 & SCR_IRQ) {
11348              /* IRQ routed to monitor mode */
11349              new_mode = ARM_CPU_MODE_MON;
11350              mask |= CPSR_F;
11351          }
11352          break;
11353      case EXCP_FIQ:
11354          new_mode = ARM_CPU_MODE_FIQ;
11355          addr = 0x1c;
11356          /* Disable FIQ, IRQ and imprecise data aborts.  */
11357          mask = CPSR_A | CPSR_I | CPSR_F;
11358          if (env->cp15.scr_el3 & SCR_FIQ) {
11359              /* FIQ routed to monitor mode */
11360              new_mode = ARM_CPU_MODE_MON;
11361          }
11362          offset = 4;
11363          break;
11364      case EXCP_VIRQ:
11365          new_mode = ARM_CPU_MODE_IRQ;
11366          addr = 0x18;
11367          /* Disable IRQ and imprecise data aborts.  */
11368          mask = CPSR_A | CPSR_I;
11369          offset = 4;
11370          break;
11371      case EXCP_VFIQ:
11372          new_mode = ARM_CPU_MODE_FIQ;
11373          addr = 0x1c;
11374          /* Disable FIQ, IRQ and imprecise data aborts.  */
11375          mask = CPSR_A | CPSR_I | CPSR_F;
11376          offset = 4;
11377          break;
11378      case EXCP_VSERR:
11379          {
11380              /*
11381               * Note that this is reported as a data abort, but the DFAR
11382               * has an UNKNOWN value.  Construct the SError syndrome from
11383               * AET and ExT fields.
11384               */
11385              ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
11386  
11387              if (extended_addresses_enabled(env)) {
11388                  env->exception.fsr = arm_fi_to_lfsc(&fi);
11389              } else {
11390                  env->exception.fsr = arm_fi_to_sfsc(&fi);
11391              }
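                   /* 0xd000 selects VSESR_EL2.AET (bits 15:14) and ExT (bit 12) */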
11392              env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
11393              A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
11394              qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
11395                            env->exception.fsr);
11396  
11397              new_mode = ARM_CPU_MODE_ABT;
11398              addr = 0x10;
11399              mask = CPSR_A | CPSR_I;
11400              offset = 8;
11401          }
11402          break;
11403      case EXCP_SMC:
11404          new_mode = ARM_CPU_MODE_MON;
11405          addr = 0x08;
11406          mask = CPSR_A | CPSR_I | CPSR_F;
11407          offset = 0;
11408          break;
11409      default:
11410          cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11411          return; /* Never happens.  Keep compiler happy.  */
11412      }
11413  
11414      if (new_mode == ARM_CPU_MODE_MON) {
11415          addr += env->cp15.mvbar;
11416      } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
11417          /* High vectors. When enabled, base address cannot be remapped. */
11418          addr += 0xffff0000;
11419      } else {
11420          /*
11421           * ARM v7 architectures provide a vector base address register to remap
11422           * the interrupt vector table.
11423           * This register is only honoured in non-monitor mode, and is banked.
11424           * Note: only bits 31:5 are valid.
11425           */
11426          addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
11427      }
11428  
11429      if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
11430          env->cp15.scr_el3 &= ~SCR_NS;
11431      }
11432  
11433      take_aarch32_exception(env, new_mode, mask, offset, addr);
11434  }
11435  
11436  static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
11437  {
11438      /*
11439       * Return the register number of the AArch64 view of the AArch32
11440       * register @aarch32_reg. The CPUARMState CPSR is assumed to still
11441       * be that of the AArch32 mode the exception came from.
11442       */
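           /*
            * For example, in SVC mode r13 maps to x19 and r14 to x18,
            * while in FIQ mode r8-r12 map to x24-x28.
            */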
11443      int mode = env->uncached_cpsr & CPSR_M;
11444  
11445      switch (aarch32_reg) {
11446      case 0 ... 7:
11447          return aarch32_reg;
11448      case 8 ... 12:
11449          return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
11450      case 13:
11451          switch (mode) {
11452          case ARM_CPU_MODE_USR:
11453          case ARM_CPU_MODE_SYS:
11454              return 13;
11455          case ARM_CPU_MODE_HYP:
11456              return 15;
11457          case ARM_CPU_MODE_IRQ:
11458              return 17;
11459          case ARM_CPU_MODE_SVC:
11460              return 19;
11461          case ARM_CPU_MODE_ABT:
11462              return 21;
11463          case ARM_CPU_MODE_UND:
11464              return 23;
11465          case ARM_CPU_MODE_FIQ:
11466              return 29;
11467          default:
11468              g_assert_not_reached();
11469          }
11470      case 14:
11471          switch (mode) {
11472          case ARM_CPU_MODE_USR:
11473          case ARM_CPU_MODE_SYS:
11474          case ARM_CPU_MODE_HYP:
11475              return 14;
11476          case ARM_CPU_MODE_IRQ:
11477              return 16;
11478          case ARM_CPU_MODE_SVC:
11479              return 18;
11480          case ARM_CPU_MODE_ABT:
11481              return 20;
11482          case ARM_CPU_MODE_UND:
11483              return 22;
11484          case ARM_CPU_MODE_FIQ:
11485              return 30;
11486          default:
11487              g_assert_not_reached();
11488          }
11489      case 15:
11490          return 31;
11491      default:
11492          g_assert_not_reached();
11493      }
11494  }
11495  
11496  static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
11497  {
11498      uint32_t ret = cpsr_read(env);
11499  
11500      /* Move DIT to the correct location for SPSR_ELx */
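           /* (AArch32 CPSR.DIT is bit 21; the SPSR_ELx layout has DIT at bit 24.) */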
11501      if (ret & CPSR_DIT) {
11502          ret &= ~CPSR_DIT;
11503          ret |= PSTATE_DIT;
11504      }
11505      /* Merge PSTATE.SS into SPSR_ELx */
11506      ret |= env->pstate & PSTATE_SS;
11507  
11508      return ret;
11509  }
11510  
11511  static bool syndrome_is_sync_extabt(uint32_t syndrome)
11512  {
11513      /* Return true if this syndrome value is a synchronous external abort */
11514      switch (syn_get_ec(syndrome)) {
11515      case EC_INSNABORT:
11516      case EC_INSNABORT_SAME_EL:
11517      case EC_DATAABORT:
11518      case EC_DATAABORT_SAME_EL:
11519          /* Look at fault status code for all the synchronous ext abort cases */
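               /*
                * 0x10 is a synchronous external abort not on a translation
                * table walk, 0x14-0x17 are walk aborts at levels 0-3, and
                * 0x13 is the level -1 walk abort added by FEAT_LPA2.
                */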
11520          switch (syndrome & 0x3f) {
11521          case 0x10:
11522          case 0x13:
11523          case 0x14:
11524          case 0x15:
11525          case 0x16:
11526          case 0x17:
11527              return true;
11528          default:
11529              return false;
11530          }
11531      default:
11532          return false;
11533      }
11534  }
11535  
11536  /* Handle exception entry to a target EL which is using AArch64 */
11537  static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
11538  {
11539      ARMCPU *cpu = ARM_CPU(cs);
11540      CPUARMState *env = &cpu->env;
11541      unsigned int new_el = env->exception.target_el;
11542      target_ulong addr = env->cp15.vbar_el[new_el];
11543      unsigned int new_mode = aarch64_pstate_mode(new_el, true);
11544      unsigned int old_mode;
11545      unsigned int cur_el = arm_current_el(env);
11546      int rt;
11547  
11548      if (tcg_enabled()) {
11549          /*
11550           * Note that new_el can never be 0.  If cur_el is 0, then
11551           * el0_a64 is is_a64(), else el0_a64 is ignored.
11552           */
11553          aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
11554      }
11555  
11556      if (cur_el < new_el) {
11557          /*
11558           * Entry vector offset depends on whether the implemented EL
11559           * immediately lower than the target level is using AArch32 or AArch64
11560           */
11561          bool is_aa64;
11562          uint64_t hcr;
11563  
11564          switch (new_el) {
11565          case 3:
11566              is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
11567              break;
11568          case 2:
11569              hcr = arm_hcr_el2_eff(env);
11570              if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
11571                  is_aa64 = (hcr & HCR_RW) != 0;
11572                  break;
11573              }
11574              /* fall through */
11575          case 1:
11576              is_aa64 = is_a64(env);
11577              break;
11578          default:
11579              g_assert_not_reached();
11580          }
11581  
11582          if (is_aa64) {
11583              addr += 0x400;
11584          } else {
11585              addr += 0x600;
11586          }
11587      } else if (pstate_read(env) & PSTATE_SP) {
11588          addr += 0x200;
11589      }
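
           /*
            * addr now points at one of the four 0x200-byte blocks of the
            * AArch64 vector table:
            *   VBAR + 0x000 : current EL with SP_EL0
            *   VBAR + 0x200 : current EL with SP_ELx
            *   VBAR + 0x400 : lower EL using AArch64
            *   VBAR + 0x600 : lower EL using AArch32
            * The switch below adds the per-type offset within the block:
            * 0x0 synchronous, 0x80 IRQ, 0x100 FIQ, 0x180 SError.
            */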
11590  
11591      switch (cs->exception_index) {
11592      case EXCP_GPC:
11593          qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
11594                        env->cp15.mfar_el3);
11595          /* fall through */
11596      case EXCP_PREFETCH_ABORT:
11597      case EXCP_DATA_ABORT:
11598          /*
11599           * FEAT_DoubleFault allows synchronous external aborts taken to EL3
11600           * to be taken to the SError vector entrypoint.
11601           */
11602          if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) &&
11603              syndrome_is_sync_extabt(env->exception.syndrome)) {
11604              addr += 0x180;
11605          }
11606          env->cp15.far_el[new_el] = env->exception.vaddress;
11607          qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
11608                        env->cp15.far_el[new_el]);
11609          /* fall through */
11610      case EXCP_BKPT:
11611      case EXCP_UDEF:
11612      case EXCP_SWI:
11613      case EXCP_HVC:
11614      case EXCP_HYP_TRAP:
11615      case EXCP_SMC:
11616          switch (syn_get_ec(env->exception.syndrome)) {
11617          case EC_ADVSIMDFPACCESSTRAP:
11618              /*
11619               * QEMU internal FP/SIMD syndromes from AArch32 include the
11620               * TA and coproc fields which are only exposed if the exception
11621               * is taken to AArch32 Hyp mode. Mask them out to get a valid
11622               * AArch64 format syndrome.
11623               */
11624              env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
11625              break;
11626          case EC_CP14RTTRAP:
11627          case EC_CP15RTTRAP:
11628          case EC_CP14DTTRAP:
11629              /*
11630               * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
11631               * the raw register field from the insn; when taking this to
11632               * AArch64 we must convert it to the AArch64 view of the register
11633               * number. Notice that we read a 4-bit AArch32 register number and
11634               * write back a 5-bit AArch64 one.
11635               */
11636              rt = extract32(env->exception.syndrome, 5, 4);
11637              rt = aarch64_regnum(env, rt);
11638              env->exception.syndrome = deposit32(env->exception.syndrome,
11639                                                  5, 5, rt);
11640              break;
11641          case EC_CP15RRTTRAP:
11642          case EC_CP14RRTTRAP:
11643              /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
11644              rt = extract32(env->exception.syndrome, 5, 4);
11645              rt = aarch64_regnum(env, rt);
11646              env->exception.syndrome = deposit32(env->exception.syndrome,
11647                                                  5, 5, rt);
11648              rt = extract32(env->exception.syndrome, 10, 4);
11649              rt = aarch64_regnum(env, rt);
11650              env->exception.syndrome = deposit32(env->exception.syndrome,
11651                                                  10, 5, rt);
11652              break;
11653          }
11654          env->cp15.esr_el[new_el] = env->exception.syndrome;
11655          break;
11656      case EXCP_IRQ:
11657      case EXCP_VIRQ:
11658      case EXCP_NMI:
11659      case EXCP_VINMI:
11660          addr += 0x80;
11661          break;
11662      case EXCP_FIQ:
11663      case EXCP_VFIQ:
11664      case EXCP_VFNMI:
11665          addr += 0x100;
11666          break;
11667      case EXCP_VSERR:
11668          addr += 0x180;
11669          /* Construct the SError syndrome from IDS and ISS fields. */
11670          env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
11671          env->cp15.esr_el[new_el] = env->exception.syndrome;
11672          break;
11673      default:
11674          cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
11675      }
11676  
11677      if (is_a64(env)) {
11678          old_mode = pstate_read(env);
11679          aarch64_save_sp(env, arm_current_el(env));
11680          env->elr_el[new_el] = env->pc;
11681  
11682          if (cur_el == 1 && new_el == 1) {
11683              uint64_t hcr = arm_hcr_el2_eff(env);
11684              if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV ||
11685                  (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) {
11686                  /*
11687                   * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR
11688                   * by setting M[3:2] to 0b10.
11689                   * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
11690                   * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
11691                   */
11692                  old_mode = deposit32(old_mode, 2, 2, 2);
11693              }
11694          }
11695      } else {
11696          old_mode = cpsr_read_for_spsr_elx(env);
11697          env->elr_el[new_el] = env->regs[15];
11698  
11699          aarch64_sync_32_to_64(env);
11700  
11701          env->condexec_bits = 0;
11702      }
11703      env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
11704  
11705      qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
11706      qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
11707                    env->elr_el[new_el]);
11708  
11709      if (cpu_isar_feature(aa64_pan, cpu)) {
11710          /* The value of PSTATE.PAN is normally preserved, except when ... */
11711          new_mode |= old_mode & PSTATE_PAN;
11712          switch (new_el) {
11713          case 2:
11714              /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
11715              if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
11716                  != (HCR_E2H | HCR_TGE)) {
11717                  break;
11718              }
11719              /* fall through */
11720          case 1:
11721              /* ... the target is EL1 ... */
11722              /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
11723              if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
11724                  new_mode |= PSTATE_PAN;
11725              }
11726              break;
11727          }
11728      }
11729      if (cpu_isar_feature(aa64_mte, cpu)) {
11730          new_mode |= PSTATE_TCO;
11731      }
11732  
11733      if (cpu_isar_feature(aa64_ssbs, cpu)) {
11734          if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) {
11735              new_mode |= PSTATE_SSBS;
11736          } else {
11737              new_mode &= ~PSTATE_SSBS;
11738          }
11739      }
11740  
11741      if (cpu_isar_feature(aa64_nmi, cpu)) {
11742          if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPINTMASK)) {
11743              new_mode |= PSTATE_ALLINT;
11744          } else {
11745              new_mode &= ~PSTATE_ALLINT;
11746          }
11747      }
11748  
11749      pstate_write(env, PSTATE_DAIF | new_mode);
11750      env->aarch64 = true;
11751      aarch64_restore_sp(env, new_el);
11752  
11753      if (tcg_enabled()) {
11754          helper_rebuild_hflags_a64(env, new_el);
11755      }
11756  
11757      env->pc = addr;
11758  
11759      qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
11760                    new_el, env->pc, pstate_read(env));
11761  }
11762  
11763  /*
11764   * Do semihosting call and set the appropriate return value. All the
11765   * permission and validity checks have been done at translate time.
11766   *
11767   * We only see semihosting exceptions in TCG, as they are not
11768   * trapped to the hypervisor in KVM.
11769   */
11770  #ifdef CONFIG_TCG
11771  static void tcg_handle_semihosting(CPUState *cs)
11772  {
11773      ARMCPU *cpu = ARM_CPU(cs);
11774      CPUARMState *env = &cpu->env;
11775  
11776      if (is_a64(env)) {
11777          qemu_log_mask(CPU_LOG_INT,
11778                        "...handling as semihosting call 0x%" PRIx64 "\n",
11779                        env->xregs[0]);
11780          do_common_semihosting(cs);
11781          env->pc += 4;
11782      } else {
11783          qemu_log_mask(CPU_LOG_INT,
11784                        "...handling as semihosting call 0x%x\n",
11785                        env->regs[0]);
11786          do_common_semihosting(cs);
11787          env->regs[15] += env->thumb ? 2 : 4;
11788      }
11789  }
11790  #endif
11791  
11792  /*
11793   * Handle a CPU exception for A and R profile CPUs.
11794   * Do any appropriate logging, handle PSCI calls, and then hand off
11795   * to the AArch64-entry or AArch32-entry function depending on the
11796   * target exception level's register width.
11797   *
11798   * Note: this is used for both TCG (as the do_interrupt tcg op),
11799   *       and KVM to re-inject guest debug exceptions, and to
11800   *       inject a Synchronous-External-Abort.
11801   */
11802  void arm_cpu_do_interrupt(CPUState *cs)
11803  {
11804      ARMCPU *cpu = ARM_CPU(cs);
11805      CPUARMState *env = &cpu->env;
11806      unsigned int new_el = env->exception.target_el;
11807  
11808      assert(!arm_feature(env, ARM_FEATURE_M));
11809  
11810      arm_log_exception(cs);
11811      qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
11812                    new_el);
11813      if (qemu_loglevel_mask(CPU_LOG_INT)
11814          && !excp_is_internal(cs->exception_index)) {
11815          qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
11816                        syn_get_ec(env->exception.syndrome),
11817                        env->exception.syndrome);
11818      }
11819  
11820      if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) {
11821          arm_handle_psci_call(cpu);
11822          qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
11823          return;
11824      }
11825  
11826      /*
11827       * Semihosting semantics depend on the register width of the code
11828       * that caused the exception, not the target exception level, so
11829       * must be handled here.
11830       */
11831  #ifdef CONFIG_TCG
11832      if (cs->exception_index == EXCP_SEMIHOST) {
11833          tcg_handle_semihosting(cs);
11834          return;
11835      }
11836  #endif
11837  
11838      /*
11839       * Hooks may change global state, so the BQL must be held; it is
11840       * also required for any modification of
11841       * cs->interrupt_request.
11842       */
11843      g_assert(bql_locked());
11844  
11845      arm_call_pre_el_change_hook(cpu);
11846  
11847      assert(!excp_is_internal(cs->exception_index));
11848      if (arm_el_is_aa64(env, new_el)) {
11849          arm_cpu_do_interrupt_aarch64(cs);
11850      } else {
11851          arm_cpu_do_interrupt_aarch32(cs);
11852      }
11853  
11854      arm_call_el_change_hook(cpu);
11855  
11856      if (!kvm_enabled()) {
11857          cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
11858      }
11859  }
11860  #endif /* !CONFIG_USER_ONLY */
11861  
11862  uint64_t arm_sctlr(CPUARMState *env, int el)
11863  {
11864      /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
11865      if (el == 0) {
11866          ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
11867          el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
11868      }
11869      return env->cp15.sctlr_el[el];
11870  }
11871  
11872  int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
11873  {
11874      if (regime_has_2_ranges(mmu_idx)) {
11875          return extract64(tcr, 37, 2);
11876      } else if (regime_is_stage2(mmu_idx)) {
11877          return 0; /* VTCR_EL2 */
11878      } else {
11879          /* Replicate the single TBI bit so we always have 2 bits.  */
11880          return extract32(tcr, 20, 1) * 3;
11881      }
11882  }
11883  
11884  int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
11885  {
11886      if (regime_has_2_ranges(mmu_idx)) {
11887          return extract64(tcr, 51, 2);
11888      } else if (regime_is_stage2(mmu_idx)) {
11889          return 0; /* VTCR_EL2 */
11890      } else {
11891          /* Replicate the single TBID bit so we always have 2 bits.  */
11892          return extract32(tcr, 29, 1) * 3;
11893      }
11894  }
11895  
11896  int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
11897  {
11898      if (regime_has_2_ranges(mmu_idx)) {
11899          return extract64(tcr, 57, 2);
11900      } else {
11901          /* Replicate the single TCMA bit so we always have 2 bits.  */
11902          return extract32(tcr, 30, 1) * 3;
11903      }
11904  }
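/*
 * For example (values illustrative): with TCR_EL1.TBI0 = 1 and
 * TCR_EL1.TBI1 = 0, aa64_va_parameter_tbi() returns 0b01, so only the
 * lower VA range (select == 0) has top-byte-ignore enabled. For the
 * single-range regimes the lone bit is multiplied by 3 to give 0b00
 * or 0b11, so callers can always index the result by "select" without
 * caring which kind of regime they are querying.
 */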
11905  
11906  static ARMGranuleSize tg0_to_gran_size(int tg)
11907  {
11908      switch (tg) {
11909      case 0:
11910          return Gran4K;
11911      case 1:
11912          return Gran64K;
11913      case 2:
11914          return Gran16K;
11915      default:
11916          return GranInvalid;
11917      }
11918  }
11919  
11920  static ARMGranuleSize tg1_to_gran_size(int tg)
11921  {
11922      switch (tg) {
11923      case 1:
11924          return Gran16K;
11925      case 2:
11926          return Gran4K;
11927      case 3:
11928          return Gran64K;
11929      default:
11930          return GranInvalid;
11931      }
11932  }
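/*
 * Note that the two fields use different encodings: TG0 is 0/1/2 for
 * 4K/64K/16K, while TG1 is 1/2/3 for 16K/4K/64K. For example, a TCR
 * with TG0 = 0b10 and TG1 = 0b10 selects a 16K granule for TTBR0 but
 * a 4K granule for TTBR1.
 */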
11933  
11934  static inline bool have4k(ARMCPU *cpu, bool stage2)
11935  {
11936      return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
11937          : cpu_isar_feature(aa64_tgran4, cpu);
11938  }
11939  
11940  static inline bool have16k(ARMCPU *cpu, bool stage2)
11941  {
11942      return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
11943          : cpu_isar_feature(aa64_tgran16, cpu);
11944  }
11945  
11946  static inline bool have64k(ARMCPU *cpu, bool stage2)
11947  {
11948      return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
11949          : cpu_isar_feature(aa64_tgran64, cpu);
11950  }
11951  
11952  static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
11953                                           bool stage2)
11954  {
11955      switch (gran) {
11956      case Gran4K:
11957          if (have4k(cpu, stage2)) {
11958              return gran;
11959          }
11960          break;
11961      case Gran16K:
11962          if (have16k(cpu, stage2)) {
11963              return gran;
11964          }
11965          break;
11966      case Gran64K:
11967          if (have64k(cpu, stage2)) {
11968              return gran;
11969          }
11970          break;
11971      case GranInvalid:
11972          break;
11973      }
11974      /*
11975       * If the guest selects a granule size that isn't implemented,
11976       * the architecture requires that we behave as if it selected one
11977       * that is (with an IMPDEF choice of which one to pick). We choose
11978       * to implement the smallest supported granule size.
11979       */
11980      if (have4k(cpu, stage2)) {
11981          return Gran4K;
11982      }
11983      if (have16k(cpu, stage2)) {
11984          return Gran16K;
11985      }
11986      assert(have64k(cpu, stage2));
11987      return Gran64K;
11988  }
11989  
11990  ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
11991                                     ARMMMUIdx mmu_idx, bool data,
11992                                     bool el1_is_aa32)
11993  {
11994      uint64_t tcr = regime_tcr(env, mmu_idx);
11995      bool epd, hpd, tsz_oob, ds, ha, hd;
11996      int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
11997      ARMGranuleSize gran;
11998      ARMCPU *cpu = env_archcpu(env);
11999      bool stage2 = regime_is_stage2(mmu_idx);
12000  
12001      if (!regime_has_2_ranges(mmu_idx)) {
12002          select = 0;
12003          tsz = extract32(tcr, 0, 6);
12004          gran = tg0_to_gran_size(extract32(tcr, 14, 2));
12005          if (stage2) {
12006              /* VTCR_EL2 */
12007              hpd = false;
12008          } else {
12009              hpd = extract32(tcr, 24, 1);
12010          }
12011          epd = false;
12012          sh = extract32(tcr, 12, 2);
12013          ps = extract32(tcr, 16, 3);
12014          ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu);
12015          hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu);
12016          ds = extract64(tcr, 32, 1);
12017      } else {
12018          bool e0pd;
12019  
12020          /*
12021           * Bit 55 is always between the two regions, and is canonical for
12022           * determining if address tagging is enabled.
12023           */
12024          select = extract64(va, 55, 1);
12025          if (!select) {
12026              tsz = extract32(tcr, 0, 6);
12027              gran = tg0_to_gran_size(extract32(tcr, 14, 2));
12028              epd = extract32(tcr, 7, 1);
12029              sh = extract32(tcr, 12, 2);
12030              hpd = extract64(tcr, 41, 1);
12031              e0pd = extract64(tcr, 55, 1);
12032          } else {
12033              tsz = extract32(tcr, 16, 6);
12034              gran = tg1_to_gran_size(extract32(tcr, 30, 2));
12035              epd = extract32(tcr, 23, 1);
12036              sh = extract32(tcr, 28, 2);
12037              hpd = extract64(tcr, 42, 1);
12038              e0pd = extract64(tcr, 56, 1);
12039          }
12040          ps = extract64(tcr, 32, 3);
12041          ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu);
12042          hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu);
12043          ds = extract64(tcr, 59, 1);
12044  
12045          if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
12046              regime_is_user(env, mmu_idx)) {
12047              epd = true;
12048          }
12049      }
12050  
12051      gran = sanitize_gran_size(cpu, gran, stage2);
12052  
12053      if (cpu_isar_feature(aa64_st, cpu)) {
12054          max_tsz = 48 - (gran == Gran64K);
12055      } else {
12056          max_tsz = 39;
12057      }
12058  
12059      /*
12060       * DS is RES0 unless FEAT_LPA2 is supported for the given page size;
12061       * adjust the effective value of DS, as documented.
12062       */
12063      min_tsz = 16;
12064      if (gran == Gran64K) {
12065          if (cpu_isar_feature(aa64_lva, cpu)) {
12066              min_tsz = 12;
12067          }
12068          ds = false;
12069      } else if (ds) {
12070          if (regime_is_stage2(mmu_idx)) {
12071              if (gran == Gran16K) {
12072                  ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
12073              } else {
12074                  ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
12075              }
12076          } else {
12077              if (gran == Gran16K) {
12078                  ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
12079              } else {
12080                  ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
12081              }
12082          }
12083          if (ds) {
12084              min_tsz = 12;
12085          }
12086      }
12087  
12088      if (stage2 && el1_is_aa32) {
12089          /*
12090           * For AArch32 EL1 the min txsz (and thus max IPA size) requirements
12091           * are loosened: a configured IPA of 40 bits is permitted even if
12092           * the implemented PA is less than that (and so a 40 bit IPA would
12093           * fault for an AArch64 EL1). See R_DTLMN.
12094           */
12095          min_tsz = MIN(min_tsz, 24);
12096      }
12097  
12098      if (tsz > max_tsz) {
12099          tsz = max_tsz;
12100          tsz_oob = true;
12101      } else if (tsz < min_tsz) {
12102          tsz = min_tsz;
12103          tsz_oob = true;
12104      } else {
12105          tsz_oob = false;
12106      }
12107  
12108      /* Present TBI as a composite with TBID.  */
12109      tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
12110      if (!data) {
12111          tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
12112      }
12113      tbi = (tbi >> select) & 1;
12114  
12115      return (ARMVAParameters) {
12116          .tsz = tsz,
12117          .ps = ps,
12118          .sh = sh,
12119          .select = select,
12120          .tbi = tbi,
12121          .epd = epd,
12122          .hpd = hpd,
12123          .tsz_oob = tsz_oob,
12124          .ds = ds,
12125          .ha = ha,
12126          .hd = ha && hd,
12127          .gran = gran,
12128      };
12129  }
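/*
 * Worked example (values illustrative): for an EL1&0 regime with
 * TCR_EL1.T0SZ = 16 and TG0 = 0b00 (4K), a VA with bit 55 clear gives
 * select = 0, tsz = 16 and gran = Gran4K, i.e. a 64 - 16 = 48-bit
 * lower VA range from 0 to 0x0000ffffffffffff. A TxSZ outside
 * [min_tsz, max_tsz] is clamped and reported via tsz_oob so that the
 * page-table walker can treat the out-of-range value as a fault where
 * the architecture requires it.
 */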
12130  
12131  /*
12132   * Note that signed overflow is undefined in C.  The following routines are
12133   * careful to use unsigned types where modulo arithmetic is required.
12134   * Failure to do so _will_ break on newer gcc.
12135   */
12136  
12137  /* Signed saturating arithmetic.  */
12138  
12139  /* Perform 16-bit signed saturating addition.  */
12140  static inline uint16_t add16_sat(uint16_t a, uint16_t b)
12141  {
12142      uint16_t res;
12143  
12144      res = a + b;
12145      if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
12146          if (a & 0x8000) {
12147              res = 0x8000;
12148          } else {
12149              res = 0x7fff;
12150          }
12151      }
12152      return res;
12153  }
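/*
 * The test above is the standard two's-complement overflow check:
 * signed overflow is only possible when the operands have the same
 * sign ((a ^ b) sign bit clear) and the result's sign differs from
 * a's ((res ^ a) sign bit set). For example, 0x7fff + 0x0001 yields
 * res = 0x8000; both conditions hold, and as a is positive we
 * saturate to 0x7fff. The subtraction variants below use the same
 * idea with the sign of b inverted.
 */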
12154  
12155  /* Perform 8-bit signed saturating addition.  */
12156  static inline uint8_t add8_sat(uint8_t a, uint8_t b)
12157  {
12158      uint8_t res;
12159  
12160      res = a + b;
12161      if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
12162          if (a & 0x80) {
12163              res = 0x80;
12164          } else {
12165              res = 0x7f;
12166          }
12167      }
12168      return res;
12169  }
12170  
12171  /* Perform 16-bit signed saturating subtraction.  */
12172  static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
12173  {
12174      uint16_t res;
12175  
12176      res = a - b;
12177      if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
12178          if (a & 0x8000) {
12179              res = 0x8000;
12180          } else {
12181              res = 0x7fff;
12182          }
12183      }
12184      return res;
12185  }
12186  
12187  /* Perform 8-bit signed saturating subtraction.  */
12188  static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
12189  {
12190      uint8_t res;
12191  
12192      res = a - b;
12193      if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
12194          if (a & 0x80) {
12195              res = 0x80;
12196          } else {
12197              res = 0x7f;
12198          }
12199      }
12200      return res;
12201  }
12202  
12203  #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
12204  #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
12205  #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
12206  #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
12207  #define PFX q
12208  
12209  #include "op_addsub.h"
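/*
 * op_addsub.h is a template: it is included several times in this
 * file, each time after fresh definitions of ADD16/SUB16/ADD8/SUB8
 * and PFX, and it expands those macros into a family of parallel
 * add/subtract helpers named after the prefix (the inclusion above
 * produces the signed-saturating helpers backing e.g. QADD16 and
 * QSUB8). The template #undefs the macros at the end so the next
 * block can redefine them.
 */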
12210  
12211  /* Unsigned saturating arithmetic.  */
12212  static inline uint16_t add16_usat(uint16_t a, uint16_t b)
12213  {
12214      uint16_t res;
12215      res = a + b;
12216      if (res < a) {
12217          res = 0xffff;
12218      }
12219      return res;
12220  }
12221  
12222  static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
12223  {
12224      if (a > b) {
12225          return a - b;
12226      } else {
12227          return 0;
12228      }
12229  }
12230  
12231  static inline uint8_t add8_usat(uint8_t a, uint8_t b)
12232  {
12233      uint8_t res;
12234      res = a + b;
12235      if (res < a) {
12236          res = 0xff;
12237      }
12238      return res;
12239  }
12240  
12241  static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
12242  {
12243      if (a > b) {
12244          return a - b;
12245      } else {
12246          return 0;
12247      }
12248  }
12249  
12250  #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
12251  #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
12252  #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
12253  #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
12254  #define PFX uq
12255  
12256  #include "op_addsub.h"
12257  
12258  /* Signed modulo arithmetic.  */
12259  #define SARITH16(a, b, n, op) do { \
12260      int32_t sum; \
12261      sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
12262      RESULT(sum, n, 16); \
12263      if (sum >= 0) \
12264          ge |= 3 << (n * 2); \
12265      } while (0)
12266  
12267  #define SARITH8(a, b, n, op) do { \
12268      int32_t sum; \
12269      sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
12270      RESULT(sum, n, 8); \
12271      if (sum >= 0) \
12272          ge |= 1 << n; \
12273      } while (0)
12274  
12275  
12276  #define ADD16(a, b, n) SARITH16(a, b, n, +)
12277  #define SUB16(a, b, n) SARITH16(a, b, n, -)
12278  #define ADD8(a, b, n)  SARITH8(a, b, n, +)
12279  #define SUB8(a, b, n)  SARITH8(a, b, n, -)
12280  #define PFX s
12281  #define ARITH_GE
12282  
12283  #include "op_addsub.h"
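/*
 * When ARITH_GE is defined, op_addsub.h also updates the CPSR.GE
 * bits consumed by the SEL instruction. For the signed forms a lane
 * sets its GE bits when the result is non-negative; the arithmetic
 * is done at 32 bits, so e.g. adding 0x7fff and 0x0001 in halfword 0
 * gives sum = 32768 >= 0 and sets ge |= 3 for that lane even though
 * the stored halfword wraps to 0x8000.
 */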
12284  
12285  /* Unsigned modulo arithmetic.  */
12286  #define ADD16(a, b, n) do { \
12287      uint32_t sum; \
12288      sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
12289      RESULT(sum, n, 16); \
12290      if ((sum >> 16) == 1) \
12291          ge |= 3 << (n * 2); \
12292      } while (0)
12293  
12294  #define ADD8(a, b, n) do { \
12295      uint32_t sum; \
12296      sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12297      RESULT(sum, n, 8); \
12298      if ((sum >> 8) == 1) \
12299          ge |= 1 << n; \
12300      } while (0)
12301  
12302  #define SUB16(a, b, n) do { \
12303      uint32_t sum; \
12304      sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12305      RESULT(sum, n, 16); \
12306      if ((sum >> 16) == 0) \
12307          ge |= 3 << (n * 2); \
12308      } while (0)
12309  
12310  #define SUB8(a, b, n) do { \
12311      uint32_t sum; \
12312      sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12313      RESULT(sum, n, 8); \
12314      if ((sum >> 8) == 0) \
12315          ge |= 1 << n; \
12316      } while (0)
12317  
12318  #define PFX u
12319  #define ARITH_GE
12320  
12321  #include "op_addsub.h"
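/*
 * For the unsigned forms the GE bits record carry-out on addition and
 * absence of borrow on subtraction, per lane. E.g. for UADD16,
 * 0xffff + 0x0001 in halfword 0 gives sum = 0x10000, (sum >> 16) == 1
 * and ge |= 3; for USUB16, 0x0001 - 0xffff gives sum = 0xffff0002
 * (a borrow), so (sum >> 16) != 0 and the GE bits for that lane stay
 * clear.
 */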
12322  
12323  /* Halved signed arithmetic.  */
12324  #define ADD16(a, b, n) \
12325    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12326  #define SUB16(a, b, n) \
12327    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12328  #define ADD8(a, b, n) \
12329    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12330  #define SUB8(a, b, n) \
12331    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12332  #define PFX sh
12333  
12334  #include "op_addsub.h"
12335  
12336  /* Halved unsigned arithmetic.  */
12337  #define ADD16(a, b, n) \
12338    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12339  #define SUB16(a, b, n) \
12340    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12341  #define ADD8(a, b, n) \
12342    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12343  #define SUB8(a, b, n) \
12344    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12345  #define PFX uh
12346  
12347  #include "op_addsub.h"
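/*
 * The halving forms widen to 32 bits, add or subtract, and then shift
 * right by one, so they cannot overflow: e.g. uhadd8 of 0xff and 0xff
 * is (255 + 255) >> 1 = 255, and shadd8 of 0x7f and 0x7f is
 * (127 + 127) >> 1 = 127.
 */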
12348  
12349  static inline uint8_t do_usad(uint8_t a, uint8_t b)
12350  {
12351      if (a > b) {
12352          return a - b;
12353      } else {
12354          return b - a;
12355      }
12356  }
12357  
12358  /* Unsigned sum of absolute byte differences.  */
12359  uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12360  {
12361      uint32_t sum;
12362      sum = do_usad(a, b);
12363      sum += do_usad(a >> 8, b >> 8);
12364      sum += do_usad(a >> 16, b >> 16);
12365      sum += do_usad(a >> 24, b >> 24);
12366      return sum;
12367  }
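/*
 * Example: usad8(0x01020304, 0x04030201) computes |0x04 - 0x01| +
 * |0x03 - 0x02| + |0x02 - 0x03| + |0x01 - 0x04| over the four byte
 * lanes (least significant first) and returns 8.
 */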
12368  
12369  /* For ARMv6 SEL instruction.  */
12370  uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12371  {
12372      uint32_t mask;
12373  
12374      mask = 0;
12375      if (flags & 1) {
12376          mask |= 0xff;
12377      }
12378      if (flags & 2) {
12379          mask |= 0xff00;
12380      }
12381      if (flags & 4) {
12382          mask |= 0xff0000;
12383      }
12384      if (flags & 8) {
12385          mask |= 0xff000000;
12386      }
12387      return (a & mask) | (b & ~mask);
12388  }
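/*
 * The low four bits of 'flags' are the CPSR.GE bits: each GE bit
 * selects one byte lane from a (bit set) or from b (bit clear).
 * E.g. flags = 0x5 builds mask = 0x00ff00ff, so bytes 0 and 2 of the
 * result come from a and bytes 1 and 3 come from b.
 */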
12389  
12390  /*
12391   * CRC helpers.
12392   * The upper bytes of val (above the number specified by 'bytes') must have
12393   * been zeroed out by the caller.
12394   */
12395  uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12396  {
12397      uint8_t buf[4];
12398  
12399      stl_le_p(buf, val);
12400  
12401      /* zlib crc32 converts the accumulator and output to one's complement.  */
12402      return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12403  }
12404  
12405  uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12406  {
12407      uint8_t buf[4];
12408  
12409      stl_le_p(buf, val);
12410  
12411      /* Linux crc32c converts the output to one's complement.  */
12412      return crc32c(acc, buf, bytes) ^ 0xffffffff;
12413  }
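/*
 * The XORs with 0xffffffff undo zlib's conditioning: zlib's crc32()
 * inverts the accumulator on input and the result on output, whereas
 * the ARM CRC32 instructions are defined as plain polynomial division
 * with no inversion. crc32c() only inverts the output, hence the
 * single XOR above.
 */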
12414  
12415  /*
12416   * Return the exception level to which FP-disabled exceptions should
12417   * be taken, or 0 if FP is enabled.
12418   */
12419  int fp_exception_el(CPUARMState *env, int cur_el)
12420  {
12421  #ifndef CONFIG_USER_ONLY
12422      uint64_t hcr_el2;
12423  
12424      /*
12425       * CPACR and the CPTR registers don't exist before v6, so FP is
12426       * always accessible.
12427       */
12428      if (!arm_feature(env, ARM_FEATURE_V6)) {
12429          return 0;
12430      }
12431  
12432      if (arm_feature(env, ARM_FEATURE_M)) {
12433          /* CPACR can cause a NOCP UsageFault taken to current security state */
12434          if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
12435              return 1;
12436          }
12437  
12438          if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
12439              if (!extract32(env->v7m.nsacr, 10, 1)) {
12440                  /* FP insns cause a NOCP UsageFault taken to Secure */
12441                  return 3;
12442              }
12443          }
12444  
12445          return 0;
12446      }
12447  
12448      hcr_el2 = arm_hcr_el2_eff(env);
12449  
12450      /*
12451       * The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12452       * 0, 2 : trap EL0 and EL1/PL1 accesses
12453       * 1    : trap only EL0 accesses
12454       * 3    : trap no accesses
12455       * This register is ignored if E2H+TGE are both set.
12456       */
12457      if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12458          int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
12459  
12460          switch (fpen) {
12461          case 1:
12462              if (cur_el != 0) {
12463                  break;
12464              }
12465              /* fall through */
12466          case 0:
12467          case 2:
12468              /* Trap from Secure PL0 or PL1 to Secure PL1. */
12469              if (!arm_el_is_aa64(env, 3)
12470                  && (cur_el == 3 || arm_is_secure_below_el3(env))) {
12471                  return 3;
12472              }
12473              if (cur_el <= 1) {
12474                  return 1;
12475              }
12476              break;
12477          }
12478      }
12479  
12480      /*
12481       * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
12482       * to control non-secure access to the FPU. It doesn't have any
12483       * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
12484       */
12485      if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
12486           cur_el <= 2 && !arm_is_secure_below_el3(env))) {
12487          if (!extract32(env->cp15.nsacr, 10, 1)) {
12488              /* FP insns act as UNDEF */
12489              return cur_el == 2 ? 2 : 1;
12490          }
12491      }
12492  
12493      /*
12494       * CPTR_EL2 is present in v7VE or v8, and changes format
12495       * with HCR_EL2.E2H (regardless of TGE).
12496       */
12497      if (cur_el <= 2) {
12498          if (hcr_el2 & HCR_E2H) {
12499              switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
12500              case 1:
12501                  if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
12502                      break;
12503                  }
12504                  /* fall through */
12505              case 0:
12506              case 2:
12507                  return 2;
12508              }
12509          } else if (arm_is_el2_enabled(env)) {
12510              if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
12511                  return 2;
12512              }
12513          }
12514      }
12515  
12516      /* CPTR_EL3 : present in v8 */
12517      if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
12518          /* Trap all FP ops to EL3 */
12519          return 3;
12520      }
12521  #endif
12522      return 0;
12523  }
12524  
12525  /* Return the exception level we're running at if this is our mmu_idx */
12526  int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
12527  {
12528      if (mmu_idx & ARM_MMU_IDX_M) {
12529          return mmu_idx & ARM_MMU_IDX_M_PRIV;
12530      }
12531  
12532      switch (mmu_idx) {
12533      case ARMMMUIdx_E10_0:
12534      case ARMMMUIdx_E20_0:
12535          return 0;
12536      case ARMMMUIdx_E10_1:
12537      case ARMMMUIdx_E10_1_PAN:
12538          return 1;
12539      case ARMMMUIdx_E2:
12540      case ARMMMUIdx_E20_2:
12541      case ARMMMUIdx_E20_2_PAN:
12542          return 2;
12543      case ARMMMUIdx_E3:
12544          return 3;
12545      default:
12546          g_assert_not_reached();
12547      }
12548  }
12549  
12550  #ifndef CONFIG_TCG
12551  ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12552  {
12553      g_assert_not_reached();
12554  }
12555  #endif
12556  
12557  ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
12558  {
12559      ARMMMUIdx idx;
12560      uint64_t hcr;
12561  
12562      if (arm_feature(env, ARM_FEATURE_M)) {
12563          return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12564      }
12565  
12566      /* See ARM pseudo-function ELIsInHost.  */
12567      switch (el) {
12568      case 0:
12569          hcr = arm_hcr_el2_eff(env);
12570          if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
12571              idx = ARMMMUIdx_E20_0;
12572          } else {
12573              idx = ARMMMUIdx_E10_0;
12574          }
12575          break;
12576      case 1:
12577          if (arm_pan_enabled(env)) {
12578              idx = ARMMMUIdx_E10_1_PAN;
12579          } else {
12580              idx = ARMMMUIdx_E10_1;
12581          }
12582          break;
12583      case 2:
12584          /* Note that TGE does not apply at EL2.  */
12585          if (arm_hcr_el2_eff(env) & HCR_E2H) {
12586              if (arm_pan_enabled(env)) {
12587                  idx = ARMMMUIdx_E20_2_PAN;
12588              } else {
12589                  idx = ARMMMUIdx_E20_2;
12590              }
12591          } else {
12592              idx = ARMMMUIdx_E2;
12593          }
12594          break;
12595      case 3:
12596          return ARMMMUIdx_E3;
12597      default:
12598          g_assert_not_reached();
12599      }
12600  
12601      return idx;
12602  }
12603  
12604  ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12605  {
12606      return arm_mmu_idx_el(env, arm_current_el(env));
12607  }
12608  
12609  static bool mve_no_pred(CPUARMState *env)
12610  {
12611      /*
12612       * Return true if there is definitely no predication of MVE
12613       * instructions by VPR or LTPSIZE. (Returning false even if there
12614       * isn't any predication is OK; generated code will just be
12615       * a little worse.)
12616       * If the CPU does not implement MVE then this TB flag is always 0.
12617       *
12618       * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
12619       * logic in gen_update_fp_context() needs to be updated to match.
12620       *
12621       * We do not include the effect of the ECI bits here -- they are
12622       * tracked in other TB flags. This simplifies the logic for
12623       * "when did we emit code that changes the MVE_NO_PRED TB flag
12624       * and thus need to end the TB?".
12625       */
12626      if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
12627          return false;
12628      }
12629      if (env->v7m.vpr) {
12630          return false;
12631      }
12632      if (env->v7m.ltpsize < 4) {
12633          return false;
12634      }
12635      return true;
12636  }
12637  
12638  void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
12639                            uint64_t *cs_base, uint32_t *pflags)
12640  {
12641      CPUARMTBFlags flags;
12642  
12643      assert_hflags_rebuild_correctly(env);
12644      flags = env->hflags;
12645  
12646      if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
12647          *pc = env->pc;
12648          if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12649              DP_TBFLAG_A64(flags, BTYPE, env->btype);
12650          }
12651      } else {
12652          *pc = env->regs[15];
12653  
12654          if (arm_feature(env, ARM_FEATURE_M)) {
12655              if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
12656                  FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
12657                  != env->v7m.secure) {
12658                  DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
12659              }
12660  
12661              if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
12662                  (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
12663                   (env->v7m.secure &&
12664                    !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
12665                  /*
12666                   * ASPEN is set, but FPCA/SFPA indicate that there is no
12667                   * active FP context; we must create a new FP context before
12668                   * executing any FP insn.
12669                   */
12670                  DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
12671              }
12672  
12673              bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
12674              if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
12675                  DP_TBFLAG_M32(flags, LSPACT, 1);
12676              }
12677  
12678              if (mve_no_pred(env)) {
12679                  DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
12680              }
12681          } else {
12682              /*
12683               * Note that XSCALE_CPAR shares bits with VECSTRIDE.
12684               * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
12685               */
12686              if (arm_feature(env, ARM_FEATURE_XSCALE)) {
12687                  DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
12688              } else {
12689                  DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
12690                  DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
12691              }
12692              if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
12693                  DP_TBFLAG_A32(flags, VFPEN, 1);
12694              }
12695          }
12696  
12697          DP_TBFLAG_AM32(flags, THUMB, env->thumb);
12698          DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
12699      }
12700  
12701      /*
12702       * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
12703       * states defined in the ARM ARM for software singlestep:
12704       *  SS_ACTIVE   PSTATE.SS   State
12705       *     0            x       Inactive (the TB flag for SS is always 0)
12706       *     1            0       Active-pending
12707       *     1            1       Active-not-pending
12708       * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
12709       */
12710      if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
12711          DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
12712      }
12713  
12714      *pflags = flags.flags;
12715      *cs_base = flags.flags2;
12716  }
12717  
12718  #ifdef TARGET_AARCH64
12719  /*
12720   * The manual says that when SVE is enabled and VQ is widened the
12721   * implementation is allowed to zero the previously inaccessible
12722   * portion of the registers.  The corollary to that is that when
12723   * SVE is enabled and VQ is narrowed we are also allowed to zero
12724   * the now inaccessible portion of the registers.
12725   *
12726   * The intent of this is that no predicate bit beyond VQ is ever set.
12727   * Which means that some operations on predicate registers themselves
12728   * may operate on full uint64_t or even unrolled across the maximum
12729   * uint64_t[4].  Performing four words of host arithmetic unconditionally
12730   * may well be cheaper than conditionals to restrict the operation
12731   * to the relevant portion of a uint16_t[16].
12732   */
12733  void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
12734  {
12735      int i, j;
12736      uint64_t pmask;
12737  
12738      assert(vq >= 1 && vq <= ARM_MAX_VQ);
12739      assert(vq <= env_archcpu(env)->sve_max_vq);
12740  
12741      /* Zap the high bits of the zregs.  */
12742      for (i = 0; i < 32; i++) {
12743          memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
12744      }
12745  
12746      /* Zap the high bits of the pregs and ffr.  */
12747      pmask = 0;
12748      if (vq & 3) {
12749          pmask = ~(-1ULL << (16 * (vq & 3)));
12750      }
12751      for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
12752          for (i = 0; i < 17; ++i) {
12753              env->vfp.pregs[i].p[j] &= pmask;
12754          }
12755          pmask = 0;
12756      }
12757  }
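/*
 * Worked example: each vq contributes 16 predicate bits, so one
 * uint64_t of pregs[i].p[] covers four vq. Narrowing to vq = 5
 * starts at j = 5 / 4 = 1 with pmask = 0xffff, which keeps only the
 * low 16 bits of p[1]; pmask is then reset to 0 so p[2] and p[3]
 * (with ARM_MAX_VQ = 16) are zeroed entirely.
 */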
12758  
12759  static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm)
12760  {
12761      int exc_el;
12762  
12763      if (sm) {
12764          exc_el = sme_exception_el(env, el);
12765      } else {
12766          exc_el = sve_exception_el(env, el);
12767      }
12768      if (exc_el) {
12769          return 0; /* disabled */
12770      }
12771      return sve_vqm1_for_el_sm(env, el, sm);
12772  }
12773  
12774  /*
12775   * Notice a change in SVE vector size when changing EL.
12776   */
12777  void aarch64_sve_change_el(CPUARMState *env, int old_el,
12778                             int new_el, bool el0_a64)
12779  {
12780      ARMCPU *cpu = env_archcpu(env);
12781      int old_len, new_len;
12782      bool old_a64, new_a64, sm;
12783  
12784      /* Nothing to do if no SVE.  */
12785      if (!cpu_isar_feature(aa64_sve, cpu)) {
12786          return;
12787      }
12788  
12789      /* Nothing to do if FP is disabled in either EL.  */
12790      if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
12791          return;
12792      }
12793  
12794      old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
12795      new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
12796  
12797      /*
12798       * Both AArch64.TakeException and AArch64.ExceptionReturn
12799       * invoke ResetSVEState when taking an exception from, or
12800       * returning to, AArch32 state when PSTATE.SM is enabled.
12801       */
12802      sm = FIELD_EX64(env->svcr, SVCR, SM);
12803      if (old_a64 != new_a64 && sm) {
12804          arm_reset_sve_state(env);
12805          return;
12806      }
12807  
12808      /*
12809       * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
12810       * at ELx, or not available because the EL is in AArch32 state, then
12811       * for all purposes other than a direct read, the ZCR_ELx.LEN field
12812       * has an effective value of 0".
12813       *
12814       * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
12815       * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
12816       * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
12817       * we already have the correct register contents when encountering the
12818       * vq0->vq0 transition between EL0->EL1.
12819       */
12820      old_len = new_len = 0;
12821      if (old_a64) {
12822          old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm);
12823      }
12824      if (new_a64) {
12825          new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm);
12826      }
12827  
12828      /* When changing vector length, clear inaccessible state.  */
12829      if (new_len < old_len) {
12830          aarch64_sve_narrow_vq(env, new_len + 1);
12831      }
12832  }
12833  #endif
12834  
12835  #ifndef CONFIG_USER_ONLY
12836  ARMSecuritySpace arm_security_space(CPUARMState *env)
12837  {
12838      if (arm_feature(env, ARM_FEATURE_M)) {
12839          return arm_secure_to_space(env->v7m.secure);
12840      }
12841  
12842      /*
12843       * If EL3 is not supported then the secure state is implementation
12844       * defined, in which case QEMU defaults to non-secure.
12845       */
12846      if (!arm_feature(env, ARM_FEATURE_EL3)) {
12847          return ARMSS_NonSecure;
12848      }
12849  
12850      /* Check for AArch64 EL3 or AArch32 Mon. */
12851      if (is_a64(env)) {
12852          if (extract32(env->pstate, 2, 2) == 3) {
12853              if (cpu_isar_feature(aa64_rme, env_archcpu(env))) {
12854                  return ARMSS_Root;
12855              } else {
12856                  return ARMSS_Secure;
12857              }
12858          }
12859      } else {
12860          if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
12861              return ARMSS_Secure;
12862          }
12863      }
12864  
12865      return arm_security_space_below_el3(env);
12866  }
12867  
12868  ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env)
12869  {
12870      assert(!arm_feature(env, ARM_FEATURE_M));
12871  
12872      /*
12873       * If EL3 is not supported then the secure state is implementation
12874       * defined, in which case QEMU defaults to non-secure.
12875       */
12876      if (!arm_feature(env, ARM_FEATURE_EL3)) {
12877          return ARMSS_NonSecure;
12878      }
12879  
12880      /*
12881       * Note NSE cannot be set without RME, and NSE & !NS is Reserved.
12882       * Ignoring NSE when !NS retains consistency without having to
12883       * modify other predicates.
12884       */
12885      if (!(env->cp15.scr_el3 & SCR_NS)) {
12886          return ARMSS_Secure;
12887      } else if (env->cp15.scr_el3 & SCR_NSE) {
12888          return ARMSS_Realm;
12889      } else {
12890          return ARMSS_NonSecure;
12891      }
12892  }
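/*
 * Summarizing the SCR_EL3.{NSE,NS} decode above:
 *   NSE NS   space
 *    0   0   Secure
 *    0   1   Non-secure
 *    1   0   Reserved (treated as Secure, per the note above)
 *    1   1   Realm
 */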
12893  #endif /* !CONFIG_USER_ONLY */
12894