/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000

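/*
 * Illustrative sketch (not part of the original header): the tick
 * period follows directly from the frequency, so the back-compat
 * 62.5MHz value gives 10^9 / 62500000 = 16ns and the 1GHz default
 * gives a 1ns tick.
 */
static inline uint64_t gtimer_period_ns_example(uint64_t hz)
{
    return 1000000000ull / hz;  /* assumes hz evenly divides 10^9 */
}
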
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

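/*
 * Illustrative sketch (an assumption, not code from this header): a
 * v8M branch target is recognized as one of these magic values by a
 * simple range check, since all function-return and exception-return
 * magic values sit at or above FNC_RETURN_MIN_MAGIC:
 */
static inline bool example_is_magic_return(uint32_t addr)
{
    return addr >= FNC_RETURN_MIN_MAGIC;
}
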
/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

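/*
 * Illustrative sketch (assumption): FIELD() from hw/registerfields.h
 * generates R_DBGWCR_*_SHIFT/_MASK constants, so a field such as the
 * byte-address-select bits can be extracted with FIELD_EX64():
 */
static inline uint32_t example_dbgwcr_bas(uint64_t wcr)
{
    return FIELD_EX64(wcr, DBGWCR, BAS); /* bits [12:5] */
}
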
#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

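/*
 * Illustrative sketch (assumption): a consumer of CNTHCTL must pick
 * the field layout that matches the current HCR_EL2.E2H setting, e.g.:
 */
static inline bool example_cnthctl_el1pcten(uint64_t cnthctl, bool e2h)
{
    return e2h ? FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H1)
               : FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H0);
}
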
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

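/*
 * Illustrative usage sketch (hypothetical call site, not from this
 * file): a TCG helper reporting an UNDEFINED instruction might do
 *
 *     raise_exception(env, EXCP_UDEF, syndrome, target_el);
 *
 * and rely on it never returning; raise_exception_ra() additionally
 * unwinds CPU state using the host return address passed in @ra.
 */
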
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1.  */
        [2] = BANK_HYP, /* EL2.  */
        [3] = BANK_MON, /* EL3.  */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks.  */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

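/*
 * Worked example (from the encoding above): a level-2 translation
 * fault in domain 5 yields fsc = 0x7 | (5 << 4) = 0x57, i.e. the
 * short-format "page translation fault" code with the domain
 * number placed in bits [7:4].
 */
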
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

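/*
 * Worked example (from the encoding above): a level-3 permission
 * fault has FSC 0b001100 | 3 = 0x0f, and the final "fsc |= 1 << 9"
 * sets the LPAE bit of the DFSR-format result, giving 0x20f.
 */
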
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit. Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

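/*
 * Illustrative usage sketch (hypothetical call site, not from this
 * file): code that writes a guest-supplied value into CPSR or SPSR
 * would typically mask it first, e.g.
 *
 *     val &= aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
 *
 * so that bits reserved for features this CPU lacks stay zero.
 */
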
/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}

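/*
 * Worked example (from the mapping above): a regime using 16K
 * granules has pages of 1 << arm_granule_bits(Gran16K) == 16KiB.
 */
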
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available.  */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

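/*
 * Illustrative sketch (assumption, not code from this header):
 * translate-time code packs these fields with FIELD_DP32(), e.g. a
 * minimal descriptor for a checked access might be built as:
 */
static inline uint32_t example_mtedesc(int core_mmu_idx, bool is_write)
{
    uint32_t desc = 0;
    desc = FIELD_DP32(desc, MTEDESC, MIDX, core_mmu_idx);
    desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
    return desc;
}
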
1502  bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
1503  uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
1504  
1505  /**
1506   * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
1507   * @env: CPU env
1508   * @ptr: start address of memory region (dirty pointer)
1509   * @size: length of region (guaranteed not to cross a page boundary)
1510   * @desc: MTEDESC descriptor word (0 means no MTE checks)
1511   * Returns: the size of the region that can be copied without hitting
1512   *          an MTE tag failure
1513   *
1514   * Note that we assume that the caller has already checked the TBI
1515   * and TCMA bits with mte_checks_needed() and an MTE check is definitely
1516   * required.
1517   */
1518  uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
1519                          uint32_t desc);
1520  
1521  /**
1522   * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
1523   *                     operation going in the reverse direction
1524   * @env: CPU env
1525   * @ptr: *end* address of memory region (dirty pointer)
1526   * @size: length of region (guaranteed not to cross a page boundary)
1527   * @desc: MTEDESC descriptor word (0 means no MTE checks)
1528   * Returns: the size of the region that can be copied without hitting
1529   *          an MTE tag failure
1530   *
1531   * Note that we assume that the caller has already checked the TBI
1532   * and TCMA bits with mte_checks_needed() and that an MTE check is definitely
1533   * required.
1534   */
1535  uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
1536                              uint32_t desc);
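
/*
 * Illustrative sketch of the calling pattern for the two probes above;
 * the wrapper is hypothetical and elides the actual copy and the fault
 * reporting.
 */
static inline uint64_t mops_fwd_step_example(CPUARMState *env, uint64_t ptr,
                                             uint64_t size, uint32_t desc)
{
    /* How many bytes may be copied before the next tag mismatch? */
    uint64_t safe = desc ? mte_mops_probe(env, ptr, size, desc) : size;
    /* ... copy 'safe' bytes from ptr, then fault if safe < size ... */
    return safe;
}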
1537  
1538  /**
1539   * mte_check_fail: Record an MTE tag check failure
1540   * @env: CPU env
1541   * @desc: MTEDESC descriptor word
1542   * @dirty_ptr: Failing dirty address
1543   * @ra: TCG retaddr
1544   *
1545   * This may never return (if the MTE tag checks are configured to fault).
1546   */
1547  void mte_check_fail(CPUARMState *env, uint32_t desc,
1548                      uint64_t dirty_ptr, uintptr_t ra);
1549  
1550  /**
1551   * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
1552   * @env: CPU env
1553   * @dirty_ptr: Start address of memory region (dirty pointer)
1554   * @size: length of region (guaranteed not to cross page boundary)
1555   * @desc: MTEDESC descriptor word
1556   */
1557  void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
1558                         uint32_t desc);
1559  
1560  static inline int allocation_tag_from_addr(uint64_t ptr)
1561  {
1562      return extract64(ptr, 56, 4);
1563  }
1564  
1565  static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
1566  {
1567      return deposit64(ptr, 56, 4, rtag);
1568  }
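
/*
 * Illustrative sketch: the two helpers above are inverses over the
 * allocation tag bits [59:56]; the function is hypothetical.
 */
static inline bool tag_roundtrip_example(void)
{
    uint64_t ptr = address_with_allocation_tag(0x0000aabbccddeeffull, 0x9);
    /* ptr is now 0x0900aabbccddeeff, so the tag reads back as 0x9. */
    return allocation_tag_from_addr(ptr) == 0x9;
}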
1569  
1570  /* Return true if tbi bits mean that the access is checked.  */
1571  static inline bool tbi_check(uint32_t desc, int bit55)
1572  {
1573      return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
1574  }
1575  
1576  /* Return true if tcma bits mean that the access is unchecked.  */
1577  static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
1578  {
1579      /*
1580       * We had extracted bit55 and ptr_tag for other reasons, so fold
1581       * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
1582       */
1583      bool match = ((ptr_tag + bit55) & 0xf) == 0;
1584      bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
1585      return tcma && match;
1586  }
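
/*
 * Worked example of the fold above: with bit55 == 0 the match test
 * ((ptr_tag + 0) & 0xf) == 0 holds only for ptr_tag == 0b0000, and with
 * bit55 == 1 it holds only for ptr_tag == 0b1111 (0xf + 1 == 0x10),
 * which are exactly the two ptr<59:55> patterns named above.
 */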
1587  
1588  /*
1589   * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
1590   * for the tag to be present in the FAR_ELx register.  But for user-only
1591   * mode, we do not have a TLB with which to implement this, so we must
1592   * remove the top byte.
1593   */
1594  static inline uint64_t useronly_clean_ptr(uint64_t ptr)
1595  {
1596  #ifdef CONFIG_USER_ONLY
1597      /* TBI0 is known to be enabled, while TBI1 is disabled. */
1598      ptr &= sextract64(ptr, 0, 56);
1599  #endif
1600      return ptr;
1601  }
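
/*
 * Worked example for the '&=' above: with bit 55 clear, sextract64()
 * returns the low 56 bits with a zero top byte, so the AND strips the
 * top byte (TBI0 cleaning); with bit 55 set it returns an all-ones top
 * byte, so the AND leaves the pointer unchanged (TBI1 disabled).
 */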
1602  
1603  static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
1604  {
1605  #ifdef CONFIG_USER_ONLY
1606      int64_t clean_ptr = sextract64(ptr, 0, 56);
1607      if (tbi_check(desc, clean_ptr < 0)) {
1608          ptr = clean_ptr;
1609      }
1610  #endif
1611      return ptr;
1612  }
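
/*
 * Worked example: a tagged kernel-space pointer 0x0a80ffffffff0000
 * (bit 55 set) sign-extends to 0xff80ffffffff0000; when tbi_check()
 * says that region is checked, the cleaned value is returned,
 * otherwise the original tagged pointer is.
 */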
1613  
1614  /* Values for M-profile PSR.ECI for MVE insns */
1615  enum MVEECIState {
1616      ECI_NONE = 0, /* No completed beats */
1617      ECI_A0 = 1, /* Completed: A0 */
1618      ECI_A0A1 = 2, /* Completed: A0, A1 */
1619      /* 3 is reserved */
1620      ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
1621      ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
1622      /* All other values reserved */
1623  };
1624  
1625  /* Definitions for the PMU registers */
1626  #define PMCRN_MASK  0xf800
1627  #define PMCRN_SHIFT 11
1628  #define PMCRLP  0x80
1629  #define PMCRLC  0x40
1630  #define PMCRDP  0x20
1631  #define PMCRX   0x10
1632  #define PMCRD   0x8
1633  #define PMCRC   0x4
1634  #define PMCRP   0x2
1635  #define PMCRE   0x1
1636  /*
1637   * Mask of PMCR bits writable by guest (not including WO bits like C, P,
1638   * which can be written as 1 to trigger behaviour but which stay RAZ).
1639   */
1640  #define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
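
/*
 * Illustrative sketch: applying the writable mask in a read-modify-write
 * of PMCR.  This shows only the masking pattern, not any real helper.
 */
static inline uint64_t pmcr_write_example(uint64_t old_pmcr, uint64_t value)
{
    return (old_pmcr & ~(uint64_t)PMCR_WRITABLE_MASK)
         | (value & PMCR_WRITABLE_MASK);
}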
1641  
1642  #define PMXEVTYPER_P          0x80000000
1643  #define PMXEVTYPER_U          0x40000000
1644  #define PMXEVTYPER_NSK        0x20000000
1645  #define PMXEVTYPER_NSU        0x10000000
1646  #define PMXEVTYPER_NSH        0x08000000
1647  #define PMXEVTYPER_M          0x04000000
1648  #define PMXEVTYPER_MT         0x02000000
1649  #define PMXEVTYPER_EVTCOUNT   0x0000ffff
1650  #define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1651                                 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1652                                 PMXEVTYPER_M | PMXEVTYPER_MT | \
1653                                 PMXEVTYPER_EVTCOUNT)
1654  
1655  #define PMCCFILTR             0xf8000000
1656  #define PMCCFILTR_M           PMXEVTYPER_M
1657  #define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
1658  
1659  static inline uint32_t pmu_num_counters(CPUARMState *env)
1660  {
1661      ARMCPU *cpu = env_archcpu(env);
1662  
1663      return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
1664  }
1665  
1666  /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1667  static inline uint64_t pmu_counter_mask(CPUARMState *env)
1668  {
1669      return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
1670  }
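
/*
 * Worked example: with PMCR.N == 4 this is (1 << 31) | 0xf == 0x8000000f,
 * i.e. bit 31 for the cycle counter plus one bit per event counter.
 */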
1671  
1672  #ifdef TARGET_AARCH64
1673  GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
1674  int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
1675  int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
1676  int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
1677  int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
1678  int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
1679  int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
1680  int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
1681  int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
1682  void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
1683  void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
1684  void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
1685  void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
1686  void aarch64_max_tcg_initfn(Object *obj);
1687  void aarch64_add_pauth_properties(Object *obj);
1688  void aarch64_add_sve_properties(Object *obj);
1689  void aarch64_add_sme_properties(Object *obj);
1690  #endif
1691  
1692  /* Read the CONTROL register as the MRS instruction would. */
1693  uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
1694  
1695  /*
1696   * Return a pointer to the location where we currently store the
1697   * stack pointer for the requested security state and thread mode.
1698   * This pointer will become invalid if the CPU state is updated
1699   * such that the stack pointers are switched around (eg changing
1700   * the SPSEL control bit).
1701   */
1702  uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
1703                               bool threadmode, bool spsel);
1704  
1705  bool el_is_in_host(CPUARMState *env, int el);
1706  
1707  void aa32_max_features(ARMCPU *cpu);
1708  int exception_target_el(CPUARMState *env);
1709  bool arm_singlestep_active(CPUARMState *env);
1710  bool arm_generate_debug_exceptions(CPUARMState *env);
1711  
1712  /**
1713   * pauth_ptr_mask:
1714   * @param: parameters defining the MMU setup
1715   *
1716   * Return a mask of the address bits that contain the authentication code,
1717   * given the MMU config defined by @param.
1718   */
1719  static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
1720  {
1721      int bot_pac_bit = 64 - param.tsz;
1722      int top_pac_bit = 64 - 8 * param.tbi;
1723  
1724      return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
1725  }
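
/*
 * Worked example: for a 48-bit VA regime (param.tsz == 16) with TBI
 * enabled (param.tbi == 1), bot_pac_bit == 48 and top_pac_bit == 56,
 * giving the mask 0x00ff000000000000 (the PAC sits in bits [55:48]).
 */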
1726  
1727  /* Add the cpreg definitions for debug related system registers */
1728  void define_debug_regs(ARMCPU *cpu);
1729  
1730  /* Effective value of MDCR_EL2 */
1731  static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
1732  {
1733      return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
1734  }
1735  
1736  /* Powers of 2 for sve_vq_map et al. */
1737  #define SVE_VQ_POW2_MAP                                 \
1738      ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
1739       (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))
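
/* Worked value: bits 0, 1, 3, 7 and 15, i.e. 0x808b (VQ = 1, 2, 4, 8, 16). */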
1740  
1741  /*
1742   * Return true if it is possible to take a fine-grained-trap to EL2.
1743   */
1744  static inline bool arm_fgt_active(CPUARMState *env, int el)
1745  {
1746      /*
1747       * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
1748       * that can affect EL0, but it is harmless to do the test also for
1749       * traps on registers that are only accessible at EL1 because if the test
1750       * returns true then we can't be executing at EL1 anyway.
1751       * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
1752   * traps from AArch32 happen only in the case where EL0 is AArch32.
1753       */
1754      return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
1755          el < 2 && arm_is_el2_enabled(env) &&
1756          arm_el_is_aa64(env, 1) &&
1757          (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
1758          (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
1759  }
1760  
1761  void assert_hflags_rebuild_correctly(CPUARMState *env);
1762  
1763  /*
1764   * Although the ARM implementation of hardware assisted debugging
1765   * allows for different breakpoints per-core, the current GDB
1766   * interface treats them as a global pool of registers (which seems to
1767   * be the case for x86, ppc and s390). As a result we store one copy
1768   * of registers which is used for all active cores.
1769   *
1770   * Write access is serialised by virtue of the GDB protocol which
1771   * updates things. Read access (i.e. when the values are copied to the
1772   * vCPU) is also gated by GDB's run control.
1773   *
1774   * This is not unreasonable, as most of the time when debugging kernels
1775   * you never know which core will eventually execute your function.
1776   */
1777  
1778  typedef struct {
1779      uint64_t bcr;
1780      uint64_t bvr;
1781  } HWBreakpoint;
1782  
1783  /*
1784   * The watchpoint registers can cover more area than the requested
1785   * watchpoint so we need to store the additional information
1786   * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
1787   * when the watchpoint is hit.
1788   */
1789  typedef struct {
1790      uint64_t wcr;
1791      uint64_t wvr;
1792      CPUWatchpoint details;
1793  } HWWatchpoint;
1794  
1795  /* Maximum and current break/watch point counts */
1796  extern int max_hw_bps, max_hw_wps;
1797  extern GArray *hw_breakpoints, *hw_watchpoints;
1798  
1799  #define cur_hw_wps      (hw_watchpoints->len)
1800  #define cur_hw_bps      (hw_breakpoints->len)
1801  #define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
1802  #define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
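
/*
 * Illustrative sketch: scanning the shared pool with the accessors
 * above, roughly what find_hw_breakpoint() below can do; the function
 * name here is hypothetical.
 */
static inline bool bp_pool_contains_example(uint64_t pc)
{
    for (guint i = 0; i < cur_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        if (bp->bvr == pc) {
            return true;
        }
    }
    return false;
}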
1803  
1804  bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
1805  int insert_hw_breakpoint(target_ulong pc);
1806  int delete_hw_breakpoint(target_ulong pc);
1807  
1808  bool check_watchpoint_in_range(int i, target_ulong addr);
1809  CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
1810  int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1811  int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
1812  
1813  /* Return the current value of the system counter in ticks */
1814  uint64_t gt_get_countervalue(CPUARMState *env);
1815  /*
1816   * Return the currently applicable offset between the system counter
1817   * and the counter for the specified timer, as used for direct register
1818   * accesses.
1819   */
1820  uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);
1821  #endif
1822