xref: /openbmc/qemu/target/arm/ptw.c (revision 2f3e5e4c08c43daeec144adeeae9138176039b60)
1  /*
2   * ARM page table walking.
3   *
4   * This code is licensed under the GNU GPL v2 or later.
5   *
6   * SPDX-License-Identifier: GPL-2.0-or-later
7   */
8  
9  #include "qemu/osdep.h"
10  #include "qemu/log.h"
11  #include "qemu/range.h"
12  #include "qemu/main-loop.h"
13  #include "exec/exec-all.h"
14  #include "cpu.h"
15  #include "internals.h"
16  #include "cpu-features.h"
17  #include "idau.h"
18  #ifdef CONFIG_TCG
19  # include "tcg/oversized-guest.h"
20  #endif
21  
22  typedef struct S1Translate {
23      /*
24       * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
25       * Together with in_space, specifies the architectural translation regime.
26       */
27      ARMMMUIdx in_mmu_idx;
28      /*
29       * in_ptw_idx: specifies which mmuidx to use for the actual
30       * page table descriptor load operations. This will be one of the
31       * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
32       * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
33       * this field is updated accordingly.
34       */
35      ARMMMUIdx in_ptw_idx;
36      /*
37       * in_space: the security space for this walk. This plus
38       * the in_mmu_idx specify the architectural translation regime.
39       * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
40       * this field is updated accordingly.
41       *
42       * Note that the security space for the in_ptw_idx may be different
43       * from that for the in_mmu_idx. We do not need to explicitly track
44       * the in_ptw_idx security space because:
45       *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
46       *    itself specifies the security space
47       *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
48       *    space used for ptw reads is the same as that of the security
49       *    space of the stage 1 translation for all cases except where
50       *    stage 1 is Secure; in that case the only possibilities for
51       *    the ptw read are Secure and NonSecure, and the in_ptw_idx
52       *    value being Stage2 vs Stage2_S distinguishes those.
53       */
54      ARMSecuritySpace in_space;
55      /*
56       * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
57       * accesses will not update the guest page table access flags
58       * and will not change the state of the softmmu TLBs.
59       */
60      bool in_debug;
61      /*
62       * If this is stage 2 of a stage 1+2 page table walk, then this must
63       * be true if stage 1 is an EL0 access; otherwise this is ignored.
64       * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
65       */
66      bool in_s1_is_el0;
67      bool out_rw;
68      bool out_be;
69      ARMSecuritySpace out_space;
70      hwaddr out_virt;
71      hwaddr out_phys;
72      void *out_host;
73  } S1Translate;
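
/*
 * Illustrative sketch (not part of the translation code): this is
 * roughly how a walk descriptor is filled in for a debug (gdbstub)
 * stage 2 lookup, mirroring the recursive call that S1_ptw_translate()
 * below makes; the concrete values are only an example.
 *
 *     S1Translate s2ptw = {
 *         .in_mmu_idx = ARMMMUIdx_Stage2,
 *         .in_ptw_idx = ptw_idx_for_stage_2(env, ARMMMUIdx_Stage2),
 *         .in_space = ARMSS_NonSecure,
 *         .in_debug = true,
 *     };
 *     GetPhysAddrResult s2 = { };
 *
 *     get_phys_addr_gpc(env, &s2ptw, ipa, MMU_DATA_LOAD, &s2, fi);
 */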
74  
75  static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
76                                  target_ulong address,
77                                  MMUAccessType access_type,
78                                  GetPhysAddrResult *result,
79                                  ARMMMUFaultInfo *fi);
80  
81  static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
82                                target_ulong address,
83                                MMUAccessType access_type,
84                                GetPhysAddrResult *result,
85                                ARMMMUFaultInfo *fi);
86  
87  /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
88  static const uint8_t pamax_map[] = {
89      [0] = 32,
90      [1] = 36,
91      [2] = 40,
92      [3] = 42,
93      [4] = 44,
94      [5] = 48,
95      [6] = 52,
96  };
97  
98  /*
99   * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
100   * Note that machvirt_init calls this on a CPU that is inited but not realized!
101   */
102  unsigned int arm_pamax(ARMCPU *cpu)
103  {
104      if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
105          unsigned int parange =
106              FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
107  
108          /*
109           * id_aa64mmfr0 is a read-only register so values outside of the
110           * supported mappings can be considered an implementation error.
111           */
112          assert(parange < ARRAY_SIZE(pamax_map));
113          return pamax_map[parange];
114      }
115  
116      if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
117          /* v7 or v8 with LPAE */
118          return 40;
119      }
120      /* Anything else */
121      return 32;
122  }
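
/*
 * Worked example: a CPU reporting ID_AA64MMFR0.PARANGE == 0b0101 maps
 * via pamax_map[5] to a 48-bit physical address range, so arm_pamax()
 * returns 48 for it; a v7/v8 CPU with LPAE but without AArch64 gets 40.
 */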
123  
124  /*
125   * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
126   */
127  ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
128  {
129      switch (mmu_idx) {
130      case ARMMMUIdx_E10_0:
131          return ARMMMUIdx_Stage1_E0;
132      case ARMMMUIdx_E10_1:
133          return ARMMMUIdx_Stage1_E1;
134      case ARMMMUIdx_E10_1_PAN:
135          return ARMMMUIdx_Stage1_E1_PAN;
136      default:
137          return mmu_idx;
138      }
139  }
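
/*
 * For example, stage_1_mmu_idx(ARMMMUIdx_E10_1) is ARMMMUIdx_Stage1_E1,
 * while indexes that are not part of a stage 1+2 regime (such as
 * ARMMMUIdx_E2) are returned unchanged.
 */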
140  
141  ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
142  {
143      return stage_1_mmu_idx(arm_mmu_idx(env));
144  }
145  
146  /*
147   * Return where we should do ptw loads from for a stage 2 walk.
148   * This depends on whether the address we are looking up is a
149   * Secure IPA or a NonSecure IPA, which we know from whether this is
150   * Stage2 or Stage2_S.
151   * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
152   */
153  static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
154  {
155      bool s2walk_secure;
156  
157      /*
158       * We're OK to check the current state of the CPU here because
159       * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
160       * changes.
161       * (2) there's no way to do a lookup that cares about Stage 2 for a
162       * different security state to the current one for AArch64, and AArch32
163       * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
164       * an NS stage 1+2 lookup while the NS bit is 0.)
165       */
166      if (!arm_el_is_aa64(env, 3)) {
167          return ARMMMUIdx_Phys_NS;
168      }
169  
170      switch (arm_security_space_below_el3(env)) {
171      case ARMSS_NonSecure:
172          return ARMMMUIdx_Phys_NS;
173      case ARMSS_Realm:
174          return ARMMMUIdx_Phys_Realm;
175      case ARMSS_Secure:
176          if (stage2idx == ARMMMUIdx_Stage2_S) {
177              s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
178          } else {
179              s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
180          }
181          return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
182      default:
183          g_assert_not_reached();
184      }
185  }
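
/*
 * Worked example (register values assumed for illustration): in the
 * Secure EL1&0 regime with an AArch64 EL3, a stage 2 walk of the
 * NonSecure IPA space (stage2idx == ARMMMUIdx_Stage2) loads its
 * descriptors from the Secure PA space (ARMMMUIdx_Phys_S) while
 * VTCR_EL2.NSW is 0, and from ARMMMUIdx_Phys_NS once NSW is set.
 */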
186  
187  static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
188  {
189      return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
190  }
191  
192  /* Return the TTBR associated with this translation regime */
193  static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
194  {
195      if (mmu_idx == ARMMMUIdx_Stage2) {
196          return env->cp15.vttbr_el2;
197      }
198      if (mmu_idx == ARMMMUIdx_Stage2_S) {
199          return env->cp15.vsttbr_el2;
200      }
201      if (ttbrn == 0) {
202          return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
203      } else {
204          return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
205      }
206  }
207  
208  /* Return true if the specified stage of address translation is disabled */
209  static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
210                                          ARMSecuritySpace space)
211  {
212      uint64_t hcr_el2;
213  
214      if (arm_feature(env, ARM_FEATURE_M)) {
215          bool is_secure = arm_space_is_secure(space);
216          switch (env->v7m.mpu_ctrl[is_secure] &
217                  (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
218          case R_V7M_MPU_CTRL_ENABLE_MASK:
219              /* Enabled, but not for HardFault and NMI */
220              return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
221          case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
222              /* Enabled for all cases */
223              return false;
224          case 0:
225          default:
226              /*
227               * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
228               * we warned about that in armv7m_nvic.c when the guest set it.
229               */
230              return true;
231          }
232      }
233  
234  
235      switch (mmu_idx) {
236      case ARMMMUIdx_Stage2:
237      case ARMMMUIdx_Stage2_S:
238          /* HCR.DC means HCR.VM behaves as 1 */
239          hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
240          return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
241  
242      case ARMMMUIdx_E10_0:
243      case ARMMMUIdx_E10_1:
244      case ARMMMUIdx_E10_1_PAN:
245          /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
246          hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
247          if (hcr_el2 & HCR_TGE) {
248              return true;
249          }
250          break;
251  
252      case ARMMMUIdx_Stage1_E0:
253      case ARMMMUIdx_Stage1_E1:
254      case ARMMMUIdx_Stage1_E1_PAN:
255          /* HCR.DC means SCTLR_EL1.M behaves as 0 */
256          hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
257          if (hcr_el2 & HCR_DC) {
258              return true;
259          }
260          break;
261  
262      case ARMMMUIdx_E20_0:
263      case ARMMMUIdx_E20_2:
264      case ARMMMUIdx_E20_2_PAN:
265      case ARMMMUIdx_E2:
266      case ARMMMUIdx_E3:
267          break;
268  
269      case ARMMMUIdx_Phys_S:
270      case ARMMMUIdx_Phys_NS:
271      case ARMMMUIdx_Phys_Root:
272      case ARMMMUIdx_Phys_Realm:
273          /* No translation for physical address spaces. */
274          return true;
275  
276      default:
277          g_assert_not_reached();
278      }
279  
280      return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
281  }
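
/*
 * For instance, when the effective HCR_EL2.VM and HCR_EL2.DC are both 0
 * the Stage2 indexes above report "disabled", so an EL1&0 access is
 * translated by stage 1 only; for the stage 1 regimes that fall through,
 * the final SCTLR.M check decides.
 */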
282  
283  static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
284                                       ARMSecuritySpace pspace,
285                                       ARMMMUFaultInfo *fi)
286  {
287      MemTxAttrs attrs = {
288          .secure = true,
289          .space = ARMSS_Root,
290      };
291      ARMCPU *cpu = env_archcpu(env);
292      uint64_t gpccr = env->cp15.gpccr_el3;
293      unsigned pps, pgs, l0gptsz, level = 0;
294      uint64_t tableaddr, pps_mask, align, entry, index;
295      AddressSpace *as;
296      MemTxResult result;
297      int gpi;
298  
299      if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
300          return true;
301      }
302  
303      /*
304       * GPC Priority 1 (R_GMGRR):
305       * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
306       * the access fails as GPT walk fault at level 0.
307       */
308  
309      /*
310       * Configuration of PPS to a value exceeding the implemented
311       * physical address size is invalid.
312       */
313      pps = FIELD_EX64(gpccr, GPCCR, PPS);
314      if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
315          goto fault_walk;
316      }
317      pps = pamax_map[pps];
318      pps_mask = MAKE_64BIT_MASK(0, pps);
319  
320      switch (FIELD_EX64(gpccr, GPCCR, SH)) {
321      case 0b10: /* outer shareable */
322          break;
323      case 0b00: /* non-shareable */
324      case 0b11: /* inner shareable */
325          /* Inner and Outer non-cacheable requires Outer shareable. */
326          if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
327              FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
328              goto fault_walk;
329          }
330          break;
331      default:   /* reserved */
332          goto fault_walk;
333      }
334  
335      switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
336      case 0b00: /* 4KB */
337          pgs = 12;
338          break;
339      case 0b01: /* 64KB */
340          pgs = 16;
341          break;
342      case 0b10: /* 16KB */
343          pgs = 14;
344          break;
345      default: /* reserved */
346          goto fault_walk;
347      }
348  
349      /* Note this field is read-only and fixed at reset. */
350      l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
351  
352      /*
353       * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
354       * R_CPDSB: A NonSecure physical address input exceeding PPS
355       * does not experience any fault.
356       */
357      if (paddress & ~pps_mask) {
358          if (pspace == ARMSS_NonSecure) {
359              return true;
360          }
361          goto fault_size;
362      }
363  
364      /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
365      tableaddr = env->cp15.gptbr_el3 << 12;
366      if (tableaddr & ~pps_mask) {
367          goto fault_size;
368      }
369  
370      /*
371       * BADDR is aligned per a function of PPS and L0GPTSZ.
372       * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
373       * unlike the RES0 bits of the GPT entries (R_XNKFZ).
374       */
375      align = MAX(pps - l0gptsz + 3, 12);
376      align = MAKE_64BIT_MASK(0, align);
377      tableaddr &= ~align;
378  
379      as = arm_addressspace(env_cpu(env), attrs);
380  
381      /* Level 0 lookup. */
382      index = extract64(paddress, l0gptsz, pps - l0gptsz);
383      tableaddr += index * 8;
384      entry = address_space_ldq_le(as, tableaddr, attrs, &result);
385      if (result != MEMTX_OK) {
386          goto fault_eabt;
387      }
388  
389      switch (extract32(entry, 0, 4)) {
390      case 1: /* block descriptor */
391          if (entry >> 8) {
392              goto fault_walk; /* RES0 bits not 0 */
393          }
394          gpi = extract32(entry, 4, 4);
395          goto found;
396      case 3: /* table descriptor */
397          tableaddr = entry & ~0xf;
398          align = MAX(l0gptsz - pgs - 1, 12);
399          align = MAKE_64BIT_MASK(0, align);
400          if (tableaddr & (~pps_mask | align)) {
401              goto fault_walk; /* RES0 bits not 0 */
402          }
403          break;
404      default: /* invalid */
405          goto fault_walk;
406      }
407  
408      /* Level 1 lookup */
409      level = 1;
410      index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
411      tableaddr += index * 8;
412      entry = address_space_ldq_le(as, tableaddr, attrs, &result);
413      if (result != MEMTX_OK) {
414          goto fault_eabt;
415      }
416  
417      switch (extract32(entry, 0, 4)) {
418      case 1: /* contiguous descriptor */
419          if (entry >> 10) {
420              goto fault_walk; /* RES0 bits not 0 */
421          }
422          /*
423           * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
424           * and because we cannot invalidate by pa, and thus will always
425           * flush entire tlbs, we don't actually care about the range here
426           * and can simply extract the GPI as the result.
427           */
428          if (extract32(entry, 8, 2) == 0) {
429              goto fault_walk; /* reserved contig */
430          }
431          gpi = extract32(entry, 4, 4);
432          break;
433      default:
434          index = extract64(paddress, pgs, 4);
435          gpi = extract64(entry, index * 4, 4);
436          break;
437      }
438  
439   found:
440      switch (gpi) {
441      case 0b0000: /* no access */
442          break;
443      case 0b1111: /* all access */
444          return true;
445      case 0b1000:
446      case 0b1001:
447      case 0b1010:
448      case 0b1011:
449          if (pspace == (gpi & 3)) {
450              return true;
451          }
452          break;
453      default:
454          goto fault_walk; /* reserved */
455      }
456  
457      fi->gpcf = GPCF_Fail;
458      goto fault_common;
459   fault_eabt:
460      fi->gpcf = GPCF_EABT;
461      goto fault_common;
462   fault_size:
463      fi->gpcf = GPCF_AddressSize;
464      goto fault_common;
465   fault_walk:
466      fi->gpcf = GPCF_Walk;
467   fault_common:
468      fi->level = level;
469      fi->paddr = paddress;
470      fi->paddr_space = pspace;
471      return false;
472  }
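
/*
 * Worked example of the indexing above, assuming GPCCR_EL3 fields of
 * PPS == 0b101 (48 bits), PGS == 4KB and L0GPTSZ == 0, i.e. pps == 48,
 * pgs == 12, l0gptsz == 30: the level 0 index is paddress[47:30] (1GB
 * per level 0 entry), the level 1 index is paddress[29:16], and a
 * granules descriptor packs sixteen 4-bit GPI fields selected by
 * paddress[15:12].
 */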
473  
474  static bool S1_attrs_are_device(uint8_t attrs)
475  {
476      /*
477       * This slightly under-decodes the MAIR_ELx field:
478       * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
479       * 0b0000dd1x is UNPREDICTABLE.
480       */
481      return (attrs & 0xf0) == 0;
482  }
483  
484  static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
485  {
486      /*
487       * For an S1 page table walk, the stage 1 attributes are always
488       * some form of "this is Normal memory". The combined S1+S2
489       * attributes are therefore only Device if stage 2 specifies Device.
490       * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
491       * ie when cacheattrs.attrs bits [3:2] are 0b00.
492       * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
493       * when cacheattrs.attrs bit [2] is 0.
494       */
495      if (hcr & HCR_FWB) {
496          return (attrs & 0x4) == 0;
497      } else {
498          return (attrs & 0xc) == 0;
499      }
500  }
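
/*
 * Example: with HCR_EL2.FWB == 0, a stage 2 descriptor whose MemAttr is
 * 0b0000 (Device-nGnRnE) has attrs bits [3:2] == 0b00 and is treated as
 * Device here, whereas 0b1111 (Normal, Write-Back) is not.
 */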
501  
502  static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
503                                            ARMMMUIdx s2_mmu_idx)
504  {
505      /*
506       * Return the security space to use for stage 2 when doing
507       * the S1 page table descriptor load.
508       */
509      if (regime_is_stage2(s2_mmu_idx)) {
510          /*
511           * The security space for ptw reads is almost always the same
512           * as that of the security space of the stage 1 translation.
513           * The only exception is when stage 1 is Secure; in that case
514           * the ptw read might be to the Secure or the NonSecure space
515           * (but never Realm or Root), and the s2_mmu_idx tells us which.
516           * Root translations are always single-stage.
517           */
518          if (s1_space == ARMSS_Secure) {
519              return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
520          } else {
521              assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
522              assert(s1_space != ARMSS_Root);
523              return s1_space;
524          }
525      } else {
526          /* ptw loads are from phys: the mmu idx itself says which space */
527          return arm_phys_to_space(s2_mmu_idx);
528      }
529  }
530  
531  static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
532  {
533      /*
534       * For stage 2 faults in Secure EL2, S1NS indicates
535       * whether the faulting IPA is in the Secure or NonSecure
536       * IPA space. For all other kinds of fault, it is false.
537       */
538      return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
539          && s2_mmu_idx != ARMMMUIdx_Stage2_S;
540  }
541  
542  /* Translate a S1 pagetable walk through S2 if needed.  */
543  static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
544                               hwaddr addr, ARMMMUFaultInfo *fi)
545  {
546      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
547      ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
548      uint8_t pte_attrs;
549  
550      ptw->out_virt = addr;
551  
552      if (unlikely(ptw->in_debug)) {
553          /*
554           * From gdbstub, do not use softmmu so that we don't modify the
555           * state of the cpu at all, including softmmu tlb contents.
556           */
557          ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
558          S1Translate s2ptw = {
559              .in_mmu_idx = s2_mmu_idx,
560              .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
561              .in_space = s2_space,
562              .in_debug = true,
563          };
564          GetPhysAddrResult s2 = { };
565  
566          if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
567              goto fail;
568          }
569  
570          ptw->out_phys = s2.f.phys_addr;
571          pte_attrs = s2.cacheattrs.attrs;
572          ptw->out_host = NULL;
573          ptw->out_rw = false;
574          ptw->out_space = s2.f.attrs.space;
575      } else {
576  #ifdef CONFIG_TCG
577          CPUTLBEntryFull *full;
578          int flags;
579  
580          env->tlb_fi = fi;
581          flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
582                                        arm_to_core_mmu_idx(s2_mmu_idx),
583                                        &ptw->out_host, &full);
584          env->tlb_fi = NULL;
585  
586          if (unlikely(flags & TLB_INVALID_MASK)) {
587              goto fail;
588          }
589          ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
590          ptw->out_rw = full->prot & PAGE_WRITE;
591          pte_attrs = full->extra.arm.pte_attrs;
592          ptw->out_space = full->attrs.space;
593  #else
594          g_assert_not_reached();
595  #endif
596      }
597  
598      if (regime_is_stage2(s2_mmu_idx)) {
599          uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
600  
601          if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
602              /*
603               * PTW set and S1 walk touched S2 Device memory:
604               * generate Permission fault.
605               */
606              fi->type = ARMFault_Permission;
607              fi->s2addr = addr;
608              fi->stage2 = true;
609              fi->s1ptw = true;
610              fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
611              return false;
612          }
613      }
614  
615      ptw->out_be = regime_translation_big_endian(env, mmu_idx);
616      return true;
617  
618   fail:
619      assert(fi->type != ARMFault_None);
620      if (fi->type == ARMFault_GPCFOnOutput) {
621          fi->type = ARMFault_GPCFOnWalk;
622      }
623      fi->s2addr = addr;
624      fi->stage2 = regime_is_stage2(s2_mmu_idx);
625      fi->s1ptw = fi->stage2;
626      fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
627      return false;
628  }
629  
630  /* All loads done in the course of a page table walk go through here. */
631  static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
632                              ARMMMUFaultInfo *fi)
633  {
634      CPUState *cs = env_cpu(env);
635      void *host = ptw->out_host;
636      uint32_t data;
637  
638      if (likely(host)) {
639          /* Page tables are in RAM, and we have the host address. */
640          data = qatomic_read((uint32_t *)host);
641          if (ptw->out_be) {
642              data = be32_to_cpu(data);
643          } else {
644              data = le32_to_cpu(data);
645          }
646      } else {
647          /* Page tables are in MMIO. */
648          MemTxAttrs attrs = {
649              .space = ptw->out_space,
650              .secure = arm_space_is_secure(ptw->out_space),
651          };
652          AddressSpace *as = arm_addressspace(cs, attrs);
653          MemTxResult result = MEMTX_OK;
654  
655          if (ptw->out_be) {
656              data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
657          } else {
658              data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
659          }
660          if (unlikely(result != MEMTX_OK)) {
661              fi->type = ARMFault_SyncExternalOnWalk;
662              fi->ea = arm_extabort_type(result);
663              return 0;
664          }
665      }
666      return data;
667  }
668  
669  static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
670                              ARMMMUFaultInfo *fi)
671  {
672      CPUState *cs = env_cpu(env);
673      void *host = ptw->out_host;
674      uint64_t data;
675  
676      if (likely(host)) {
677          /* Page tables are in RAM, and we have the host address. */
678  #ifdef CONFIG_ATOMIC64
679          data = qatomic_read__nocheck((uint64_t *)host);
680          if (ptw->out_be) {
681              data = be64_to_cpu(data);
682          } else {
683              data = le64_to_cpu(data);
684          }
685  #else
686          if (ptw->out_be) {
687              data = ldq_be_p(host);
688          } else {
689              data = ldq_le_p(host);
690          }
691  #endif
692      } else {
693          /* Page tables are in MMIO. */
694          MemTxAttrs attrs = {
695              .space = ptw->out_space,
696              .secure = arm_space_is_secure(ptw->out_space),
697          };
698          AddressSpace *as = arm_addressspace(cs, attrs);
699          MemTxResult result = MEMTX_OK;
700  
701          if (ptw->out_be) {
702              data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
703          } else {
704              data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
705          }
706          if (unlikely(result != MEMTX_OK)) {
707              fi->type = ARMFault_SyncExternalOnWalk;
708              fi->ea = arm_extabort_type(result);
709              return 0;
710          }
711      }
712      return data;
713  }
714  
715  static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
716                               uint64_t new_val, S1Translate *ptw,
717                               ARMMMUFaultInfo *fi)
718  {
719  #if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
720      uint64_t cur_val;
721      void *host = ptw->out_host;
722  
723      if (unlikely(!host)) {
724          /* Page table in MMIO Memory Region */
725          CPUState *cs = env_cpu(env);
726          MemTxAttrs attrs = {
727              .space = ptw->out_space,
728              .secure = arm_space_is_secure(ptw->out_space),
729          };
730          AddressSpace *as = arm_addressspace(cs, attrs);
731          MemTxResult result = MEMTX_OK;
732          bool need_lock = !bql_locked();
733  
734          if (need_lock) {
735              bql_lock();
736          }
737          if (ptw->out_be) {
738              cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
739              if (unlikely(result != MEMTX_OK)) {
740                  fi->type = ARMFault_SyncExternalOnWalk;
741                  fi->ea = arm_extabort_type(result);
742                  if (need_lock) {
743                      bql_unlock();
744                  }
745                  return old_val;
746              }
747              if (cur_val == old_val) {
748                  address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result);
749                  if (unlikely(result != MEMTX_OK)) {
750                      fi->type = ARMFault_SyncExternalOnWalk;
751                      fi->ea = arm_extabort_type(result);
752                      if (need_lock) {
753                          bql_unlock();
754                      }
755                      return old_val;
756                  }
757                  cur_val = new_val;
758              }
759          } else {
760              cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
761              if (unlikely(result != MEMTX_OK)) {
762                  fi->type = ARMFault_SyncExternalOnWalk;
763                  fi->ea = arm_extabort_type(result);
764                  if (need_lock) {
765                      bql_unlock();
766                  }
767                  return old_val;
768              }
769              if (cur_val == old_val) {
770                  address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result);
771                  if (unlikely(result != MEMTX_OK)) {
772                      fi->type = ARMFault_SyncExternalOnWalk;
773                      fi->ea = arm_extabort_type(result);
774                      if (need_lock) {
775                          bql_unlock();
776                      }
777                      return old_val;
778                  }
779                  cur_val = new_val;
780              }
781          }
782          if (need_lock) {
783              bql_unlock();
784          }
785          return cur_val;
786      }
787  
788      /*
789       * Raising a stage2 Protection fault for an atomic update to a read-only
790       * page is delayed until it is certain that there is a change to make.
791       */
792      if (unlikely(!ptw->out_rw)) {
793          int flags;
794  
795          env->tlb_fi = fi;
796          flags = probe_access_full_mmu(env, ptw->out_virt, 0,
797                                        MMU_DATA_STORE,
798                                        arm_to_core_mmu_idx(ptw->in_ptw_idx),
799                                        NULL, NULL);
800          env->tlb_fi = NULL;
801  
802          if (unlikely(flags & TLB_INVALID_MASK)) {
803              /*
804               * We know this must be a stage 2 fault because the granule
805               * protection table does not separately track read and write
806               * permission, so all GPC faults are caught in S1_ptw_translate():
807               * we only get here for "readable but not writeable".
808               */
809              assert(fi->type != ARMFault_None);
810              fi->s2addr = ptw->out_virt;
811              fi->stage2 = true;
812              fi->s1ptw = true;
813              fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
814              return 0;
815          }
816  
817          /* In case CAS mismatches and we loop, remember writability. */
818          ptw->out_rw = true;
819      }
820  
821  #ifdef CONFIG_ATOMIC64
822      if (ptw->out_be) {
823          old_val = cpu_to_be64(old_val);
824          new_val = cpu_to_be64(new_val);
825          cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
826          cur_val = be64_to_cpu(cur_val);
827      } else {
828          old_val = cpu_to_le64(old_val);
829          new_val = cpu_to_le64(new_val);
830          cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
831          cur_val = le64_to_cpu(cur_val);
832      }
833  #else
834      /*
835       * We can't support the full 64-bit atomic cmpxchg on the host.
836       * Because this is only used for FEAT_HAFDBS, which is only for AA64,
837       * we know that TCG_OVERSIZED_GUEST is set, which means that we are
838       * running in round-robin mode and could only race with dma i/o.
839       */
840  #if !TCG_OVERSIZED_GUEST
841  # error "Unexpected configuration"
842  #endif
843      bool locked = bql_locked();
844      if (!locked) {
845          bql_lock();
846      }
847      if (ptw->out_be) {
848          cur_val = ldq_be_p(host);
849          if (cur_val == old_val) {
850              stq_be_p(host, new_val);
851          }
852      } else {
853          cur_val = ldq_le_p(host);
854          if (cur_val == old_val) {
855              stq_le_p(host, new_val);
856          }
857      }
858      if (!locked) {
859          bql_unlock();
860      }
861  #endif
862  
863      return cur_val;
864  #else
865      /* AArch32 does not have FEAT_HAFDBS; non-TCG guests only use debug-mode. */
866      g_assert_not_reached();
867  #endif
868  }
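
/*
 * Hedged usage sketch: the real caller is the LPAE walker later in this
 * file, but a FEAT_HAFDBS-style Access Flag update amounts to
 *
 *     uint64_t new_desc = descriptor | (1ull << 10);   // AF is bit [10]
 *     if (arm_casq_ptw(env, descriptor, new_desc, ptw, fi) != descriptor) {
 *         // CAS lost a race: re-read the descriptor and retry
 *     }
 *
 * so the write fault handling above only triggers when an update is
 * actually needed.
 */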
869  
870  static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
871                                       uint32_t *table, uint32_t address)
872  {
873      /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
874      uint64_t tcr = regime_tcr(env, mmu_idx);
875      int maskshift = extract32(tcr, 0, 3);
876      uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
877      uint32_t base_mask;
878  
879      if (address & mask) {
880          if (tcr & TTBCR_PD1) {
881              /* Translation table walk disabled for TTBR1 */
882              return false;
883          }
884          *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
885      } else {
886          if (tcr & TTBCR_PD0) {
887              /* Translation table walk disabled for TTBR0 */
888              return false;
889          }
890          base_mask = ~((uint32_t)0x3fffu >> maskshift);
891          *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
892      }
893      *table |= (address >> 18) & 0x3ffc;
894      return true;
895  }
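
/*
 * Worked example: with TTBCR.N == 2 the computed mask is 0xc0000000, so
 * VAs below 0x40000000 are translated via TTBR0 (base_mask 0xfffff000)
 * and everything else via TTBR1; either way the returned table address
 * then selects the level 1 entry using VA[31:20] * 4.
 */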
896  
897  /*
898   * Translate section/page access permissions to page R/W protection flags
899   * @env:         CPUARMState
900   * @mmu_idx:     MMU index indicating required translation regime
901   * @ap:          The 3-bit access permissions (AP[2:0])
902   * @domain_prot: The 2-bit domain access permissions
903   * @is_user: TRUE if accessing from PL0
904   */
905  static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
906                           int ap, int domain_prot, bool is_user)
907  {
908      if (domain_prot == 3) {
909          return PAGE_READ | PAGE_WRITE;
910      }
911  
912      switch (ap) {
913      case 0:
914          if (arm_feature(env, ARM_FEATURE_V7)) {
915              return 0;
916          }
917          switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
918          case SCTLR_S:
919              return is_user ? 0 : PAGE_READ;
920          case SCTLR_R:
921              return PAGE_READ;
922          default:
923              return 0;
924          }
925      case 1:
926          return is_user ? 0 : PAGE_READ | PAGE_WRITE;
927      case 2:
928          if (is_user) {
929              return PAGE_READ;
930          } else {
931              return PAGE_READ | PAGE_WRITE;
932          }
933      case 3:
934          return PAGE_READ | PAGE_WRITE;
935      case 4: /* Reserved.  */
936          return 0;
937      case 5:
938          return is_user ? 0 : PAGE_READ;
939      case 6:
940          return PAGE_READ;
941      case 7:
942          if (!arm_feature(env, ARM_FEATURE_V6K)) {
943              return 0;
944          }
945          return PAGE_READ;
946      default:
947          g_assert_not_reached();
948      }
949  }
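
/*
 * For example, AP == 0b010 decodes to PAGE_READ for an unprivileged
 * access and PAGE_READ | PAGE_WRITE for a privileged one, while
 * AP == 0b000 on a v7 CPU always decodes to no access.
 */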
950  
951  /*
952   * Translate section/page access permissions to page R/W protection flags
953   * @env:         CPUARMState
954   * @mmu_idx:     MMU index indicating required translation regime
955   * @ap:          The 3-bit access permissions (AP[2:0])
956   * @domain_prot: The 2-bit domain access permissions
957   */
958  static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
959                           int ap, int domain_prot)
960  {
961      return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
962                                   regime_is_user(env, mmu_idx));
963  }
964  
965  /*
966   * Translate section/page access permissions to page R/W protection flags.
967   * @ap:      The 2-bit simple AP (AP[2:1])
968   * @is_user: TRUE if accessing from PL0
969   */
970  static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
971  {
972      switch (ap) {
973      case 0:
974          return is_user ? 0 : PAGE_READ | PAGE_WRITE;
975      case 1:
976          return PAGE_READ | PAGE_WRITE;
977      case 2:
978          return is_user ? 0 : PAGE_READ;
979      case 3:
980          return PAGE_READ;
981      default:
982          g_assert_not_reached();
983      }
984  }
985  
986  static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
987  {
988      return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
989  }
990  
991  static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
992                               uint32_t address, MMUAccessType access_type,
993                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
994  {
995      int level = 1;
996      uint32_t table;
997      uint32_t desc;
998      int type;
999      int ap;
1000      int domain = 0;
1001      int domain_prot;
1002      hwaddr phys_addr;
1003      uint32_t dacr;
1004  
1005      /* Pagetable walk.  */
1006      /* Lookup l1 descriptor.  */
1007      if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
1008          /* Section translation fault if page walk is disabled by PD0 or PD1 */
1009          fi->type = ARMFault_Translation;
1010          goto do_fault;
1011      }
1012      if (!S1_ptw_translate(env, ptw, table, fi)) {
1013          goto do_fault;
1014      }
1015      desc = arm_ldl_ptw(env, ptw, fi);
1016      if (fi->type != ARMFault_None) {
1017          goto do_fault;
1018      }
1019      type = (desc & 3);
1020      domain = (desc >> 5) & 0x0f;
1021      if (regime_el(env, ptw->in_mmu_idx) == 1) {
1022          dacr = env->cp15.dacr_ns;
1023      } else {
1024          dacr = env->cp15.dacr_s;
1025      }
1026      domain_prot = (dacr >> (domain * 2)) & 3;
1027      if (type == 0) {
1028          /* Section translation fault.  */
1029          fi->type = ARMFault_Translation;
1030          goto do_fault;
1031      }
1032      if (type != 2) {
1033          level = 2;
1034      }
1035      if (domain_prot == 0 || domain_prot == 2) {
1036          fi->type = ARMFault_Domain;
1037          goto do_fault;
1038      }
1039      if (type == 2) {
1040          /* 1Mb section.  */
1041          phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1042          ap = (desc >> 10) & 3;
1043          result->f.lg_page_size = 20; /* 1MB */
1044      } else {
1045          /* Lookup l2 entry.  */
1046          if (type == 1) {
1047              /* Coarse pagetable.  */
1048              table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1049          } else {
1050              /* Fine pagetable.  */
1051              table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1052          }
1053          if (!S1_ptw_translate(env, ptw, table, fi)) {
1054              goto do_fault;
1055          }
1056          desc = arm_ldl_ptw(env, ptw, fi);
1057          if (fi->type != ARMFault_None) {
1058              goto do_fault;
1059          }
1060          switch (desc & 3) {
1061          case 0: /* Page translation fault.  */
1062              fi->type = ARMFault_Translation;
1063              goto do_fault;
1064          case 1: /* 64k page.  */
1065              phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1066              ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
1067              result->f.lg_page_size = 16;
1068              break;
1069          case 2: /* 4k page.  */
1070              phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1071              ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
1072              result->f.lg_page_size = 12;
1073              break;
1074          case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
1075              if (type == 1) {
1076                  /* ARMv6/XScale extended small page format */
1077                  if (arm_feature(env, ARM_FEATURE_XSCALE)
1078                      || arm_feature(env, ARM_FEATURE_V6)) {
1079                      phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1080                      result->f.lg_page_size = 12;
1081                  } else {
1082                      /*
1083                       * UNPREDICTABLE in ARMv5; we choose to take a
1084                       * page translation fault.
1085                       */
1086                      fi->type = ARMFault_Translation;
1087                      goto do_fault;
1088                  }
1089              } else {
1090                  phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
1091                  result->f.lg_page_size = 10;
1092              }
1093              ap = (desc >> 4) & 3;
1094              break;
1095          default:
1096              /* Never happens, but compiler isn't smart enough to tell.  */
1097              g_assert_not_reached();
1098          }
1099      }
1100      result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
1101      result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
1102      if (!(result->f.prot & (1 << access_type))) {
1103          /* Access permission fault.  */
1104          fi->type = ARMFault_Permission;
1105          goto do_fault;
1106      }
1107      result->f.phys_addr = phys_addr;
1108      return false;
1109  do_fault:
1110      fi->domain = domain;
1111      fi->level = level;
1112      return true;
1113  }
1114  
1115  static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
1116                               uint32_t address, MMUAccessType access_type,
1117                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1118  {
1119      ARMCPU *cpu = env_archcpu(env);
1120      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
1121      int level = 1;
1122      uint32_t table;
1123      uint32_t desc;
1124      uint32_t xn;
1125      uint32_t pxn = 0;
1126      int type;
1127      int ap;
1128      int domain = 0;
1129      int domain_prot;
1130      hwaddr phys_addr;
1131      uint32_t dacr;
1132      bool ns;
1133      int user_prot;
1134  
1135      /* Pagetable walk.  */
1136      /* Lookup l1 descriptor.  */
1137      if (!get_level1_table_address(env, mmu_idx, &table, address)) {
1138          /* Section translation fault if page walk is disabled by PD0 or PD1 */
1139          fi->type = ARMFault_Translation;
1140          goto do_fault;
1141      }
1142      if (!S1_ptw_translate(env, ptw, table, fi)) {
1143          goto do_fault;
1144      }
1145      desc = arm_ldl_ptw(env, ptw, fi);
1146      if (fi->type != ARMFault_None) {
1147          goto do_fault;
1148      }
1149      type = (desc & 3);
1150      if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
1151          /* Section translation fault, or attempt to use the encoding
1152           * which is Reserved on implementations without PXN.
1153           */
1154          fi->type = ARMFault_Translation;
1155          goto do_fault;
1156      }
1157      if ((type == 1) || !(desc & (1 << 18))) {
1158          /* Page or Section.  */
1159          domain = (desc >> 5) & 0x0f;
1160      }
1161      if (regime_el(env, mmu_idx) == 1) {
1162          dacr = env->cp15.dacr_ns;
1163      } else {
1164          dacr = env->cp15.dacr_s;
1165      }
1166      if (type == 1) {
1167          level = 2;
1168      }
1169      domain_prot = (dacr >> (domain * 2)) & 3;
1170      if (domain_prot == 0 || domain_prot == 2) {
1171          /* Section or Page domain fault */
1172          fi->type = ARMFault_Domain;
1173          goto do_fault;
1174      }
1175      if (type != 1) {
1176          if (desc & (1 << 18)) {
1177              /* Supersection.  */
1178              phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
1179              phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
1180              phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
1181              result->f.lg_page_size = 24;  /* 16MB */
1182          } else {
1183              /* Section.  */
1184              phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1185              result->f.lg_page_size = 20;  /* 1MB */
1186          }
1187          ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
1188          xn = desc & (1 << 4);
1189          pxn = desc & 1;
1190          ns = extract32(desc, 19, 1);
1191      } else {
1192          if (cpu_isar_feature(aa32_pxn, cpu)) {
1193              pxn = (desc >> 2) & 1;
1194          }
1195          ns = extract32(desc, 3, 1);
1196          /* Lookup l2 entry.  */
1197          table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1198          if (!S1_ptw_translate(env, ptw, table, fi)) {
1199              goto do_fault;
1200          }
1201          desc = arm_ldl_ptw(env, ptw, fi);
1202          if (fi->type != ARMFault_None) {
1203              goto do_fault;
1204          }
1205          ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
1206          switch (desc & 3) {
1207          case 0: /* Page translation fault.  */
1208              fi->type = ARMFault_Translation;
1209              goto do_fault;
1210          case 1: /* 64k page.  */
1211              phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1212              xn = desc & (1 << 15);
1213              result->f.lg_page_size = 16;
1214              break;
1215          case 2: case 3: /* 4k page.  */
1216              phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1217              xn = desc & 1;
1218              result->f.lg_page_size = 12;
1219              break;
1220          default:
1221              /* Never happens, but compiler isn't smart enough to tell.  */
1222              g_assert_not_reached();
1223          }
1224      }
1225      if (domain_prot == 3) {
1226          result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1227      } else {
1228          if (pxn && !regime_is_user(env, mmu_idx)) {
1229              xn = 1;
1230          }
1231          if (xn && access_type == MMU_INST_FETCH) {
1232              fi->type = ARMFault_Permission;
1233              goto do_fault;
1234          }
1235  
1236          if (arm_feature(env, ARM_FEATURE_V6K) &&
1237                  (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
1238              /* The simplified model uses AP[0] as an access control bit.  */
1239              if ((ap & 1) == 0) {
1240                  /* Access flag fault.  */
1241                  fi->type = ARMFault_AccessFlag;
1242                  goto do_fault;
1243              }
1244              result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
1245              user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
1246          } else {
1247              result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
1248              user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
1249          }
1250          if (result->f.prot && !xn) {
1251              result->f.prot |= PAGE_EXEC;
1252          }
1253          if (!(result->f.prot & (1 << access_type))) {
1254              /* Access permission fault.  */
1255              fi->type = ARMFault_Permission;
1256              goto do_fault;
1257          }
1258          if (regime_is_pan(env, mmu_idx) &&
1259              !regime_is_user(env, mmu_idx) &&
1260              user_prot &&
1261              access_type != MMU_INST_FETCH) {
1262              /* Privileged Access Never fault */
1263              fi->type = ARMFault_Permission;
1264              goto do_fault;
1265          }
1266      }
1267      if (ns) {
1268          /* The NS bit will (as required by the architecture) have no effect if
1269           * the CPU doesn't support TZ or this is a non-secure translation
1270           * regime, because the attribute will already be non-secure.
1271           */
1272          result->f.attrs.secure = false;
1273          result->f.attrs.space = ARMSS_NonSecure;
1274      }
1275      result->f.phys_addr = phys_addr;
1276      return false;
1277  do_fault:
1278      fi->domain = domain;
1279      fi->level = level;
1280      return true;
1281  }
1282  
1283  /*
1284   * Translate S2 section/page access permissions to protection flags
1285   * @env:     CPUARMState
1286   * @s2ap:    The 2-bit stage2 access permissions (S2AP)
1287   * @xn:      XN (execute-never) bits
1288   * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
1289   */
1290  static int get_S2prot_noexecute(int s2ap)
1291  {
1292      int prot = 0;
1293  
1294      if (s2ap & 1) {
1295          prot |= PAGE_READ;
1296      }
1297      if (s2ap & 2) {
1298          prot |= PAGE_WRITE;
1299      }
1300      return prot;
1301  }
1302  
1303  static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
1304  {
1305      int prot = get_S2prot_noexecute(s2ap);
1306  
1307      if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
1308          switch (xn) {
1309          case 0:
1310              prot |= PAGE_EXEC;
1311              break;
1312          case 1:
1313              if (s1_is_el0) {
1314                  prot |= PAGE_EXEC;
1315              }
1316              break;
1317          case 2:
1318              break;
1319          case 3:
1320              if (!s1_is_el0) {
1321                  prot |= PAGE_EXEC;
1322              }
1323              break;
1324          default:
1325              g_assert_not_reached();
1326          }
1327      } else {
1328          if (!extract32(xn, 1, 1)) {
1329              if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
1330                  prot |= PAGE_EXEC;
1331              }
1332          }
1333      }
1334      return prot;
1335  }
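
/*
 * Example: with FEAT_TTS2UXN present, S2AP == 0b11 and XN == 0b01 give
 * PAGE_READ | PAGE_WRITE plus PAGE_EXEC only when the stage 1 access is
 * from EL0; without the feature only XN bit [1] is decoded.
 */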
1336  
1337  /*
1338   * Translate section/page access permissions to protection flags
1339   * @env:     CPUARMState
1340   * @mmu_idx: MMU index indicating required translation regime
1341   * @is_aa64: TRUE if AArch64
1342   * @ap:      The 2-bit simple AP (AP[2:1])
1343   * @xn:      XN (execute-never) bit
1344   * @pxn:     PXN (privileged execute-never) bit
1345   * @in_pa:   The original input pa space
1346   * @out_pa:  The output pa space, modified by NSTable, NS, and NSE
1347   */
1348  static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
1349                        int ap, int xn, int pxn,
1350                        ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
1351  {
1352      ARMCPU *cpu = env_archcpu(env);
1353      bool is_user = regime_is_user(env, mmu_idx);
1354      int prot_rw, user_rw;
1355      bool have_wxn;
1356      int wxn = 0;
1357  
1358      assert(!regime_is_stage2(mmu_idx));
1359  
1360      user_rw = simple_ap_to_rw_prot_is_user(ap, true);
1361      if (is_user) {
1362          prot_rw = user_rw;
1363      } else {
1364          /*
1365           * PAN controls can forbid data accesses but don't affect insn fetch.
1366           * Plain PAN forbids data accesses if EL0 has data permissions;
1367           * PAN3 forbids data accesses if EL0 has either data or exec perms.
1368           * Note that for AArch64 the 'user can exec' case is exactly !xn.
1369           * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
1370           * do not affect EPAN.
1371           */
1372          if (user_rw && regime_is_pan(env, mmu_idx)) {
1373              prot_rw = 0;
1374          } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
1375                     regime_is_pan(env, mmu_idx) &&
1376                     (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
1377              prot_rw = 0;
1378          } else {
1379              prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
1380          }
1381      }
1382  
1383      if (in_pa != out_pa) {
1384          switch (in_pa) {
1385          case ARMSS_Root:
1386              /*
1387               * R_ZWRVD: permission fault for insn fetched from non-Root,
1388               * I_WWBFB: SIF has no effect in EL3.
1389               */
1390              return prot_rw;
1391          case ARMSS_Realm:
1392              /*
1393               * R_PKTDS: permission fault for insn fetched from non-Realm,
1394               * for Realm EL2 or EL2&0.  The corresponding fault for EL1&0
1395               * happens during any stage2 translation.
1396               */
1397              switch (mmu_idx) {
1398              case ARMMMUIdx_E2:
1399              case ARMMMUIdx_E20_0:
1400              case ARMMMUIdx_E20_2:
1401              case ARMMMUIdx_E20_2_PAN:
1402                  return prot_rw;
1403              default:
1404                  break;
1405              }
1406              break;
1407          case ARMSS_Secure:
1408              if (env->cp15.scr_el3 & SCR_SIF) {
1409                  return prot_rw;
1410              }
1411              break;
1412          default:
1413              /* Input NonSecure must have output NonSecure. */
1414              g_assert_not_reached();
1415          }
1416      }
1417  
1418      /* TODO have_wxn should be replaced with
1419       *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
1420       * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
1421       * compatible processors have EL2, which is required for [U]WXN.
1422       */
1423      have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
1424  
1425      if (have_wxn) {
1426          wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
1427      }
1428  
1429      if (is_aa64) {
1430          if (regime_has_2_ranges(mmu_idx) && !is_user) {
1431              xn = pxn || (user_rw & PAGE_WRITE);
1432          }
1433      } else if (arm_feature(env, ARM_FEATURE_V7)) {
1434          switch (regime_el(env, mmu_idx)) {
1435          case 1:
1436          case 3:
1437              if (is_user) {
1438                  xn = xn || !(user_rw & PAGE_READ);
1439              } else {
1440                  int uwxn = 0;
1441                  if (have_wxn) {
1442                      uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
1443                  }
1444                  xn = xn || !(prot_rw & PAGE_READ) || pxn ||
1445                       (uwxn && (user_rw & PAGE_WRITE));
1446              }
1447              break;
1448          case 2:
1449              break;
1450          }
1451      } else {
1452          xn = wxn = 0;
1453      }
1454  
1455      if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
1456          return prot_rw;
1457      }
1458      return prot_rw | PAGE_EXEC;
1459  }
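
/*
 * Example of the WXN interaction above: on an LPAE CPU with SCTLR.WXN
 * set, a privileged writable mapping (prot_rw includes PAGE_WRITE)
 * never gains PAGE_EXEC, even if the descriptor's XN and PXN bits are 0.
 */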
1460  
1461  static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
1462                                            ARMMMUIdx mmu_idx)
1463  {
1464      uint64_t tcr = regime_tcr(env, mmu_idx);
1465      uint32_t el = regime_el(env, mmu_idx);
1466      int select, tsz;
1467      bool epd, hpd;
1468  
1469      assert(mmu_idx != ARMMMUIdx_Stage2_S);
1470  
1471      if (mmu_idx == ARMMMUIdx_Stage2) {
1472          /* VTCR */
1473          bool sext = extract32(tcr, 4, 1);
1474          bool sign = extract32(tcr, 3, 1);
1475  
1476          /*
1477           * If the sign-extend bit is not the same as t0sz[3], the result
1478           * is unpredictable. Flag this as a guest error.
1479           */
1480          if (sign != sext) {
1481              qemu_log_mask(LOG_GUEST_ERROR,
1482                            "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
1483          }
1484          tsz = sextract32(tcr, 0, 4) + 8;
1485          select = 0;
1486          hpd = false;
1487          epd = false;
1488      } else if (el == 2) {
1489          /* HTCR */
1490          tsz = extract32(tcr, 0, 3);
1491          select = 0;
1492          hpd = extract64(tcr, 24, 1);
1493          epd = false;
1494      } else {
1495          int t0sz = extract32(tcr, 0, 3);
1496          int t1sz = extract32(tcr, 16, 3);
1497  
1498          if (t1sz == 0) {
1499              select = va > (0xffffffffu >> t0sz);
1500          } else {
1501              /* Note that we will detect errors later.  */
1502              select = va >= ~(0xffffffffu >> t1sz);
1503          }
1504          if (!select) {
1505              tsz = t0sz;
1506              epd = extract32(tcr, 7, 1);
1507              hpd = extract64(tcr, 41, 1);
1508          } else {
1509              tsz = t1sz;
1510              epd = extract32(tcr, 23, 1);
1511              hpd = extract64(tcr, 42, 1);
1512          }
1513          /* For aarch32, hpd0 is not enabled without t2e as well.  */
1514          hpd &= extract32(tcr, 6, 1);
1515      }
1516  
1517      return (ARMVAParameters) {
1518          .tsz = tsz,
1519          .select = select,
1520          .epd = epd,
1521          .hpd = hpd,
1522      };
1523  }
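
/*
 * Example: with TTBCR.T0SZ == 1 and T1SZ == 0, VAs up to 0x7fffffff use
 * the TTBR0 parameters (tsz == 1) and anything above selects TTBR1;
 * as noted above, HPD for the selected half also requires the T2E bit.
 */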
1524  
1525  /*
1526   * check_s2_mmu_setup
1527   * @cpu:        ARMCPU
1528   * @is_aa64:    True if the translation regime is in AArch64 state
1529   * @tcr:        VTCR_EL2 or VSTCR_EL2
1530   * @ds:         Effective value of TCR.DS.
1531   * @iasize:     Bitsize of IPAs
1532   * @stride:     Page-table stride (See the ARM ARM)
1533   *
1534   * Decode the starting level of the S2 lookup, returning INT_MIN if
1535   * the configuration is invalid.
1536   */
1537  static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
1538                                bool ds, int iasize, int stride)
1539  {
1540      int sl0, sl2, startlevel, granulebits, levels;
1541      int s1_min_iasize, s1_max_iasize;
1542  
1543      sl0 = extract32(tcr, 6, 2);
1544      if (is_aa64) {
1545          /*
1546           * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
1547           * so interleave AArch64.S2StartLevel.
1548           */
1549          switch (stride) {
1550          case 9: /* 4KB */
1551              /* SL2 is RES0 unless DS=1 & 4KB granule. */
1552              sl2 = extract64(tcr, 33, 1);
1553              if (ds && sl2) {
1554                  if (sl0 != 0) {
1555                      goto fail;
1556                  }
1557                  startlevel = -1;
1558              } else {
1559                  startlevel = 2 - sl0;
1560                  switch (sl0) {
1561                  case 2:
1562                      if (arm_pamax(cpu) < 44) {
1563                          goto fail;
1564                      }
1565                      break;
1566                  case 3:
1567                      if (!cpu_isar_feature(aa64_st, cpu)) {
1568                          goto fail;
1569                      }
1570                      startlevel = 3;
1571                      break;
1572                  }
1573              }
1574              break;
1575          case 11: /* 16KB */
1576              switch (sl0) {
1577              case 2:
1578                  if (arm_pamax(cpu) < 42) {
1579                      goto fail;
1580                  }
1581                  break;
1582              case 3:
1583                  if (!ds) {
1584                      goto fail;
1585                  }
1586                  break;
1587              }
1588              startlevel = 3 - sl0;
1589              break;
1590          case 13: /* 64KB */
1591              switch (sl0) {
1592              case 2:
1593                  if (arm_pamax(cpu) < 44) {
1594                      goto fail;
1595                  }
1596                  break;
1597              case 3:
1598                  goto fail;
1599              }
1600              startlevel = 3 - sl0;
1601              break;
1602          default:
1603              g_assert_not_reached();
1604          }
1605      } else {
1606          /*
1607           * Things are simpler for AArch32 EL2, with only 4k pages.
1608           * There is no separate S2InvalidSL function, but AArch32.S2Walk
1609           * begins with walkparms.sl0 in {'1x'}.
1610           */
1611          assert(stride == 9);
1612          if (sl0 >= 2) {
1613              goto fail;
1614          }
1615          startlevel = 2 - sl0;
1616      }
1617  
1618      /* AArch{64,32}.S2InconsistentSL are functionally equivalent.  */
1619      levels = 3 - startlevel;
1620      granulebits = stride + 3;
1621  
1622      s1_min_iasize = levels * stride + granulebits + 1;
1623      s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
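          /*
           * For example, with a 4KB granule (stride 9, granulebits 12) and
           * startlevel 1 (levels 2), this accepts IPA sizes of 31..43 bits:
           * anything up to 30 bits could start at level 2 instead, and 43
           * bits is the most that 16 concatenated level 1 tables resolve.
           */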
1624  
1625      if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
1626          return startlevel;
1627      }
1628  
1629   fail:
1630      return INT_MIN;
1631  }
1632  
1633  static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
1634                                    ARMGranuleSize gran, int level)
1635  {
1636      /*
1637       * See pseudocode AArch64.BlockDescSupported(): block descriptors
1638       * are not valid at all levels, depending on the page size.
1639       */
1640      switch (gran) {
1641      case Gran4K:
1642          return (level == 0 && ds) || level == 1 || level == 2;
1643      case Gran16K:
1644          return (level == 1 && ds) || level == 2;
1645      case Gran64K:
1646          return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
1647      default:
1648          g_assert_not_reached();
1649      }
1650  }
1651  
1652  static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
1653  {
1654      uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
1655      return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
1656  }
1657  
1658  /**
1659   * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
1660   *
1661   * Returns false if the translation was successful. Otherwise, phys_ptr,
1662   * attrs, prot and page_size may not be filled in, and the populated fsr
1663   * value provides information on why the translation aborted, in the format
1664   * of a long-format DFSR/IFSR fault register, with the following caveat:
1665   * the WnR bit is never set (the caller must do this).
1666   *
1667   * @env: CPUARMState
1668   * @ptw: Current and next stage parameters for the walk.
1669   * @address: virtual address to get physical address for
1670   * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
1671   * @result: set on translation success
1672   * @fi: set to fault info if the translation fails
1673   */
1674  static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
1675                                 uint64_t address,
1676                                 MMUAccessType access_type,
1677                                 GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1678  {
1679      ARMCPU *cpu = env_archcpu(env);
1680      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
1681      int32_t level;
1682      ARMVAParameters param;
1683      uint64_t ttbr;
1684      hwaddr descaddr, indexmask, indexmask_grainsize;
1685      uint32_t tableattrs;
1686      target_ulong page_size;
1687      uint64_t attrs;
1688      int32_t stride;
1689      int addrsize, inputsize, outputsize;
1690      uint64_t tcr = regime_tcr(env, mmu_idx);
1691      int ap, xn, pxn;
1692      uint32_t el = regime_el(env, mmu_idx);
1693      uint64_t descaddrmask;
1694      bool aarch64 = arm_el_is_aa64(env, el);
1695      uint64_t descriptor, new_descriptor;
1696      ARMSecuritySpace out_space;
1697      bool device;
1698  
1699      /* TODO: This code does not support shareability levels. */
1700      if (aarch64) {
1701          int ps;
1702  
1703          param = aa64_va_parameters(env, address, mmu_idx,
1704                                     access_type != MMU_INST_FETCH,
1705                                     !arm_el_is_aa64(env, 1));
1706          level = 0;
1707  
1708          /*
1709           * If TxSZ is programmed to a value larger than the maximum,
1710           * or smaller than the effective minimum, it is IMPLEMENTATION
1711           * DEFINED whether we behave as if the field were programmed
1712           * within bounds, or if a level 0 Translation fault is generated.
1713           *
1714           * With FEAT_LVA, fault on less than minimum becomes required,
1715           * so our choice is to always raise the fault.
1716           */
1717          if (param.tsz_oob) {
1718              goto do_translation_fault;
1719          }
1720  
1721          addrsize = 64 - 8 * param.tbi;
1722          inputsize = 64 - param.tsz;
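              /*
               * For example, with TBI enabled and T0SZ = 25 this gives
               * addrsize 56 and inputsize 39, i.e. a 39-bit VA range with
               * the top tag byte ignored.
               */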
1723  
1724          /*
1725           * Bound PS by PARANGE to find the effective output address size.
1726           * ID_AA64MMFR0 is a read-only register so values outside of the
1727           * supported mappings can be considered an implementation error.
1728           */
1729          ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1730          ps = MIN(ps, param.ps);
1731          assert(ps < ARRAY_SIZE(pamax_map));
1732          outputsize = pamax_map[ps];
1733  
1734          /*
1735           * With LPA2, the effective output address (OA) size is at most 48 bits
1736           * unless TCR.DS == 1
1737           */
1738          if (!param.ds && param.gran != Gran64K) {
1739              outputsize = MIN(outputsize, 48);
1740          }
1741      } else {
1742          param = aa32_va_parameters(env, address, mmu_idx);
1743          level = 1;
1744          addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1745          inputsize = addrsize - param.tsz;
1746          outputsize = 40;
1747      }
1748  
1749      /*
1750       * We determined the region when collecting the parameters, but we
1751       * have not yet validated that the address is valid for the region.
1752       * Extract the top bits and verify that they all match select.
1753       *
1754       * For aa32, if inputsize == addrsize, then we have selected the
1755       * region by exclusion in aa32_va_parameters and there is no more
1756       * validation to do here.
1757       */
1758      if (inputsize < addrsize) {
1759          target_ulong top_bits = sextract64(address, inputsize,
1760                                             addrsize - inputsize);
1761          if (-top_bits != param.select) {
1762              /* The gap between the two regions is a Translation fault */
1763              goto do_translation_fault;
1764          }
1765      }
1766  
1767      stride = arm_granule_bits(param.gran) - 3;
1768  
1769      /*
1770       * Note that QEMU ignores shareability and cacheability attributes,
1771       * so we don't need to do anything with the SH, ORGN, IRGN fields
1772       * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1773       * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1774       * implement any ASID-like capability so we can ignore it (instead
1775       * we will always flush the TLB any time the ASID is changed).
1776       */
1777      ttbr = regime_ttbr(env, mmu_idx, param.select);
1778  
1779      /*
1780       * Here we should have set up all the parameters for the translation:
1781       * inputsize, ttbr, epd, stride, tbi
1782       */
1783  
1784      if (param.epd) {
1785          /*
1786           * Translation table walk disabled => Translation fault on TLB miss
1787           * Note: This is always 0 on 64-bit EL2 and EL3.
1788           */
1789          goto do_translation_fault;
1790      }
1791  
1792      if (!regime_is_stage2(mmu_idx)) {
1793          /*
1794           * The starting level depends on the virtual address size (which can
1795           * be up to 48 bits) and the translation granule size. It indicates
1796           * the number of strides (stride bits at a time) needed to
1797           * consume the bits of the input address. In the pseudocode this is:
1798           *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1799           * where their 'inputsize' is our 'inputsize', 'grainsize' is
1800           * our 'stride + 3' and 'stride' is our 'stride'.
1801           * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1802           * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1803           * = 4 - (inputsize - 4) / stride;
1804           */
1805          level = 4 - (inputsize - 4) / stride;
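              /*
               * For example, with a 4KB granule (stride 9), a 48-bit
               * inputsize gives level = 4 - 44/9 = 4 - 4 = 0, while a
               * 39-bit inputsize gives level = 4 - 35/9 = 4 - 3 = 1.
               */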
1806      } else {
1807          int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
1808                                              inputsize, stride);
1809          if (startlevel == INT_MIN) {
1810              level = 0;
1811              goto do_translation_fault;
1812          }
1813          level = startlevel;
1814      }
1815  
1816      indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1817      indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1818  
1819      /* Now we can extract the actual base address from the TTBR */
1820      descaddr = extract64(ttbr, 0, 48);
1821  
1822      /*
1823       * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1824       *
1825       * Otherwise, if the base address is out of range, raise AddressSizeFault.
1826       * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1827       * but we've just cleared the bits above 47, so simplify the test.
1828       */
1829      if (outputsize > 48) {
1830          descaddr |= extract64(ttbr, 2, 4) << 48;
1831      } else if (descaddr >> outputsize) {
1832          level = 0;
1833          fi->type = ARMFault_AddressSize;
1834          goto do_fault;
1835      }
1836  
1837      /*
1838       * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1839       * and also to mask out CnP (bit 0) which could validly be non-zero.
1840       */
1841      descaddr &= ~indexmask;
1842  
1843      /*
1844       * For AArch32, the address field in the descriptor goes up to bit 39
1845       * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1846       * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1847       * bits as part of the address, which will be checked via outputsize.
1848       * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1849       * the highest bits of a 52-bit output are placed elsewhere.
1850       */
1851      if (param.ds) {
1852          descaddrmask = MAKE_64BIT_MASK(0, 50);
1853      } else if (arm_feature(env, ARM_FEATURE_V8)) {
1854          descaddrmask = MAKE_64BIT_MASK(0, 48);
1855      } else {
1856          descaddrmask = MAKE_64BIT_MASK(0, 40);
1857      }
1858      descaddrmask &= ~indexmask_grainsize;
1859      tableattrs = 0;
1860  
1861   next_level:
1862      descaddr |= (address >> (stride * (4 - level))) & indexmask;
1863      descaddr &= ~7ULL;
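          /*
           * For example, at level 0 with a 4KB granule and 48-bit inputsize,
           * indexmask covers 12 bits: the 9-bit level 0 table index from
           * address bits [47:39], scaled by the 8-byte descriptor size; the
           * stray low bits are cleared by the ~7 mask above.
           */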
1864  
1865      /*
1866       * Process the NSTable bit from the previous level.  This changes
1867       * the table address space and the output space from Secure to
1868       * NonSecure.  With RME, the EL3 translation regime does not change
1869       * from Root to NonSecure.
1870       */
1871      if (ptw->in_space == ARMSS_Secure
1872          && !regime_is_stage2(mmu_idx)
1873          && extract32(tableattrs, 4, 1)) {
1874          /*
1875           * Stage2_S -> Stage2 or Phys_S -> Phys_NS
1876           * Assert the relative order of the secure/non-secure indexes.
1877           */
1878          QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
1879          QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
1880          ptw->in_ptw_idx += 1;
1881          ptw->in_space = ARMSS_NonSecure;
1882      }
1883  
1884      if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
1885          goto do_fault;
1886      }
1887      descriptor = arm_ldq_ptw(env, ptw, fi);
1888      if (fi->type != ARMFault_None) {
1889          goto do_fault;
1890      }
1891      new_descriptor = descriptor;
1892  
1893   restart_atomic_update:
1894      if (!(descriptor & 1) ||
1895          (!(descriptor & 2) &&
1896           !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
1897          /* Invalid, or a block descriptor at an invalid level */
1898          goto do_translation_fault;
1899      }
1900  
1901      descaddr = descriptor & descaddrmask;
1902  
1903      /*
1904       * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1905       * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1906       * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1907       * raise AddressSizeFault.
1908       */
1909      if (outputsize > 48) {
1910          if (param.ds) {
1911              descaddr |= extract64(descriptor, 8, 2) << 50;
1912          } else {
1913              descaddr |= extract64(descriptor, 12, 4) << 48;
1914          }
1915      } else if (descaddr >> outputsize) {
1916          fi->type = ARMFault_AddressSize;
1917          goto do_fault;
1918      }
1919  
1920      if ((descriptor & 2) && (level < 3)) {
1921          /*
1922           * Table entry. The top five bits are attributes which may
1923           * propagate down through lower levels of the table (and
1924           * which are all arranged so that 0 means "no effect", so
1925           * we can gather them up by ORing in the bits at each level).
1926           */
1927          tableattrs |= extract64(descriptor, 59, 5);
1928          level++;
1929          indexmask = indexmask_grainsize;
1930          goto next_level;
1931      }
1932  
1933      /*
1934       * Block entry at level 1 or 2, or page entry at level 3.
1935       * These are basically the same thing, although the number
1936       * of bits we pull in from the vaddr varies. Note that although
1937       * descaddrmask masks enough of the low bits of the descriptor
1938       * to give a correct page or table address, the address field
1939       * in a block descriptor is smaller; so we need to explicitly
1940       * clear the lower bits here before ORing in the low vaddr bits.
1941       *
1942       * Afterward, descaddr is the final physical address.
1943       */
1944      page_size = (1ULL << ((stride * (4 - level)) + 3));
1945      descaddr &= ~(hwaddr)(page_size - 1);
1946      descaddr |= (address & (page_size - 1));
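          /*
           * For example, with a 4KB granule a level 2 block maps
           * 1 << (9 * 2 + 3) bytes = 2MB, and a level 3 page maps 4KB.
           */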
1947  
1948      if (likely(!ptw->in_debug)) {
1949          /*
1950           * Access flag.
1951           * If HA is enabled, prepare to update the descriptor below.
1952           * Otherwise, pass the access fault on to software.
1953           */
1954          if (!(descriptor & (1 << 10))) {
1955              if (param.ha) {
1956                  new_descriptor |= 1 << 10; /* AF */
1957              } else {
1958                  fi->type = ARMFault_AccessFlag;
1959                  goto do_fault;
1960              }
1961          }
1962  
1963          /*
1964           * Dirty Bit.
1965           * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
1966           * bit for writeback. The actual write protection test may still be
1967           * overridden by tableattrs, to be merged below.
1968           */
1969          if (param.hd
1970              && extract64(descriptor, 51, 1)  /* DBM */
1971              && access_type == MMU_DATA_STORE) {
1972              if (regime_is_stage2(mmu_idx)) {
1973                  new_descriptor |= 1ull << 7;    /* set S2AP[1] */
1974              } else {
1975                  new_descriptor &= ~(1ull << 7); /* clear AP[2] */
1976              }
1977          }
1978      }
1979  
1980      /*
1981       * Extract attributes from the (modified) descriptor, and apply
1982       * table descriptors. Stage 2 table descriptors do not include
1983       * any attribute fields. HPD disables all the table attributes
1984       * except NSTable (which we have already handled).
1985       */
1986      attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
1987      if (!regime_is_stage2(mmu_idx)) {
1988          if (!param.hpd) {
1989              attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
1990              /*
1991               * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1992               * means "force PL1 access only", which means forcing AP[1] to 0.
1993               */
1994              attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
1995              attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
1996          }
1997      }
1998  
1999      ap = extract32(attrs, 6, 2);
2000      out_space = ptw->in_space;
2001      if (regime_is_stage2(mmu_idx)) {
2002          /*
2003           * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
2004           * The bit remains ignored for other security states.
2005           * R_YMCSL: Executing an insn fetched from non-Realm causes
2006           * a stage2 permission fault.
2007           */
2008          if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
2009              out_space = ARMSS_NonSecure;
2010              result->f.prot = get_S2prot_noexecute(ap);
2011          } else {
2012              xn = extract64(attrs, 53, 2);
2013              result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
2014          }
2015      } else {
2016          int nse, ns = extract32(attrs, 5, 1);
2017          switch (out_space) {
2018          case ARMSS_Root:
2019              /*
2020               * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
2021               * R_XTYPW: NSE and NS together select the output pa space.
2022               */
2023              nse = extract32(attrs, 11, 1);
2024              out_space = (nse << 1) | ns;
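                  /*
                   * {NSE,NS} = {0,0} Secure, {0,1} NonSecure, {1,0} Root,
                   * {1,1} Realm, matching the ARMSecuritySpace encoding.
                   */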
2025              if (out_space == ARMSS_Secure &&
2026                  !cpu_isar_feature(aa64_sel2, cpu)) {
2027                  out_space = ARMSS_NonSecure;
2028              }
2029              break;
2030          case ARMSS_Secure:
2031              if (ns) {
2032                  out_space = ARMSS_NonSecure;
2033              }
2034              break;
2035          case ARMSS_Realm:
2036              switch (mmu_idx) {
2037              case ARMMMUIdx_Stage1_E0:
2038              case ARMMMUIdx_Stage1_E1:
2039              case ARMMMUIdx_Stage1_E1_PAN:
2040                  /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
2041                  break;
2042              case ARMMMUIdx_E2:
2043              case ARMMMUIdx_E20_0:
2044              case ARMMMUIdx_E20_2:
2045              case ARMMMUIdx_E20_2_PAN:
2046                  /*
2047                   * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
2048                   * NS changes the output to non-secure space.
2049                   */
2050                  if (ns) {
2051                      out_space = ARMSS_NonSecure;
2052                  }
2053                  break;
2054              default:
2055                  g_assert_not_reached();
2056              }
2057              break;
2058          case ARMSS_NonSecure:
2059              /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
2060              break;
2061          default:
2062              g_assert_not_reached();
2063          }
2064          xn = extract64(attrs, 54, 1);
2065          pxn = extract64(attrs, 53, 1);
2066  
2067          if (el == 1 && nv_nv1_enabled(env, ptw)) {
2068              /*
2069               * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
2070               * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
2071               * of UXN is 0. Similarly for bits 59 and 60 in table descriptors
2072               * (which we have already folded into bits 53 and 54 of attrs).
2073               * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
2074               * Similarly, APTable[0] from the table descriptor is treated as 0;
2075               * we already folded this into AP[1] and squashing that to 0 does
2076               * the right thing.
2077               */
2078              pxn = xn;
2079              xn = 0;
2080              ap &= ~1;
2081          }
2082          /*
2083           * Note that we modified ptw->in_space earlier for NSTable, but
2084           * result->f.attrs retains a copy of the original security space.
2085           */
2086          result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
2087                                      result->f.attrs.space, out_space);
2088      }
2089  
2090      if (!(result->f.prot & (1 << access_type))) {
2091          fi->type = ARMFault_Permission;
2092          goto do_fault;
2093      }
2094  
2095      /* If FEAT_HAFDBS has made changes, update the PTE. */
2096      if (new_descriptor != descriptor) {
2097          new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
2098          if (fi->type != ARMFault_None) {
2099              goto do_fault;
2100          }
2101          /*
2102           * I_YZSVV says that if the in-memory descriptor has changed,
2103           * then we must use the information in that new value
2104           * (which might include a different output address, different
2105           * attributes, or generate a fault).
2106           * Restart the handling of the descriptor value from scratch.
2107           */
2108          if (new_descriptor != descriptor) {
2109              descriptor = new_descriptor;
2110              goto restart_atomic_update;
2111          }
2112      }
2113  
2114      result->f.attrs.space = out_space;
2115      result->f.attrs.secure = arm_space_is_secure(out_space);
2116  
2117      if (regime_is_stage2(mmu_idx)) {
2118          result->cacheattrs.is_s2_format = true;
2119          result->cacheattrs.attrs = extract32(attrs, 2, 4);
2120          /*
2121           * Security state does not really affect HCR_EL2.FWB;
2122           * we only need to filter FWB for aa32 or other FEAT.
2123           */
2124          device = S2_attrs_are_device(arm_hcr_el2_eff(env),
2125                                       result->cacheattrs.attrs);
2126      } else {
2127          /* Index into MAIR registers for cache attributes */
2128          uint8_t attrindx = extract32(attrs, 2, 3);
2129          uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
2130          assert(attrindx <= 7);
2131          result->cacheattrs.is_s2_format = false;
2132          result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
2133  
2134          /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
2135          if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
2136              result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
2137          }
2138          device = S1_attrs_are_device(result->cacheattrs.attrs);
2139      }
2140  
2141      /*
2142       * Enable alignment checks on Device memory.
2143       *
2144       * Per R_XCHFJ, this check is mis-ordered. The correct ordering
2145       * for alignment, permission, and stage 2 faults should be:
2146       *    - Alignment fault caused by the memory type
2147       *    - Permission fault
2148       *    - A stage 2 fault on the memory access
2149       * but due to the way the TCG softmmu TLB operates, we will have
2150       * implicitly done the permission check and the stage2 lookup in
2151       * finding the TLB entry, so the alignment check cannot be done sooner.
2152       *
2153       * In v7, for a CPU without the Virtualization Extensions this
2154       * access is UNPREDICTABLE; we choose to make it take the alignment
2155       * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
2156       * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
2157       */
2158      if (device) {
2159          result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
2160      }
2161  
2162      /*
2163       * For FEAT_LPA2 and effective DS, the SH field in the attributes
2164       * was re-purposed for output address bits.  The SH attribute in
2165       * that case comes from TCR_ELx, which we extracted earlier.
2166       */
2167      if (param.ds) {
2168          result->cacheattrs.shareability = param.sh;
2169      } else {
2170          result->cacheattrs.shareability = extract32(attrs, 8, 2);
2171      }
2172  
2173      result->f.phys_addr = descaddr;
2174      result->f.lg_page_size = ctz64(page_size);
2175      return false;
2176  
2177   do_translation_fault:
2178      fi->type = ARMFault_Translation;
2179   do_fault:
2180      if (fi->s1ptw) {
2181          /* Retain the existing stage 2 fi->level */
2182          assert(fi->stage2);
2183      } else {
2184          fi->level = level;
2185          fi->stage2 = regime_is_stage2(mmu_idx);
2186      }
2187      fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
2188      return true;
2189  }
2190  
2191  static bool get_phys_addr_pmsav5(CPUARMState *env,
2192                                   S1Translate *ptw,
2193                                   uint32_t address,
2194                                   MMUAccessType access_type,
2195                                   GetPhysAddrResult *result,
2196                                   ARMMMUFaultInfo *fi)
2197  {
2198      int n;
2199      uint32_t mask;
2200      uint32_t base;
2201      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2202      bool is_user = regime_is_user(env, mmu_idx);
2203  
2204      if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
2205          /* MPU disabled.  */
2206          result->f.phys_addr = address;
2207          result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2208          return false;
2209      }
2210  
2211      result->f.phys_addr = address;
2212      for (n = 7; n >= 0; n--) {
2213          base = env->cp15.c6_region[n];
2214          if ((base & 1) == 0) {
2215              continue;
2216          }
2217          mask = 1 << ((base >> 1) & 0x1f);
2218          /* Keep this shift separate from the above to avoid an
2219             (undefined) << 32.  */
2220          mask = (mask << 1) - 1;
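              /*
               * For example, a size field of 11 gives a 4KB region
               * (mask 0xfff); 31 gives a 4GB region (mask 0xffffffff).
               */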
2221          if (((base ^ address) & ~mask) == 0) {
2222              break;
2223          }
2224      }
2225      if (n < 0) {
2226          fi->type = ARMFault_Background;
2227          return true;
2228      }
2229  
2230      if (access_type == MMU_INST_FETCH) {
2231          mask = env->cp15.pmsav5_insn_ap;
2232      } else {
2233          mask = env->cp15.pmsav5_data_ap;
2234      }
2235      mask = (mask >> (n * 4)) & 0xf;
2236      switch (mask) {
2237      case 0:
2238          fi->type = ARMFault_Permission;
2239          fi->level = 1;
2240          return true;
2241      case 1:
2242          if (is_user) {
2243              fi->type = ARMFault_Permission;
2244              fi->level = 1;
2245              return true;
2246          }
2247          result->f.prot = PAGE_READ | PAGE_WRITE;
2248          break;
2249      case 2:
2250          result->f.prot = PAGE_READ;
2251          if (!is_user) {
2252              result->f.prot |= PAGE_WRITE;
2253          }
2254          break;
2255      case 3:
2256          result->f.prot = PAGE_READ | PAGE_WRITE;
2257          break;
2258      case 5:
2259          if (is_user) {
2260              fi->type = ARMFault_Permission;
2261              fi->level = 1;
2262              return true;
2263          }
2264          result->f.prot = PAGE_READ;
2265          break;
2266      case 6:
2267          result->f.prot = PAGE_READ;
2268          break;
2269      default:
2270          /* Bad permission.  */
2271          fi->type = ARMFault_Permission;
2272          fi->level = 1;
2273          return true;
2274      }
2275      result->f.prot |= PAGE_EXEC;
2276      return false;
2277  }
2278  
2279  static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
2280                                           int32_t address, uint8_t *prot)
2281  {
2282      if (!arm_feature(env, ARM_FEATURE_M)) {
2283          *prot = PAGE_READ | PAGE_WRITE;
2284          switch (address) {
2285          case 0xF0000000 ... 0xFFFFFFFF:
2286              if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
2287                  /* hivecs execing is ok */
2288                  *prot |= PAGE_EXEC;
2289              }
2290              break;
2291          case 0x00000000 ... 0x7FFFFFFF:
2292              *prot |= PAGE_EXEC;
2293              break;
2294          }
2295      } else {
2296          /* Default system address map for M profile cores.
2297           * The architecture specifies which regions are execute-never;
2298           * at the MPU level no other checks are defined.
2299           */
2300          switch (address) {
2301          case 0x00000000 ... 0x1fffffff: /* ROM */
2302          case 0x20000000 ... 0x3fffffff: /* SRAM */
2303          case 0x60000000 ... 0x7fffffff: /* RAM */
2304          case 0x80000000 ... 0x9fffffff: /* RAM */
2305              *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2306              break;
2307          case 0x40000000 ... 0x5fffffff: /* Peripheral */
2308          case 0xa0000000 ... 0xbfffffff: /* Device */
2309          case 0xc0000000 ... 0xdfffffff: /* Device */
2310          case 0xe0000000 ... 0xffffffff: /* System */
2311              *prot = PAGE_READ | PAGE_WRITE;
2312              break;
2313          default:
2314              g_assert_not_reached();
2315          }
2316      }
2317  }
2318  
2319  static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
2320  {
2321      /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
2322      return arm_feature(env, ARM_FEATURE_M) &&
2323          extract32(address, 20, 12) == 0xe00;
2324  }
2325  
2326  static bool m_is_system_region(CPUARMState *env, uint32_t address)
2327  {
2328      /*
2329       * True if address is in the M profile system region
2330       * 0xe0000000 - 0xffffffff
2331       */
2332      return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
2333  }
2334  
2335  static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2336                                           bool is_secure, bool is_user)
2337  {
2338      /*
2339       * Return true if we should use the default memory map as a
2340       * "background" region if there are no hits against any MPU regions.
2341       */
2342      CPUARMState *env = &cpu->env;
2343  
2344      if (is_user) {
2345          return false;
2346      }
2347  
2348      if (arm_feature(env, ARM_FEATURE_M)) {
2349          return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
2350      }
2351  
2352      if (mmu_idx == ARMMMUIdx_Stage2) {
2353          return false;
2354      }
2355  
2356      return regime_sctlr(env, mmu_idx) & SCTLR_BR;
2357  }
2358  
2359  static bool get_phys_addr_pmsav7(CPUARMState *env,
2360                                   S1Translate *ptw,
2361                                   uint32_t address,
2362                                   MMUAccessType access_type,
2363                                   GetPhysAddrResult *result,
2364                                   ARMMMUFaultInfo *fi)
2365  {
2366      ARMCPU *cpu = env_archcpu(env);
2367      int n;
2368      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2369      bool is_user = regime_is_user(env, mmu_idx);
2370      bool secure = arm_space_is_secure(ptw->in_space);
2371  
2372      result->f.phys_addr = address;
2373      result->f.lg_page_size = TARGET_PAGE_BITS;
2374      result->f.prot = 0;
2375  
2376      if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
2377          m_is_ppb_region(env, address)) {
2378          /*
2379           * MPU disabled or M profile PPB access: use default memory map.
2380           * The other case which uses the default memory map in the
2381           * v7M ARM ARM pseudocode is exception vector reads from the vector
2382           * table. In QEMU those accesses are done in arm_v7m_load_vector(),
2383           * which always does a direct read using address_space_ldl(), rather
2384           * than going via this function, so we don't need to check that here.
2385           */
2386          get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
2387      } else { /* MPU enabled */
2388          for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
2389              /* region search */
2390              uint32_t base = env->pmsav7.drbar[n];
2391              uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
2392              uint32_t rmask;
2393              bool srdis = false;
2394  
2395              if (!(env->pmsav7.drsr[n] & 0x1)) {
2396                  continue;
2397              }
2398  
2399              if (!rsize) {
2400                  qemu_log_mask(LOG_GUEST_ERROR,
2401                                "DRSR[%d]: Rsize field cannot be 0\n", n);
2402                  continue;
2403              }
2404              rsize++;
2405              rmask = (1ull << rsize) - 1;
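                  /*
                   * For example, a DRSR size field of 11 gives rsize 12,
                   * i.e. a 4KB region with rmask 0xfff.
                   */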
2406  
2407              if (base & rmask) {
2408                  qemu_log_mask(LOG_GUEST_ERROR,
2409                                "DRBAR[%d]: 0x%" PRIx32 " misaligned "
2410                                "to DRSR region size, mask = 0x%" PRIx32 "\n",
2411                                n, base, rmask);
2412                  continue;
2413              }
2414  
2415              if (address < base || address > base + rmask) {
2416                  /*
2417                   * Address not in this region. We must check whether the
2418                   * region covers addresses in the same page as our address.
2419                   * In that case we must not report a size that covers the
2420                   * whole page for a subsequent hit against a different MPU
2421                   * region or the background region, because it would result in
2422                   * incorrect TLB hits for subsequent accesses to addresses that
2423                   * are in this MPU region.
2424                   */
2425                  if (ranges_overlap(base, rmask,
2426                                     address & TARGET_PAGE_MASK,
2427                                     TARGET_PAGE_SIZE)) {
2428                      result->f.lg_page_size = 0;
2429                  }
2430                  continue;
2431              }
2432  
2433              /* Region matched */
2434  
2435              if (rsize >= 8) { /* no subregions for regions < 256 bytes */
2436                  int i, snd;
2437                  uint32_t srdis_mask;
2438  
2439                  rsize -= 3; /* sub region size (power of 2) */
2440                  snd = ((address - base) >> rsize) & 0x7;
2441                  srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
2442  
2443                  srdis_mask = srdis ? 0x3 : 0x0;
2444                  for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
2445                      /*
2446                       * This will check in groups of 2, 4 and then 8, whether
2447                       * the subregion bits are consistent. rsize is incremented
2448                       * back up to give the region size, considering consistent
2449                       * adjacent subregions as one region. Stop testing if rsize
2450                       * is already big enough for an entire QEMU page.
2451                       */
2452                      int snd_rounded = snd & ~(i - 1);
2453                      uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
2454                                                       snd_rounded + 8, i);
2455                      if (srdis_mask ^ srdis_multi) {
2456                          break;
2457                      }
2458                      srdis_mask = (srdis_mask << i) | srdis_mask;
2459                      rsize++;
2460                  }
2461              }
2462              if (srdis) {
2463                  continue;
2464              }
2465              if (rsize < TARGET_PAGE_BITS) {
2466                  result->f.lg_page_size = rsize;
2467              }
2468              break;
2469          }
2470  
2471          if (n == -1) { /* no hits */
2472              if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
2473                  /* background fault */
2474                  fi->type = ARMFault_Background;
2475                  return true;
2476              }
2477              get_phys_addr_pmsav7_default(env, mmu_idx, address,
2478                                           &result->f.prot);
2479          } else { /* an MPU hit! */
2480              uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
2481              uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
2482  
2483              if (m_is_system_region(env, address)) {
2484                  /* System space is always execute never */
2485                  xn = 1;
2486              }
2487  
2488              if (is_user) { /* User mode AP bit decoding */
2489                  switch (ap) {
2490                  case 0:
2491                  case 1:
2492                  case 5:
2493                      break; /* no access */
2494                  case 3:
2495                      result->f.prot |= PAGE_WRITE;
2496                      /* fall through */
2497                  case 2:
2498                  case 6:
2499                      result->f.prot |= PAGE_READ | PAGE_EXEC;
2500                      break;
2501                  case 7:
2502                      /* for v7M, same as 6; for R profile a reserved value */
2503                      if (arm_feature(env, ARM_FEATURE_M)) {
2504                          result->f.prot |= PAGE_READ | PAGE_EXEC;
2505                          break;
2506                      }
2507                      /* fall through */
2508                  default:
2509                      qemu_log_mask(LOG_GUEST_ERROR,
2510                                    "DRACR[%d]: Bad value for AP bits: 0x%"
2511                                    PRIx32 "\n", n, ap);
2512                  }
2513              } else { /* Priv. mode AP bits decoding */
2514                  switch (ap) {
2515                  case 0:
2516                      break; /* no access */
2517                  case 1:
2518                  case 2:
2519                  case 3:
2520                      result->f.prot |= PAGE_WRITE;
2521                      /* fall through */
2522                  case 5:
2523                  case 6:
2524                      result->f.prot |= PAGE_READ | PAGE_EXEC;
2525                      break;
2526                  case 7:
2527                      /* for v7M, same as 6; for R profile a reserved value */
2528                      if (arm_feature(env, ARM_FEATURE_M)) {
2529                          result->f.prot |= PAGE_READ | PAGE_EXEC;
2530                          break;
2531                      }
2532                      /* fall through */
2533                  default:
2534                      qemu_log_mask(LOG_GUEST_ERROR,
2535                                    "DRACR[%d]: Bad value for AP bits: 0x%"
2536                                    PRIx32 "\n", n, ap);
2537                  }
2538              }
2539  
2540              /* execute never */
2541              if (xn) {
2542                  result->f.prot &= ~PAGE_EXEC;
2543              }
2544          }
2545      }
2546  
2547      fi->type = ARMFault_Permission;
2548      fi->level = 1;
2549      return !(result->f.prot & (1 << access_type));
2550  }
2551  
2552  static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
2553                               uint32_t secure)
2554  {
2555      if (regime_el(env, mmu_idx) == 2) {
2556          return env->pmsav8.hprbar;
2557      } else {
2558          return env->pmsav8.rbar[secure];
2559      }
2560  }
2561  
2562  static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
2563                               uint32_t secure)
2564  {
2565      if (regime_el(env, mmu_idx) == 2) {
2566          return env->pmsav8.hprlar;
2567      } else {
2568          return env->pmsav8.rlar[secure];
2569      }
2570  }
2571  
2572  bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
2573                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
2574                         bool secure, GetPhysAddrResult *result,
2575                         ARMMMUFaultInfo *fi, uint32_t *mregion)
2576  {
2577      /*
2578       * Perform a PMSAv8 MPU lookup (without also doing the SAU check
2579       * that a full phys-to-virt translation does).
2580       * mregion is (if not NULL) set to the region number which matched,
2581       * or -1 if no region number is returned (MPU off, address did not
2582       * hit a region, address hit in multiple regions).
2583       * If the region hit doesn't cover the entire TARGET_PAGE the address
2584       * is within, then we set the result lg_page_size to 0 to force
2585       * the memory system to use a subpage.
2586       */
2587      ARMCPU *cpu = env_archcpu(env);
2588      bool is_user = regime_is_user(env, mmu_idx);
2589      int n;
2590      int matchregion = -1;
2591      bool hit = false;
2592      uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2593      uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2594      int region_counter;
2595  
2596      if (regime_el(env, mmu_idx) == 2) {
2597          region_counter = cpu->pmsav8r_hdregion;
2598      } else {
2599          region_counter = cpu->pmsav7_dregion;
2600      }
2601  
2602      result->f.lg_page_size = TARGET_PAGE_BITS;
2603      result->f.phys_addr = address;
2604      result->f.prot = 0;
2605      if (mregion) {
2606          *mregion = -1;
2607      }
2608  
2609      if (mmu_idx == ARMMMUIdx_Stage2) {
2610          fi->stage2 = true;
2611      }
2612  
2613      /*
2614       * Unlike the ARM ARM pseudocode, we don't need to check whether this
2615       * was an exception vector read from the vector table (which is always
2616       * done using the default system address map), because those accesses
2617       * are done in arm_v7m_load_vector(), which always does a direct
2618       * read using address_space_ldl(), rather than going via this function.
2619       */
2620      if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
2621          /* MPU disabled */
2622          hit = true;
2623      } else if (m_is_ppb_region(env, address)) {
2624          hit = true;
2625      } else {
2626          if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
2627              hit = true;
2628          }
2629  
2630          uint32_t bitmask;
2631          if (arm_feature(env, ARM_FEATURE_M)) {
2632              bitmask = 0x1f;
2633          } else {
2634              bitmask = 0x3f;
2635              fi->level = 0;
2636          }
2637  
2638          for (n = region_counter - 1; n >= 0; n--) {
2639              /* region search */
2640              /*
2641               * Note that the base address is bits [31:x] from the register
2642               * with bits [x-1:0] all zeroes, but the limit address is bits
2643               * [31:x] from the register with bits [x:0] all ones. Where x is
2644               * 5 for Cortex-M and 6 for Cortex-R
2645               */
2646              uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
2647              uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;
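                  /*
                   * Example with arbitrary values, Cortex-M style
                   * (bitmask 0x1f): RBAR 0x20001000 and RLAR 0x200017e1
                   * describe an enabled 2KB region covering
                   * 0x20001000..0x200017ff.
                   */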
2648  
2649              if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
2650                  /* Region disabled */
2651                  continue;
2652              }
2653  
2654              if (address < base || address > limit) {
2655                  /*
2656                   * Address not in this region. We must check whether the
2657                   * region covers addresses in the same page as our address.
2658                   * In that case we must not report a size that covers the
2659                   * whole page for a subsequent hit against a different MPU
2660                   * region or the background region, because it would result in
2661                   * incorrect TLB hits for subsequent accesses to addresses that
2662                   * are in this MPU region.
2663                   */
2664                  if (limit >= base &&
2665                      ranges_overlap(base, limit - base + 1,
2666                                     addr_page_base,
2667                                     TARGET_PAGE_SIZE)) {
2668                      result->f.lg_page_size = 0;
2669                  }
2670                  continue;
2671              }
2672  
2673              if (base > addr_page_base || limit < addr_page_limit) {
2674                  result->f.lg_page_size = 0;
2675              }
2676  
2677              if (matchregion != -1) {
2678                  /*
2679                   * Multiple regions match -- always a failure (unlike
2680                   * PMSAv7 where highest-numbered-region wins)
2681                   */
2682                  fi->type = ARMFault_Permission;
2683                  if (arm_feature(env, ARM_FEATURE_M)) {
2684                      fi->level = 1;
2685                  }
2686                  return true;
2687              }
2688  
2689              matchregion = n;
2690              hit = true;
2691          }
2692      }
2693  
2694      if (!hit) {
2695          if (arm_feature(env, ARM_FEATURE_M)) {
2696              fi->type = ARMFault_Background;
2697          } else {
2698              fi->type = ARMFault_Permission;
2699          }
2700          return true;
2701      }
2702  
2703      if (matchregion == -1) {
2704          /* hit using the background region */
2705          get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
2706      } else {
2707          uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
2708          uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
2709          uint32_t ap = extract32(matched_rbar, 1, 2);
2710          uint32_t xn = extract32(matched_rbar, 0, 1);
2711          bool pxn = false;
2712  
2713          if (arm_feature(env, ARM_FEATURE_V8_1M)) {
2714              pxn = extract32(matched_rlar, 4, 1);
2715          }
2716  
2717          if (m_is_system_region(env, address)) {
2718              /* System space is always execute never */
2719              xn = 1;
2720          }
2721  
2722          if (regime_el(env, mmu_idx) == 2) {
2723              result->f.prot = simple_ap_to_rw_prot_is_user(ap,
2724                                              mmu_idx != ARMMMUIdx_E2);
2725          } else {
2726              result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
2727          }
2728  
2729          if (!arm_feature(env, ARM_FEATURE_M)) {
2730              uint8_t attrindx = extract32(matched_rlar, 1, 3);
2731              uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
2732              uint8_t sh = extract32(matched_rlar, 3, 2);
2733  
2734              if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
2735                  result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
2736                  xn = 0x1;
2737              }
2738  
2739              if ((regime_el(env, mmu_idx) == 1) &&
2740                  regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
2741                  pxn = 0x1;
2742              }
2743  
2744              result->cacheattrs.is_s2_format = false;
2745              result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
2746              result->cacheattrs.shareability = sh;
2747          }
2748  
2749          if (result->f.prot && !xn && !(pxn && !is_user)) {
2750              result->f.prot |= PAGE_EXEC;
2751          }
2752  
2753          if (mregion) {
2754              *mregion = matchregion;
2755          }
2756      }
2757  
2758      fi->type = ARMFault_Permission;
2759      if (arm_feature(env, ARM_FEATURE_M)) {
2760          fi->level = 1;
2761      }
2762      return !(result->f.prot & (1 << access_type));
2763  }
2764  
2765  static bool v8m_is_sau_exempt(CPUARMState *env,
2766                                uint32_t address, MMUAccessType access_type)
2767  {
2768      /*
2769       * The architecture specifies that certain address ranges are
2770       * exempt from v8M SAU/IDAU checks.
2771       */
2772      return
2773          (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
2774          (address >= 0xe0000000 && address <= 0xe0002fff) ||
2775          (address >= 0xe000e000 && address <= 0xe000efff) ||
2776          (address >= 0xe002e000 && address <= 0xe002efff) ||
2777          (address >= 0xe0040000 && address <= 0xe0041fff) ||
2778          (address >= 0xe00ff000 && address <= 0xe00fffff);
2779  }
2780  
2781  void v8m_security_lookup(CPUARMState *env, uint32_t address,
2782                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
2783                           bool is_secure, V8M_SAttributes *sattrs)
2784  {
2785      /*
2786       * Look up the security attributes for this address. Compare the
2787       * pseudocode SecurityCheck() function.
2788       * We assume the caller has zero-initialized *sattrs.
2789       */
2790      ARMCPU *cpu = env_archcpu(env);
2791      int r;
2792      bool idau_exempt = false, idau_ns = true, idau_nsc = true;
2793      int idau_region = IREGION_NOTVALID;
2794      uint32_t addr_page_base = address & TARGET_PAGE_MASK;
2795      uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
2796  
2797      if (cpu->idau) {
2798          IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
2799          IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
2800  
2801          iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
2802                     &idau_nsc);
2803      }
2804  
2805      if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
2806          /* 0xf0000000..0xffffffff is always S for insn fetches */
2807          return;
2808      }
2809  
2810      if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
2811          sattrs->ns = !is_secure;
2812          return;
2813      }
2814  
2815      if (idau_region != IREGION_NOTVALID) {
2816          sattrs->irvalid = true;
2817          sattrs->iregion = idau_region;
2818      }
2819  
2820      switch (env->sau.ctrl & 3) {
2821      case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
2822          break;
2823      case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
2824          sattrs->ns = true;
2825          break;
2826      default: /* SAU.ENABLE == 1 */
2827          for (r = 0; r < cpu->sau_sregion; r++) {
2828              if (env->sau.rlar[r] & 1) {
2829                  uint32_t base = env->sau.rbar[r] & ~0x1f;
2830                  uint32_t limit = env->sau.rlar[r] | 0x1f;
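                      /*
                       * SAU regions have the same 32-byte granularity as
                       * the v8M MPU: base is RBAR[31:5] with zeros below,
                       * limit is RLAR[31:5] with ones below.
                       */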
2831  
2832                  if (base <= address && limit >= address) {
2833                      if (base > addr_page_base || limit < addr_page_limit) {
2834                          sattrs->subpage = true;
2835                      }
2836                      if (sattrs->srvalid) {
2837                          /*
2838                           * If we hit in more than one region then we must report
2839                           * as Secure, not NS-Callable, with no valid region
2840                           * number info.
2841                           */
2842                          sattrs->ns = false;
2843                          sattrs->nsc = false;
2844                          sattrs->sregion = 0;
2845                          sattrs->srvalid = false;
2846                          break;
2847                      } else {
2848                          if (env->sau.rlar[r] & 2) {
2849                              sattrs->nsc = true;
2850                          } else {
2851                              sattrs->ns = true;
2852                          }
2853                          sattrs->srvalid = true;
2854                          sattrs->sregion = r;
2855                      }
2856                  } else {
2857                      /*
2858                       * Address not in this region. We must check whether the
2859                       * region covers addresses in the same page as our address.
2860                       * In that case we must not report a size that covers the
2861                       * whole page for a subsequent hit against a different MPU
2862                       * region or the background region, because it would result
2863                       * in incorrect TLB hits for subsequent accesses to
2864                       * addresses that are in this MPU region.
2865                       */
2866                      if (limit >= base &&
2867                          ranges_overlap(base, limit - base + 1,
2868                                         addr_page_base,
2869                                         TARGET_PAGE_SIZE)) {
2870                          sattrs->subpage = true;
2871                      }
2872                  }
2873              }
2874          }
2875          break;
2876      }
2877  
2878      /*
2879       * The IDAU will override the SAU lookup results if it specifies
2880       * higher security than the SAU does.
2881       */
2882      if (!idau_ns) {
2883          if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
2884              sattrs->ns = false;
2885              sattrs->nsc = idau_nsc;
2886          }
2887      }
2888  }
2889  
2890  static bool get_phys_addr_pmsav8(CPUARMState *env,
2891                                   S1Translate *ptw,
2892                                   uint32_t address,
2893                                   MMUAccessType access_type,
2894                                   GetPhysAddrResult *result,
2895                                   ARMMMUFaultInfo *fi)
2896  {
2897      V8M_SAttributes sattrs = {};
2898      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2899      bool secure = arm_space_is_secure(ptw->in_space);
2900      bool ret;
2901  
2902      if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2903          v8m_security_lookup(env, address, access_type, mmu_idx,
2904                              secure, &sattrs);
2905          if (access_type == MMU_INST_FETCH) {
2906              /*
2907               * Instruction fetches always use the MMU bank and the
2908               * transaction attribute determined by the fetch address,
2909               * regardless of CPU state. This is painful for QEMU
2910               * to handle, because it would mean we need to encode
2911               * into the mmu_idx not just the (user, negpri) information
2912               * for the current security state but also that for the
2913               * other security state, which would balloon the number
2914               * of mmu_idx values needed alarmingly.
2915               * Fortunately we can avoid this because it's not actually
2916               * possible to arbitrarily execute code from memory with
2917               * the wrong security attribute: it will always generate
2918               * an exception of some kind or another, apart from the
2919               * special case of an NS CPU executing an SG instruction
2920               * in S&NSC memory. So we always just fail the translation
2921               * here and sort things out in the exception handler
2922               * (including possibly emulating an SG instruction).
2923               */
2924              if (sattrs.ns != !secure) {
2925                  if (sattrs.nsc) {
2926                      fi->type = ARMFault_QEMU_NSCExec;
2927                  } else {
2928                      fi->type = ARMFault_QEMU_SFault;
2929                  }
2930                  result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2931                  result->f.phys_addr = address;
2932                  result->f.prot = 0;
2933                  return true;
2934              }
2935          } else {
2936              /*
2937               * For data accesses we always use the MMU bank indicated
2938               * by the current CPU state, but the security attributes
2939               * might downgrade a secure access to nonsecure.
2940               */
2941              if (sattrs.ns) {
2942                  result->f.attrs.secure = false;
2943                  result->f.attrs.space = ARMSS_NonSecure;
2944              } else if (!secure) {
2945                  /*
2946                   * NS access to S memory must fault.
2947                   * Architecturally we should first check whether the
2948                   * MPU information for this address indicates that we
2949                   * are doing an unaligned access to Device memory, which
2950                   * should generate a UsageFault instead. QEMU does not
2951                   * currently check for that kind of unaligned access though.
2952                   * If we added it we would need to do so as a special case
2953                   * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2954                   */
2955                  fi->type = ARMFault_QEMU_SFault;
2956                  result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2957                  result->f.phys_addr = address;
2958                  result->f.prot = 0;
2959                  return true;
2960              }
2961          }
2962      }
2963  
2964      ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2965                              result, fi, NULL);
2966      if (sattrs.subpage) {
2967          result->f.lg_page_size = 0;
2968      }
2969      return ret;
2970  }
2971  
2972  /*
2973   * Translate from the 4-bit stage 2 representation of
2974   * memory attributes (without cache-allocation hints) to
2975   * the 8-bit representation of the stage 1 MAIR registers
2976   * (which includes allocation hints).
2977   *
2978   * ref: shared/translation/attrs/S2AttrDecode()
2979   *      .../S2ConvertAttrsHints()
2980   */
2981  static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2982  {
2983      uint8_t hiattr = extract32(s2attrs, 2, 2);
2984      uint8_t loattr = extract32(s2attrs, 0, 2);
2985      uint8_t hihint = 0, lohint = 0;
2986  
2987      if (hiattr != 0) { /* normal memory */
2988          if (hcr & HCR_CD) { /* cache disabled */
2989              hiattr = loattr = 1; /* non-cacheable */
2990          } else {
2991              if (hiattr != 1) { /* Write-through or write-back */
2992                  hihint = 3; /* RW allocate */
2993              }
2994              if (loattr != 1) { /* Write-through or write-back */
2995                  lohint = 3; /* RW allocate */
2996              }
2997          }
2998      }
2999  
3000      return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
3001  }
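      /*
       * Informative examples of the conversion above:
       *   convert_stage2_attrs(0, 0xf)      -> 0xff  Outer/Inner WB becomes
       *                                              WB with RW-allocate hints
       *   convert_stage2_attrs(HCR_CD, 0xf) -> 0x44  cache disabled forces
       *                                              Normal Non-cacheable
       *   convert_stage2_attrs(0, 0x0)      -> 0x00  Device-nGnRnE unchanged
       */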
3002  
3003  /*
3004   * Combine either inner or outer cacheability attributes for normal
3005   * memory, according to table D4-42 and pseudocode procedure
3006   * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
3007   *
3008   * NB: only stage 1 includes allocation hints (RW bits), leading to
3009   * some asymmetry.
3010   */
3011  static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
3012  {
3013      if (s1 == 4 || s2 == 4) {
3014          /* non-cacheable has precedence */
3015          return 4;
3016      } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
3017          /* stage 1 write-through takes precedence */
3018          return s1;
3019      } else if (extract32(s2, 2, 2) == 2) {
3020          /* stage 2 write-through takes precedence, but the allocation hint
3021           * is still taken from stage 1
3022           */
3023          return (2 << 2) | extract32(s1, 0, 2);
3024      } else { /* write-back */
3025          return s1;
3026      }
3027  }
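      /*
       * Informative examples of the nibble combination above:
       *   combine_cacheattr_nibble(0xf, 0x4) -> 0x4  Non-cacheable wins
       *   combine_cacheattr_nibble(0xb, 0xf) -> 0xb  stage 1 WT wins
       *   combine_cacheattr_nibble(0xf, 0x8) -> 0xb  stage 2 WT wins, with
       *                                              the allocation hints
       *                                              taken from stage 1
       *   combine_cacheattr_nibble(0xf, 0xf) -> 0xf  both WB: stage 1 kept
       */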
3028  
3029  /*
3030   * Combine the memory type and cacheability attributes of
3031   * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
3032   * combined attributes in MAIR_EL1 format.
3033   */
3034  static uint8_t combined_attrs_nofwb(uint64_t hcr,
3035                                      ARMCacheAttrs s1, ARMCacheAttrs s2)
3036  {
3037      uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
3038  
3039      if (s2.is_s2_format) {
3040          s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
3041      } else {
3042          s2_mair_attrs = s2.attrs;
3043      }
3044  
3045      s1lo = extract32(s1.attrs, 0, 4);
3046      s2lo = extract32(s2_mair_attrs, 0, 4);
3047      s1hi = extract32(s1.attrs, 4, 4);
3048      s2hi = extract32(s2_mair_attrs, 4, 4);
3049  
3050      /* Combine memory type and cacheability attributes */
3051      if (s1hi == 0 || s2hi == 0) {
3052          /* Device has precedence over normal */
3053          if (s1lo == 0 || s2lo == 0) {
3054              /* nGnRnE has precedence over anything */
3055              ret_attrs = 0;
3056          } else if (s1lo == 4 || s2lo == 4) {
3057              /* non-Reordering has precedence over Reordering */
3058              ret_attrs = 4;  /* nGnRE */
3059          } else if (s1lo == 8 || s2lo == 8) {
3060              /* non-Gathering has precedence over Gathering */
3061              ret_attrs = 8;  /* nGRE */
3062          } else {
3063              ret_attrs = 0xc; /* GRE */
3064          }
3065      } else { /* Normal memory */
3066          /* Outer/inner cacheability combine independently */
3067          ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
3068                    | combine_cacheattr_nibble(s1lo, s2lo);
3069      }
3070      return ret_attrs;
3071  }
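      /*
       * Informative example of the no-FWB combination: a stage 1 mapping of
       * Device-nGnRE (s1.attrs == 0x04) combined with a stage 2 Normal WB
       * mapping yields 0x04, because Device always takes precedence over
       * Normal; two Normal WB mappings (both 0xff once in MAIR format)
       * combine nibble-wise to 0xff.
       */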
3072  
3073  static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
3074  {
3075      /*
3076       * Given the 4 bits specifying the outer or inner cacheability
3077       * in MAIR format, return a value specifying Normal Write-Back,
3078       * with the allocation and transient hints taken from the input
3079       * if the input specified some kind of cacheable attribute.
3080       */
3081      if (attr == 0 || attr == 4) {
3082          /*
3083           * 0 == an UNPREDICTABLE encoding
3084           * 4 == Non-cacheable
3085           * Either way, force Write-Back RW allocate non-transient
3086           */
3087          return 0xf;
3088      }
3089      /* Change WriteThrough to WriteBack, keep allocation and transient hints */
3090      return attr | 4;
3091  }
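      /*
       * Informative examples:
       *   force_cacheattr_nibble_wb(0x4) -> 0xf  NC forced to WB RW-allocate
       *   force_cacheattr_nibble_wb(0xa) -> 0xe  WT R-allocate becomes
       *                                          WB R-allocate
       *   force_cacheattr_nibble_wb(0xf) -> 0xf  WB RW-allocate is unchanged
       */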
3092  
3093  /*
3094   * Combine the memory type and cacheability attributes of
3095   * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
3096   * combined attributes in MAIR_EL1 format.
3097   */
3098  static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
3099  {
3100      assert(s2.is_s2_format && !s1.is_s2_format);
3101  
3102      switch (s2.attrs) {
3103      case 7:
3104          /* Use stage 1 attributes */
3105          return s1.attrs;
3106      case 6:
3107          /*
3108           * Force Normal Write-Back. Note that if S1 is Normal cacheable
3109           * then we take the allocation hints from it; otherwise it is
3110           * RW allocate, non-transient.
3111           */
3112          if ((s1.attrs & 0xf0) == 0) {
3113              /* S1 is Device */
3114              return 0xff;
3115          }
3116          /* Need to check the Inner and Outer nibbles separately */
3117          return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
3118              force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
3119      case 5:
3120          /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
3121          if ((s1.attrs & 0xf0) == 0) {
3122              return s1.attrs;
3123          }
3124          return 0x44;
3125      case 0 ... 3:
3126          /* Force Device, of subtype specified by S2 */
3127          return s2.attrs << 2;
3128      default:
3129          /*
3130           * RESERVED values (including RES0 descriptor bit [5] being nonzero);
3131           * arbitrarily force Device.
3132           */
3133          return 0;
3134      }
3135  }
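      /*
       * Informative examples of the FWB mapping above (s2.attrs uses the
       * FEAT_S2FWB encoding):
       *   s2.attrs == 7, s1.attrs == 0x44 -> 0x44  stage 1 attributes used
       *   s2.attrs == 6, s1.attrs == 0x44 -> 0xff  forced Normal Write-Back
       *   s2.attrs == 5, s1.attrs == 0x04 -> 0x04  stage 1 Device kept
       *   s2.attrs == 1, any s1           -> 0x04  forced Device-nGnRE
       */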
3136  
3137  /*
3138   * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
3139   * and CombineS1S2Desc()
3140   *
3141   * @hcr:     Effective HCR_EL2 value for the translation regime
3142   * @s1:      Attributes from stage 1 walk
3143   * @s2:      Attributes from stage 2 walk
3144   */
3145  static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
3146                                          ARMCacheAttrs s1, ARMCacheAttrs s2)
3147  {
3148      ARMCacheAttrs ret;
3149      bool tagged = false;
3150  
3151      assert(!s1.is_s2_format);
3152      ret.is_s2_format = false;
3153  
3154      if (s1.attrs == 0xf0) {
3155          tagged = true;
3156          s1.attrs = 0xff;
3157      }
3158  
3159      /* Combine shareability attributes (table D4-43) */
3160      if (s1.shareability == 2 || s2.shareability == 2) {
3161          /* if either is outer-shareable, the result is outer-shareable */
3162          ret.shareability = 2;
3163      } else if (s1.shareability == 3 || s2.shareability == 3) {
3164          /* if either is inner-shareable, the result is inner-shareable */
3165          ret.shareability = 3;
3166      } else {
3167          /* both non-shareable */
3168          ret.shareability = 0;
3169      }
3170  
3171      /* Combine memory type and cacheability attributes */
3172      if (hcr & HCR_FWB) {
3173          ret.attrs = combined_attrs_fwb(s1, s2);
3174      } else {
3175          ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
3176      }
3177  
3178      /*
3179       * Any location for which the resultant memory type is any
3180       * type of Device memory is always treated as Outer Shareable.
3181       * Any location for which the resultant memory type is Normal
3182       * Inner Non-cacheable, Outer Non-cacheable is always treated
3183       * as Outer Shareable.
3184       * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
3185       */
3186      if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
3187          ret.shareability = 2;
3188      }
3189  
3190      /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
3191      if (tagged && ret.attrs == 0xff) {
3192          ret.attrs = 0xf0;
3193      }
3194  
3195      return ret;
3196  }
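      /*
       * Informative end-to-end example: stage 1 Tagged Normal WB
       * (s1.attrs == 0xf0) combined with a stage 2 Normal WB mapping is
       * first treated as plain 0xff, combined, and then restored to 0xf0,
       * so the Tagged attribute survives stage 2. Any combination that
       * results in Device or in Normal Inner/Outer Non-cacheable (0x44)
       * is reported as Outer Shareable regardless of the inputs.
       */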
3197  
3198  /*
3199   * MMU disabled.  S1 addresses within aa64 translation regimes are
3200   * still checked for bounds -- see AArch64.S1DisabledOutput().
3201   */
3202  static bool get_phys_addr_disabled(CPUARMState *env,
3203                                     S1Translate *ptw,
3204                                     target_ulong address,
3205                                     MMUAccessType access_type,
3206                                     GetPhysAddrResult *result,
3207                                     ARMMMUFaultInfo *fi)
3208  {
3209      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
3210      uint8_t memattr = 0x00;    /* Device nGnRnE */
3211      uint8_t shareability = 0;  /* non-shareable */
3212      int r_el;
3213  
3214      switch (mmu_idx) {
3215      case ARMMMUIdx_Stage2:
3216      case ARMMMUIdx_Stage2_S:
3217      case ARMMMUIdx_Phys_S:
3218      case ARMMMUIdx_Phys_NS:
3219      case ARMMMUIdx_Phys_Root:
3220      case ARMMMUIdx_Phys_Realm:
3221          break;
3222  
3223      default:
3224          r_el = regime_el(env, mmu_idx);
3225          if (arm_el_is_aa64(env, r_el)) {
3226              int pamax = arm_pamax(env_archcpu(env));
3227              uint64_t tcr = env->cp15.tcr_el[r_el];
3228              int addrtop, tbi;
3229  
3230              tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
3231              if (access_type == MMU_INST_FETCH) {
3232                  tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
3233              }
3234              tbi = (tbi >> extract64(address, 55, 1)) & 1;
3235              addrtop = (tbi ? 55 : 63);
3236  
3237              if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
3238                  fi->type = ARMFault_AddressSize;
3239                  fi->level = 0;
3240                  fi->stage2 = false;
3241                  return true;
3242              }
3243  
3244              /*
3245               * When TBI is disabled, we've just validated that all of the
3246               * bits above PAMax are zero, so logically we only need to
3247               * clear the top byte for TBI.  But it's clearer to follow
3248               * the pseudocode set of addrdesc.paddress.
3249               */
3250              address = extract64(address, 0, 52);
3251          }
3252  
3253          /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
3254          if (r_el == 1) {
3255              uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
3256              if (hcr & HCR_DC) {
3257                  if (hcr & HCR_DCT) {
3258                      memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
3259                  } else {
3260                      memattr = 0xff;  /* Normal, WB, RWA */
3261                  }
3262              }
3263          }
3264          if (memattr == 0) {
3265              if (access_type == MMU_INST_FETCH) {
3266                  if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
3267                      memattr = 0xee;  /* Normal, WT, RA, NT */
3268                  } else {
3269                      memattr = 0x44;  /* Normal, Non-cacheable */
3270                  }
3271              }
3272              shareability = 2; /* outer shareable */
3273          }
3274          result->cacheattrs.is_s2_format = false;
3275          break;
3276      }
3277  
3278      result->f.phys_addr = address;
3279      result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
3280      result->f.lg_page_size = TARGET_PAGE_BITS;
3281      result->cacheattrs.shareability = shareability;
3282      result->cacheattrs.attrs = memattr;
3283      return false;
3284  }
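      /*
       * Informative example of the AArch64 out-of-range check above: with
       * arm_pamax() == 40 and TBI disabled, addrtop is 63 and any nonzero
       * bit in address[63:40] raises an ARMFault_AddressSize at level 0;
       * with TBI enabled for this half of the address space, addrtop is 55
       * and the top byte is ignored.
       */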
3285  
3286  static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
3287                                     target_ulong address,
3288                                     MMUAccessType access_type,
3289                                     GetPhysAddrResult *result,
3290                                     ARMMMUFaultInfo *fi)
3291  {
3292      hwaddr ipa;
3293      int s1_prot, s1_lgpgsz;
3294      ARMSecuritySpace in_space = ptw->in_space;
3295      bool ret, ipa_secure, s1_guarded;
3296      ARMCacheAttrs cacheattrs1;
3297      ARMSecuritySpace ipa_space;
3298      uint64_t hcr;
3299  
3300      ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
3301  
3302      /* If S1 fails, return early.  */
3303      if (ret) {
3304          return ret;
3305      }
3306  
3307      ipa = result->f.phys_addr;
3308      ipa_secure = result->f.attrs.secure;
3309      ipa_space = result->f.attrs.space;
3310  
3311      ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
3312      ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
3313      ptw->in_space = ipa_space;
3314      ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
3315  
3316      /*
3317       * S1 is done, now do S2 translation.
3318       * Save the stage1 results so that we may merge prot and cacheattrs later.
3319       */
3320      s1_prot = result->f.prot;
3321      s1_lgpgsz = result->f.lg_page_size;
3322      s1_guarded = result->f.extra.arm.guarded;
3323      cacheattrs1 = result->cacheattrs;
3324      memset(result, 0, sizeof(*result));
3325  
3326      ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
3327      fi->s2addr = ipa;
3328  
3329      /* Combine the S1 and S2 perms.  */
3330      result->f.prot &= s1_prot;
3331  
3332      /* If S2 fails, return early.  */
3333      if (ret) {
3334          return ret;
3335      }
3336  
3337      /*
3338       * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
3339       * this means "don't put this in the TLB"; in this case, return a
3340       * result with lg_page_size == 0 to achieve that. Otherwise,
3341       * use the maximum of the S1 & S2 page size, so that invalidation
3342       * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
3343       * we know the combined result permissions etc only cover the minimum
3344       * of the S1 and S2 page size, because we know that the common TLB code
3345       * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
3346       * and passing a larger page size value only affects invalidations.)
3347       */
3348      if (result->f.lg_page_size < TARGET_PAGE_BITS ||
3349          s1_lgpgsz < TARGET_PAGE_BITS) {
3350          result->f.lg_page_size = 0;
3351      } else if (result->f.lg_page_size < s1_lgpgsz) {
3352          result->f.lg_page_size = s1_lgpgsz;
3353      }
3354  
3355      /* Combine the S1 and S2 cache attributes. */
3356      hcr = arm_hcr_el2_eff_secstate(env, in_space);
3357      if (hcr & HCR_DC) {
3358          /*
3359           * HCR.DC forces the first stage attributes to
3360           *  Normal Non-Shareable,
3361           *  Inner Write-Back Read-Allocate Write-Allocate,
3362           *  Outer Write-Back Read-Allocate Write-Allocate.
3363           * Do not overwrite Tagged within attrs.
3364           */
3365          if (cacheattrs1.attrs != 0xf0) {
3366              cacheattrs1.attrs = 0xff;
3367          }
3368          cacheattrs1.shareability = 0;
3369      }
3370      result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
3371                                              result->cacheattrs);
3372  
3373      /* No BTI GP information in stage 2; we just use the S1 value */
3374      result->f.extra.arm.guarded = s1_guarded;
3375  
3376      /*
3377       * Check if IPA translates to secure or non-secure PA space.
3378       * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
3379       */
3380      if (in_space == ARMSS_Secure) {
3381          result->f.attrs.secure =
3382              !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
3383              && (ipa_secure
3384                  || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
3385          result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
3386      }
3387  
3388      return false;
3389  }
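      /*
       * Informative example of the page size combination above: if stage 1
       * maps a 2MB block (lg_page_size == 21) and stage 2 a 4KB page
       * (lg_page_size == 12), the result keeps 21 so that an invalidation
       * of the 2MB range also removes this entry, while the permissions
       * remain the intersection of the two stages; if either stage reported
       * a subpage, lg_page_size is forced to 0 and the result is not cached
       * in the TLB.
       */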
3390  
3391  static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
3392                                target_ulong address,
3393                                MMUAccessType access_type,
3394                                GetPhysAddrResult *result,
3395                                ARMMMUFaultInfo *fi)
3396  {
3397      ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
3398      ARMMMUIdx s1_mmu_idx;
3399  
3400      /*
3401       * The page table entries may downgrade Secure to NonSecure, but
3402       * cannot upgrade a NonSecure translation regime's attributes
3403       * to Secure or Realm.
3404       */
3405      result->f.attrs.space = ptw->in_space;
3406      result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
3407  
3408      switch (mmu_idx) {
3409      case ARMMMUIdx_Phys_S:
3410      case ARMMMUIdx_Phys_NS:
3411      case ARMMMUIdx_Phys_Root:
3412      case ARMMMUIdx_Phys_Realm:
3413          /* Checking Phys early avoids special casing later vs regime_el. */
3414          return get_phys_addr_disabled(env, ptw, address, access_type,
3415                                        result, fi);
3416  
3417      case ARMMMUIdx_Stage1_E0:
3418      case ARMMMUIdx_Stage1_E1:
3419      case ARMMMUIdx_Stage1_E1_PAN:
3420          /*
3421           * First stage lookup uses second stage for ptw; only
3422           * Secure has both S and NS IPA and starts with Stage2_S.
3423           */
3424          ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
3425              ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
3426          break;
3427  
3428      case ARMMMUIdx_Stage2:
3429      case ARMMMUIdx_Stage2_S:
3430          /*
3431           * Second stage lookup uses physical for ptw; whether this is S or
3432           * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
3433           * the Secure EL2&0 regime.
3434           */
3435          ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
3436          break;
3437  
3438      case ARMMMUIdx_E10_0:
3439          s1_mmu_idx = ARMMMUIdx_Stage1_E0;
3440          goto do_twostage;
3441      case ARMMMUIdx_E10_1:
3442          s1_mmu_idx = ARMMMUIdx_Stage1_E1;
3443          goto do_twostage;
3444      case ARMMMUIdx_E10_1_PAN:
3445          s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
3446      do_twostage:
3447          /*
3448           * Call ourselves recursively to do the stage 1 and then stage 2
3449           * translations if mmu_idx is a two-stage regime and EL2 is present.
3450           * Otherwise, a stage1+stage2 translation is just stage 1.
3451           */
3452          ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
3453          if (arm_feature(env, ARM_FEATURE_EL2) &&
3454              !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
3455              return get_phys_addr_twostage(env, ptw, address, access_type,
3456                                            result, fi);
3457          }
3458          /* fall through */
3459  
3460      default:
3461          /* Single stage uses physical for ptw. */
3462          ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
3463          break;
3464      }
3465  
3466      result->f.attrs.user = regime_is_user(env, mmu_idx);
3467  
3468      /*
3469       * Fast Context Switch Extension. This doesn't exist at all in v8.
3470       * In v7 and earlier it affects all stage 1 translations.
3471       */
3472      if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
3473          && !arm_feature(env, ARM_FEATURE_V8)) {
3474          if (regime_el(env, mmu_idx) == 3) {
3475              address += env->cp15.fcseidr_s;
3476          } else {
3477              address += env->cp15.fcseidr_ns;
3478          }
3479      }
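          /*
           * Informative example of the FCSE adjustment above: with
           * FCSEIDR.PID == 1 (register value 0x02000000), an access to
           * VA 0x00001000 is looked up as modified VA 0x02001000, while
           * addresses at or above 32MB are never remapped.
           */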
3480  
3481      if (arm_feature(env, ARM_FEATURE_PMSA)) {
3482          bool ret;
3483          result->f.lg_page_size = TARGET_PAGE_BITS;
3484  
3485          if (arm_feature(env, ARM_FEATURE_V8)) {
3486              /* PMSAv8 */
3487              ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
3488                                         result, fi);
3489          } else if (arm_feature(env, ARM_FEATURE_V7)) {
3490              /* PMSAv7 */
3491              ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
3492                                         result, fi);
3493          } else {
3494              /* Pre-v7 MPU */
3495              ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
3496                                         result, fi);
3497          }
3498          qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
3499                        " mmu_idx %u -> %s (prot %c%c%c)\n",
3500                        access_type == MMU_DATA_LOAD ? "reading" :
3501                        (access_type == MMU_DATA_STORE ? "writing" : "executing"),
3502                        (uint32_t)address, mmu_idx,
3503                        ret ? "Miss" : "Hit",
3504                        result->f.prot & PAGE_READ ? 'r' : '-',
3505                        result->f.prot & PAGE_WRITE ? 'w' : '-',
3506                        result->f.prot & PAGE_EXEC ? 'x' : '-');
3507  
3508          return ret;
3509      }
3510  
3511      /* Definitely a real MMU, not an MPU */
3512  
3513      if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
3514          return get_phys_addr_disabled(env, ptw, address, access_type,
3515                                        result, fi);
3516      }
3517  
3518      if (regime_using_lpae_format(env, mmu_idx)) {
3519          return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
3520      } else if (arm_feature(env, ARM_FEATURE_V7) ||
3521                 regime_sctlr(env, mmu_idx) & SCTLR_XP) {
3522          return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
3523      } else {
3524          return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
3525      }
3526  }
3527  
3528  static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
3529                                target_ulong address,
3530                                MMUAccessType access_type,
3531                                GetPhysAddrResult *result,
3532                                ARMMMUFaultInfo *fi)
3533  {
3534      if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
3535          return true;
3536      }
3537      if (!granule_protection_check(env, result->f.phys_addr,
3538                                    result->f.attrs.space, fi)) {
3539          fi->type = ARMFault_GPCFOnOutput;
3540          return true;
3541      }
3542      return false;
3543  }
3544  
3545  bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
3546                                      MMUAccessType access_type,
3547                                      ARMMMUIdx mmu_idx, ARMSecuritySpace space,
3548                                      GetPhysAddrResult *result,
3549                                      ARMMMUFaultInfo *fi)
3550  {
3551      S1Translate ptw = {
3552          .in_mmu_idx = mmu_idx,
3553          .in_space = space,
3554      };
3555      return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
3556  }
3557  
3558  bool get_phys_addr(CPUARMState *env, target_ulong address,
3559                     MMUAccessType access_type, ARMMMUIdx mmu_idx,
3560                     GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
3561  {
3562      S1Translate ptw = {
3563          .in_mmu_idx = mmu_idx,
3564      };
3565      ARMSecuritySpace ss;
3566  
3567      switch (mmu_idx) {
3568      case ARMMMUIdx_E10_0:
3569      case ARMMMUIdx_E10_1:
3570      case ARMMMUIdx_E10_1_PAN:
3571      case ARMMMUIdx_E20_0:
3572      case ARMMMUIdx_E20_2:
3573      case ARMMMUIdx_E20_2_PAN:
3574      case ARMMMUIdx_Stage1_E0:
3575      case ARMMMUIdx_Stage1_E1:
3576      case ARMMMUIdx_Stage1_E1_PAN:
3577      case ARMMMUIdx_E2:
3578          ss = arm_security_space_below_el3(env);
3579          break;
3580      case ARMMMUIdx_Stage2:
3581          /*
3582           * For Secure EL2, we need this index to be NonSecure;
3583           * otherwise this will already be NonSecure or Realm.
3584           */
3585          ss = arm_security_space_below_el3(env);
3586          if (ss == ARMSS_Secure) {
3587              ss = ARMSS_NonSecure;
3588          }
3589          break;
3590      case ARMMMUIdx_Phys_NS:
3591      case ARMMMUIdx_MPrivNegPri:
3592      case ARMMMUIdx_MUserNegPri:
3593      case ARMMMUIdx_MPriv:
3594      case ARMMMUIdx_MUser:
3595          ss = ARMSS_NonSecure;
3596          break;
3597      case ARMMMUIdx_Stage2_S:
3598      case ARMMMUIdx_Phys_S:
3599      case ARMMMUIdx_MSPrivNegPri:
3600      case ARMMMUIdx_MSUserNegPri:
3601      case ARMMMUIdx_MSPriv:
3602      case ARMMMUIdx_MSUser:
3603          ss = ARMSS_Secure;
3604          break;
3605      case ARMMMUIdx_E3:
3606          if (arm_feature(env, ARM_FEATURE_AARCH64) &&
3607              cpu_isar_feature(aa64_rme, env_archcpu(env))) {
3608              ss = ARMSS_Root;
3609          } else {
3610              ss = ARMSS_Secure;
3611          }
3612          break;
3613      case ARMMMUIdx_Phys_Root:
3614          ss = ARMSS_Root;
3615          break;
3616      case ARMMMUIdx_Phys_Realm:
3617          ss = ARMSS_Realm;
3618          break;
3619      default:
3620          g_assert_not_reached();
3621      }
3622  
3623      ptw.in_space = ss;
3624      return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
3625  }
3626  
3627  hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
3628                                           MemTxAttrs *attrs)
3629  {
3630      ARMCPU *cpu = ARM_CPU(cs);
3631      CPUARMState *env = &cpu->env;
3632      ARMMMUIdx mmu_idx = arm_mmu_idx(env);
3633      ARMSecuritySpace ss = arm_security_space(env);
3634      S1Translate ptw = {
3635          .in_mmu_idx = mmu_idx,
3636          .in_space = ss,
3637          .in_debug = true,
3638      };
3639      GetPhysAddrResult res = {};
3640      ARMMMUFaultInfo fi = {};
3641      bool ret;
3642  
3643      ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
3644      *attrs = res.f.attrs;
3645  
3646      if (ret) {
3647          return -1;
3648      }
3649      return res.f.phys_addr;
3650  }
3651