xref: /openbmc/qemu/target/arm/ptw.c (revision e4c93e44ab103f6c67abd85d620343f61aafa004)
1 /*
2  * ARM page table walking.
3  *
4  * This code is licensed under the GNU GPL v2 or later.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qemu/range.h"
12 #include "exec/exec-all.h"
13 #include "cpu.h"
14 #include "internals.h"
15 #include "idau.h"
16 
17 
18 typedef struct S1Translate {
19     ARMMMUIdx in_mmu_idx;
20     bool in_secure;
21     bool in_debug;
22     bool out_secure;
23     bool out_be;
24     hwaddr out_phys;
25     void *out_host;
26 } S1Translate;
27 
28 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
29                                uint64_t address,
30                                MMUAccessType access_type, bool s1_is_el0,
31                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
32     __attribute__((nonnull));
33 
34 static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
35                                       target_ulong address,
36                                       MMUAccessType access_type,
37                                       GetPhysAddrResult *result,
38                                       ARMMMUFaultInfo *fi)
39     __attribute__((nonnull));
40 
41 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
42 static const uint8_t pamax_map[] = {
43     [0] = 32,
44     [1] = 36,
45     [2] = 40,
46     [3] = 42,
47     [4] = 44,
48     [5] = 48,
49     [6] = 52,
50 };
51 
52 /* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
53 unsigned int arm_pamax(ARMCPU *cpu)
54 {
55     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
56         unsigned int parange =
57             FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
58 
59         /*
60          * id_aa64mmfr0 is a read-only register so values outside of the
61          * supported mappings can be considered an implementation error.
62          */
63         assert(parange < ARRAY_SIZE(pamax_map));
64         return pamax_map[parange];
65     }
66 
67     /*
68      * In machvirt_init, we call arm_pamax on a cpu that is not fully
69      * initialized, so we can't rely on the propagation done in realize.
70      */
71     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
72         arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
73         /* v7 with LPAE */
74         return 40;
75     }
76     /* Anything else */
77     return 32;
78 }
79 
80 /*
81  * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
82  */
83 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
84 {
85     switch (mmu_idx) {
86     case ARMMMUIdx_E10_0:
87         return ARMMMUIdx_Stage1_E0;
88     case ARMMMUIdx_E10_1:
89         return ARMMMUIdx_Stage1_E1;
90     case ARMMMUIdx_E10_1_PAN:
91         return ARMMMUIdx_Stage1_E1_PAN;
92     default:
93         return mmu_idx;
94     }
95 }
96 
97 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
98 {
99     return stage_1_mmu_idx(arm_mmu_idx(env));
100 }
101 
102 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
103 {
104     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
105 }
106 
107 /* Return the TTBR associated with this translation regime */
108 static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
109 {
110     if (mmu_idx == ARMMMUIdx_Stage2) {
111         return env->cp15.vttbr_el2;
112     }
113     if (mmu_idx == ARMMMUIdx_Stage2_S) {
114         return env->cp15.vsttbr_el2;
115     }
116     if (ttbrn == 0) {
117         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
118     } else {
119         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
120     }
121 }
122 
123 /* Return true if the specified stage of address translation is disabled */
124 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
125                                         bool is_secure)
126 {
127     uint64_t hcr_el2;
128 
129     if (arm_feature(env, ARM_FEATURE_M)) {
130         switch (env->v7m.mpu_ctrl[is_secure] &
131                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
132         case R_V7M_MPU_CTRL_ENABLE_MASK:
133             /* Enabled, but not for HardFault and NMI */
134             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
135         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
136             /* Enabled for all cases */
137             return false;
138         case 0:
139         default:
140             /*
141              * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
142              * we warned about that in armv7m_nvic.c when the guest set it.
143              */
144             return true;
145         }
146     }
147 
148     hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
149 
150     switch (mmu_idx) {
151     case ARMMMUIdx_Stage2:
152     case ARMMMUIdx_Stage2_S:
153         /* HCR.DC means HCR.VM behaves as 1 */
154         return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
155 
156     case ARMMMUIdx_E10_0:
157     case ARMMMUIdx_E10_1:
158     case ARMMMUIdx_E10_1_PAN:
159         /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
160         if (hcr_el2 & HCR_TGE) {
161             return true;
162         }
163         break;
164 
165     case ARMMMUIdx_Stage1_E0:
166     case ARMMMUIdx_Stage1_E1:
167     case ARMMMUIdx_Stage1_E1_PAN:
168         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
169         if (hcr_el2 & HCR_DC) {
170             return true;
171         }
172         break;
173 
174     case ARMMMUIdx_E20_0:
175     case ARMMMUIdx_E20_2:
176     case ARMMMUIdx_E20_2_PAN:
177     case ARMMMUIdx_E2:
178     case ARMMMUIdx_E3:
179         break;
180 
181     case ARMMMUIdx_Phys_NS:
182     case ARMMMUIdx_Phys_S:
183         /* No translation for physical address spaces. */
184         return true;
185 
186     default:
187         g_assert_not_reached();
188     }
189 
190     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
191 }
192 
193 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
194 {
195     /*
196      * For an S1 page table walk, the stage 1 attributes are always
197      * some form of "this is Normal memory". The combined S1+S2
198      * attributes are therefore only Device if stage 2 specifies Device.
199      * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
200      * ie when cacheattrs.attrs bits [3:2] are 0b00.
201      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
202      * when cacheattrs.attrs bit [2] is 0.
203      */
204     if (hcr & HCR_FWB) {
205         return (attrs & 0x4) == 0;
206     } else {
207         return (attrs & 0xc) == 0;
208     }
209 }
210 
211 /* Translate a S1 pagetable walk through S2 if needed.  */
212 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
213                              hwaddr addr, ARMMMUFaultInfo *fi)
214 {
215     bool is_secure = ptw->in_secure;
216     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
217     ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
218     bool s2_phys = false;
219     uint8_t pte_attrs;
220     bool pte_secure;
221 
222     if (!arm_mmu_idx_is_stage1_of_2(mmu_idx)
223         || regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
224         s2_mmu_idx = is_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
225         s2_phys = true;
226     }
227 
228     if (unlikely(ptw->in_debug)) {
229         /*
230          * From gdbstub, do not use softmmu so that we don't modify the
231          * state of the cpu at all, including softmmu tlb contents.
232          */
233         if (s2_phys) {
234             ptw->out_phys = addr;
235             pte_attrs = 0;
236             pte_secure = is_secure;
237         } else {
238             S1Translate s2ptw = {
239                 .in_mmu_idx = s2_mmu_idx,
240                 .in_secure = is_secure,
241                 .in_debug = true,
242             };
243             GetPhysAddrResult s2 = { };
244             if (!get_phys_addr_lpae(env, &s2ptw, addr, MMU_DATA_LOAD,
245                                     false, &s2, fi)) {
246                 goto fail;
247             }
248             ptw->out_phys = s2.f.phys_addr;
249             pte_attrs = s2.cacheattrs.attrs;
250             pte_secure = s2.f.attrs.secure;
251         }
252         ptw->out_host = NULL;
253     } else {
254         CPUTLBEntryFull *full;
255         int flags;
256 
257         env->tlb_fi = fi;
258         flags = probe_access_full(env, addr, MMU_DATA_LOAD,
259                                   arm_to_core_mmu_idx(s2_mmu_idx),
260                                   true, &ptw->out_host, &full, 0);
261         env->tlb_fi = NULL;
262 
263         if (unlikely(flags & TLB_INVALID_MASK)) {
264             goto fail;
265         }
266         ptw->out_phys = full->phys_addr;
267         pte_attrs = full->pte_attrs;
268         pte_secure = full->attrs.secure;
269     }
270 
271     if (!s2_phys) {
272         uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
273 
274         if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
275             /*
276              * PTW set and S1 walk touched S2 Device memory:
277              * generate Permission fault.
278              */
279             fi->type = ARMFault_Permission;
280             fi->s2addr = addr;
281             fi->stage2 = true;
282             fi->s1ptw = true;
283             fi->s1ns = !is_secure;
284             return false;
285         }
286     }
287 
288     /* Check if page table walk is to secure or non-secure PA space. */
289     ptw->out_secure = (is_secure
290                        && !(pte_secure
291                             ? env->cp15.vstcr_el2 & VSTCR_SW
292                             : env->cp15.vtcr_el2 & VTCR_NSW));
293     ptw->out_be = regime_translation_big_endian(env, mmu_idx);
294     return true;
295 
296  fail:
297     assert(fi->type != ARMFault_None);
298     fi->s2addr = addr;
299     fi->stage2 = true;
300     fi->s1ptw = true;
301     fi->s1ns = !is_secure;
302     return false;
303 }
304 
305 /* All loads done in the course of a page table walk go through here. */
306 static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
307                             ARMMMUFaultInfo *fi)
308 {
309     CPUState *cs = env_cpu(env);
310     uint32_t data;
311 
312     if (!S1_ptw_translate(env, ptw, addr, fi)) {
313         /* Failure. */
314         assert(fi->s1ptw);
315         return 0;
316     }
317 
318     if (likely(ptw->out_host)) {
319         /* Page tables are in RAM, and we have the host address. */
320         if (ptw->out_be) {
321             data = ldl_be_p(ptw->out_host);
322         } else {
323             data = ldl_le_p(ptw->out_host);
324         }
325     } else {
326         /* Page tables are in MMIO. */
327         MemTxAttrs attrs = { .secure = ptw->out_secure };
328         AddressSpace *as = arm_addressspace(cs, attrs);
329         MemTxResult result = MEMTX_OK;
330 
331         if (ptw->out_be) {
332             data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
333         } else {
334             data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
335         }
336         if (unlikely(result != MEMTX_OK)) {
337             fi->type = ARMFault_SyncExternalOnWalk;
338             fi->ea = arm_extabort_type(result);
339             return 0;
340         }
341     }
342     return data;
343 }
344 
345 static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, hwaddr addr,
346                             ARMMMUFaultInfo *fi)
347 {
348     CPUState *cs = env_cpu(env);
349     uint64_t data;
350 
351     if (!S1_ptw_translate(env, ptw, addr, fi)) {
352         /* Failure. */
353         assert(fi->s1ptw);
354         return 0;
355     }
356 
357     if (likely(ptw->out_host)) {
358         /* Page tables are in RAM, and we have the host address. */
359         if (ptw->out_be) {
360             data = ldq_be_p(ptw->out_host);
361         } else {
362             data = ldq_le_p(ptw->out_host);
363         }
364     } else {
365         /* Page tables are in MMIO. */
366         MemTxAttrs attrs = { .secure = ptw->out_secure };
367         AddressSpace *as = arm_addressspace(cs, attrs);
368         MemTxResult result = MEMTX_OK;
369 
370         if (ptw->out_be) {
371             data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
372         } else {
373             data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
374         }
375         if (unlikely(result != MEMTX_OK)) {
376             fi->type = ARMFault_SyncExternalOnWalk;
377             fi->ea = arm_extabort_type(result);
378             return 0;
379         }
380     }
381     return data;
382 }
383 
384 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
385                                      uint32_t *table, uint32_t address)
386 {
387     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
388     uint64_t tcr = regime_tcr(env, mmu_idx);
389     int maskshift = extract32(tcr, 0, 3);
390     uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
391     uint32_t base_mask;
392 
393     if (address & mask) {
394         if (tcr & TTBCR_PD1) {
395             /* Translation table walk disabled for TTBR1 */
396             return false;
397         }
398         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
399     } else {
400         if (tcr & TTBCR_PD0) {
401             /* Translation table walk disabled for TTBR0 */
402             return false;
403         }
404         base_mask = ~((uint32_t)0x3fffu >> maskshift);
405         *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
406     }
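    /*
     * The level 1 descriptor index is VA[31:20]; scaling by the 4-byte
     * descriptor size gives the byte offset that is ORed in here.
     */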
407     *table |= (address >> 18) & 0x3ffc;
408     return true;
409 }
410 
411 /*
412  * Translate section/page access permissions to page R/W protection flags
413  * @env:         CPUARMState
414  * @mmu_idx:     MMU index indicating required translation regime
415  * @ap:          The 3-bit access permissions (AP[2:0])
416  * @domain_prot: The 2-bit domain access permissions
417  */
418 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
419                          int ap, int domain_prot)
420 {
421     bool is_user = regime_is_user(env, mmu_idx);
422 
423     if (domain_prot == 3) {
424         return PAGE_READ | PAGE_WRITE;
425     }
426 
427     switch (ap) {
428     case 0:
429         if (arm_feature(env, ARM_FEATURE_V7)) {
430             return 0;
431         }
432         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
433         case SCTLR_S:
434             return is_user ? 0 : PAGE_READ;
435         case SCTLR_R:
436             return PAGE_READ;
437         default:
438             return 0;
439         }
440     case 1:
441         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
442     case 2:
443         if (is_user) {
444             return PAGE_READ;
445         } else {
446             return PAGE_READ | PAGE_WRITE;
447         }
448     case 3:
449         return PAGE_READ | PAGE_WRITE;
450     case 4: /* Reserved.  */
451         return 0;
452     case 5:
453         return is_user ? 0 : PAGE_READ;
454     case 6:
455         return PAGE_READ;
456     case 7:
457         if (!arm_feature(env, ARM_FEATURE_V6K)) {
458             return 0;
459         }
460         return PAGE_READ;
461     default:
462         g_assert_not_reached();
463     }
464 }
465 
466 /*
467  * Translate section/page access permissions to page R/W protection flags.
468  * @ap:      The 2-bit simple AP (AP[2:1])
469  * @is_user: TRUE if accessing from PL0
470  */
471 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
472 {
473     switch (ap) {
474     case 0:
475         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
476     case 1:
477         return PAGE_READ | PAGE_WRITE;
478     case 2:
479         return is_user ? 0 : PAGE_READ;
480     case 3:
481         return PAGE_READ;
482     default:
483         g_assert_not_reached();
484     }
485 }
486 
487 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
488 {
489     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
490 }
491 
492 static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
493                              uint32_t address, MMUAccessType access_type,
494                              GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
495 {
496     int level = 1;
497     uint32_t table;
498     uint32_t desc;
499     int type;
500     int ap;
501     int domain = 0;
502     int domain_prot;
503     hwaddr phys_addr;
504     uint32_t dacr;
505 
506     /* Pagetable walk.  */
507     /* Lookup l1 descriptor.  */
508     if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
509         /* Section translation fault if page walk is disabled by PD0 or PD1 */
510         fi->type = ARMFault_Translation;
511         goto do_fault;
512     }
513     desc = arm_ldl_ptw(env, ptw, table, fi);
514     if (fi->type != ARMFault_None) {
515         goto do_fault;
516     }
517     type = (desc & 3);
518     domain = (desc >> 5) & 0x0f;
519     if (regime_el(env, ptw->in_mmu_idx) == 1) {
520         dacr = env->cp15.dacr_ns;
521     } else {
522         dacr = env->cp15.dacr_s;
523     }
524     domain_prot = (dacr >> (domain * 2)) & 3;
525     if (type == 0) {
526         /* Section translation fault.  */
527         fi->type = ARMFault_Translation;
528         goto do_fault;
529     }
530     if (type != 2) {
531         level = 2;
532     }
533     if (domain_prot == 0 || domain_prot == 2) {
534         fi->type = ARMFault_Domain;
535         goto do_fault;
536     }
537     if (type == 2) {
538         /* 1Mb section.  */
539         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
540         ap = (desc >> 10) & 3;
541         result->f.lg_page_size = 20; /* 1MB */
542     } else {
543         /* Lookup l2 entry.  */
544         if (type == 1) {
545             /* Coarse pagetable.  */
546             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
547         } else {
548             /* Fine pagetable.  */
549             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
550         }
551         desc = arm_ldl_ptw(env, ptw, table, fi);
552         if (fi->type != ARMFault_None) {
553             goto do_fault;
554         }
555         switch (desc & 3) {
556         case 0: /* Page translation fault.  */
557             fi->type = ARMFault_Translation;
558             goto do_fault;
559         case 1: /* 64k page.  */
560             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
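            /*
             * Large (64KB) pages carry four AP fields, one per 16KB
             * subpage; VA bits [15:14] select which field applies here.
             */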
561             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
562             result->f.lg_page_size = 16;
563             break;
564         case 2: /* 4k page.  */
565             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
566             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
567             result->f.lg_page_size = 12;
568             break;
569         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
570             if (type == 1) {
571                 /* ARMv6/XScale extended small page format */
572                 if (arm_feature(env, ARM_FEATURE_XSCALE)
573                     || arm_feature(env, ARM_FEATURE_V6)) {
574                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
575                     result->f.lg_page_size = 12;
576                 } else {
577                     /*
578                      * UNPREDICTABLE in ARMv5; we choose to take a
579                      * page translation fault.
580                      */
581                     fi->type = ARMFault_Translation;
582                     goto do_fault;
583                 }
584             } else {
585                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
586                 result->f.lg_page_size = 10;
587             }
588             ap = (desc >> 4) & 3;
589             break;
590         default:
591             /* Never happens, but compiler isn't smart enough to tell.  */
592             g_assert_not_reached();
593         }
594     }
595     result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
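    /*
     * The ARMv5 table format has no execute-never bit, so any mapping
     * that grants access at all is also executable.
     */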
596     result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
597     if (!(result->f.prot & (1 << access_type))) {
598         /* Access permission fault.  */
599         fi->type = ARMFault_Permission;
600         goto do_fault;
601     }
602     result->f.phys_addr = phys_addr;
603     return false;
604 do_fault:
605     fi->domain = domain;
606     fi->level = level;
607     return true;
608 }
609 
610 static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
611                              uint32_t address, MMUAccessType access_type,
612                              GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
613 {
614     ARMCPU *cpu = env_archcpu(env);
615     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
616     int level = 1;
617     uint32_t table;
618     uint32_t desc;
619     uint32_t xn;
620     uint32_t pxn = 0;
621     int type;
622     int ap;
623     int domain = 0;
624     int domain_prot;
625     hwaddr phys_addr;
626     uint32_t dacr;
627     bool ns;
628 
629     /* Pagetable walk.  */
630     /* Lookup l1 descriptor.  */
631     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
632         /* Section translation fault if page walk is disabled by PD0 or PD1 */
633         fi->type = ARMFault_Translation;
634         goto do_fault;
635     }
636     desc = arm_ldl_ptw(env, ptw, table, fi);
637     if (fi->type != ARMFault_None) {
638         goto do_fault;
639     }
640     type = (desc & 3);
641     if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
642         /* Section translation fault, or attempt to use the encoding
643          * which is Reserved on implementations without PXN.
644          */
645         fi->type = ARMFault_Translation;
646         goto do_fault;
647     }
648     if ((type == 1) || !(desc & (1 << 18))) {
649         /* Page or Section.  */
650         domain = (desc >> 5) & 0x0f;
651     }
652     if (regime_el(env, mmu_idx) == 1) {
653         dacr = env->cp15.dacr_ns;
654     } else {
655         dacr = env->cp15.dacr_s;
656     }
657     if (type == 1) {
658         level = 2;
659     }
660     domain_prot = (dacr >> (domain * 2)) & 3;
661     if (domain_prot == 0 || domain_prot == 2) {
662         /* Section or Page domain fault */
663         fi->type = ARMFault_Domain;
664         goto do_fault;
665     }
666     if (type != 1) {
667         if (desc & (1 << 18)) {
668             /* Supersection.  */
669             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
670             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
671             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
672             result->f.lg_page_size = 24;  /* 16MB */
673         } else {
674             /* Section.  */
675             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
676             result->f.lg_page_size = 20;  /* 1MB */
677         }
678         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
679         xn = desc & (1 << 4);
680         pxn = desc & 1;
681         ns = extract32(desc, 19, 1);
682     } else {
683         if (cpu_isar_feature(aa32_pxn, cpu)) {
684             pxn = (desc >> 2) & 1;
685         }
686         ns = extract32(desc, 3, 1);
687         /* Lookup l2 entry.  */
688         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
689         desc = arm_ldl_ptw(env, ptw, table, fi);
690         if (fi->type != ARMFault_None) {
691             goto do_fault;
692         }
693         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
694         switch (desc & 3) {
695         case 0: /* Page translation fault.  */
696             fi->type = ARMFault_Translation;
697             goto do_fault;
698         case 1: /* 64k page.  */
699             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
700             xn = desc & (1 << 15);
701             result->f.lg_page_size = 16;
702             break;
703         case 2: case 3: /* 4k page.  */
704             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
705             xn = desc & 1;
706             result->f.lg_page_size = 12;
707             break;
708         default:
709             /* Never happens, but compiler isn't smart enough to tell.  */
710             g_assert_not_reached();
711         }
712     }
713     if (domain_prot == 3) {
714         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
715     } else {
716         if (pxn && !regime_is_user(env, mmu_idx)) {
717             xn = 1;
718         }
719         if (xn && access_type == MMU_INST_FETCH) {
720             fi->type = ARMFault_Permission;
721             goto do_fault;
722         }
723 
724         if (arm_feature(env, ARM_FEATURE_V6K) &&
725                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
726             /* The simplified model uses AP[0] as an access control bit.  */
727             if ((ap & 1) == 0) {
728                 /* Access flag fault.  */
729                 fi->type = ARMFault_AccessFlag;
730                 goto do_fault;
731             }
732             result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
733         } else {
734             result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
735         }
736         if (result->f.prot && !xn) {
737             result->f.prot |= PAGE_EXEC;
738         }
739         if (!(result->f.prot & (1 << access_type))) {
740             /* Access permission fault.  */
741             fi->type = ARMFault_Permission;
742             goto do_fault;
743         }
744     }
745     if (ns) {
746         /* The NS bit will (as required by the architecture) have no effect if
747          * the CPU doesn't support TZ or this is a non-secure translation
748          * regime, because the attribute will already be non-secure.
749          */
750         result->f.attrs.secure = false;
751     }
752     result->f.phys_addr = phys_addr;
753     return false;
754 do_fault:
755     fi->domain = domain;
756     fi->level = level;
757     return true;
758 }
759 
760 /*
761  * Translate S2 section/page access permissions to protection flags
762  * @env:     CPUARMState
763  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
764  * @xn:      XN (execute-never) bits
765  * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
766  */
767 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
768 {
769     int prot = 0;
770 
771     if (s2ap & 1) {
772         prot |= PAGE_READ;
773     }
774     if (s2ap & 2) {
775         prot |= PAGE_WRITE;
776     }
777 
778     if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
779         switch (xn) {
780         case 0:
781             prot |= PAGE_EXEC;
782             break;
783         case 1:
784             if (s1_is_el0) {
785                 prot |= PAGE_EXEC;
786             }
787             break;
788         case 2:
789             break;
790         case 3:
791             if (!s1_is_el0) {
792                 prot |= PAGE_EXEC;
793             }
794             break;
795         default:
796             g_assert_not_reached();
797         }
798     } else {
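        /*
         * Without FEAT_XNX (TTS2UXN), only XN[1], the classic stage 2
         * execute-never bit, is defined; XN[0] plays no part here.
         */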
799         if (!extract32(xn, 1, 1)) {
800             if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
801                 prot |= PAGE_EXEC;
802             }
803         }
804     }
805     return prot;
806 }
807 
808 /*
809  * Translate section/page access permissions to protection flags
810  * @env:     CPUARMState
811  * @mmu_idx: MMU index indicating required translation regime
812  * @is_aa64: TRUE if AArch64
813  * @ap:      The 2-bit simple AP (AP[2:1])
814  * @ns:      NS (non-secure) bit
815  * @xn:      XN (execute-never) bit
816  * @pxn:     PXN (privileged execute-never) bit
817  */
818 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
819                       int ap, int ns, int xn, int pxn)
820 {
821     bool is_user = regime_is_user(env, mmu_idx);
822     int prot_rw, user_rw;
823     bool have_wxn;
824     int wxn = 0;
825 
826     assert(mmu_idx != ARMMMUIdx_Stage2);
827     assert(mmu_idx != ARMMMUIdx_Stage2_S);
828 
829     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
830     if (is_user) {
831         prot_rw = user_rw;
832     } else {
833         if (user_rw && regime_is_pan(env, mmu_idx)) {
834             /* PAN forbids data accesses but doesn't affect insn fetch */
835             prot_rw = 0;
836         } else {
837             prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
838         }
839     }
840 
841     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
842         return prot_rw;
843     }
844 
845     /* TODO have_wxn should be replaced with
846      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
847      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
848      * compatible processors have EL2, which is required for [U]WXN.
849      */
850     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
851 
852     if (have_wxn) {
853         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
854     }
855 
856     if (is_aa64) {
857         if (regime_has_2_ranges(mmu_idx) && !is_user) {
858             xn = pxn || (user_rw & PAGE_WRITE);
859         }
860     } else if (arm_feature(env, ARM_FEATURE_V7)) {
861         switch (regime_el(env, mmu_idx)) {
862         case 1:
863         case 3:
864             if (is_user) {
865                 xn = xn || !(user_rw & PAGE_READ);
866             } else {
867                 int uwxn = 0;
868                 if (have_wxn) {
869                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
870                 }
871                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
872                      (uwxn && (user_rw & PAGE_WRITE));
873             }
874             break;
875         case 2:
876             break;
877         }
878     } else {
879         xn = wxn = 0;
880     }
881 
882     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
883         return prot_rw;
884     }
885     return prot_rw | PAGE_EXEC;
886 }
887 
888 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
889                                           ARMMMUIdx mmu_idx)
890 {
891     uint64_t tcr = regime_tcr(env, mmu_idx);
892     uint32_t el = regime_el(env, mmu_idx);
893     int select, tsz;
894     bool epd, hpd;
895 
896     assert(mmu_idx != ARMMMUIdx_Stage2_S);
897 
898     if (mmu_idx == ARMMMUIdx_Stage2) {
899         /* VTCR */
900         bool sext = extract32(tcr, 4, 1);
901         bool sign = extract32(tcr, 3, 1);
902 
903         /*
904          * If the sign-extend bit is not the same as t0sz[3], the result
905          * is unpredictable. Flag this as a guest error.
906          */
907         if (sign != sext) {
908             qemu_log_mask(LOG_GUEST_ERROR,
909                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
910         }
911         tsz = sextract32(tcr, 0, 4) + 8;
912         select = 0;
913         hpd = false;
914         epd = false;
915     } else if (el == 2) {
916         /* HTCR */
917         tsz = extract32(tcr, 0, 3);
918         select = 0;
919         hpd = extract64(tcr, 24, 1);
920         epd = false;
921     } else {
922         int t0sz = extract32(tcr, 0, 3);
923         int t1sz = extract32(tcr, 16, 3);
924 
925         if (t1sz == 0) {
926             select = va > (0xffffffffu >> t0sz);
927         } else {
928             /* Note that we will detect errors later.  */
929             select = va >= ~(0xffffffffu >> t1sz);
930         }
931         if (!select) {
932             tsz = t0sz;
933             epd = extract32(tcr, 7, 1);
934             hpd = extract64(tcr, 41, 1);
935         } else {
936             tsz = t1sz;
937             epd = extract32(tcr, 23, 1);
938             hpd = extract64(tcr, 42, 1);
939         }
940         /* For aarch32, hpd0 is not enabled without t2e as well.  */
941         hpd &= extract32(tcr, 6, 1);
942     }
943 
944     return (ARMVAParameters) {
945         .tsz = tsz,
946         .select = select,
947         .epd = epd,
948         .hpd = hpd,
949     };
950 }
951 
952 /*
953  * check_s2_mmu_setup
954  * @cpu:        ARMCPU
955  * @is_aa64:    True if the translation regime is in AArch64 state
956  * @level:      Suggested starting level
957  * @inputsize:  Bitsize of IPAs
958  * @stride:     Page-table stride (See the ARM ARM)
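 * @outputsize: Bitsize of PAs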
959  *
960  * Returns true if the suggested S2 translation parameters are OK and
961  * false otherwise.
962  */
963 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
964                                int inputsize, int stride, int outputsize)
965 {
966     const int grainsize = stride + 3;
967     int startsizecheck;
968 
969     /*
970      * Negative levels are usually not allowed...
971      * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
972      * begins with level -1.  Note that previous feature tests will have
973      * eliminated this combination if it is not enabled.
974      */
975     if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
976         return false;
977     }
978 
979     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
980     if (startsizecheck < 1 || startsizecheck > stride + 4) {
981         return false;
982     }
983 
984     if (is_aa64) {
985         switch (stride) {
986         case 13: /* 64KB Pages.  */
987             if (level == 0 || (level == 1 && outputsize <= 42)) {
988                 return false;
989             }
990             break;
991         case 11: /* 16KB Pages.  */
992             if (level == 0 || (level == 1 && outputsize <= 40)) {
993                 return false;
994             }
995             break;
996         case 9: /* 4KB Pages.  */
997             if (level == 0 && outputsize <= 42) {
998                 return false;
999             }
1000             break;
1001         default:
1002             g_assert_not_reached();
1003         }
1004 
1005         /* Inputsize checks.  */
1006         if (inputsize > outputsize &&
1007             (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
1008             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
1009             return false;
1010         }
1011     } else {
1012         /* AArch32 only supports 4KB pages. Assert on that.  */
1013         assert(stride == 9);
1014 
1015         if (level == 0) {
1016             return false;
1017         }
1018     }
1019     return true;
1020 }
1021 
1022 /**
1023  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
1024  *
1025  * Returns false if the translation was successful. Otherwise, the fields
1026  * of @result may not be filled in, and @fi is populated with information
1027  * on why the translation aborted, in the format of a long-format
1028  * DFSR/IFSR fault register, with the following caveat: the WnR bit is
1029  * never set (the caller must do this).
1030  *
1031  * @env: CPUARMState
1032  * @ptw: Current and next stage parameters for the walk.
1033  * @address: virtual address to get physical address for
1034  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
1035  * @s1_is_el0: if @ptw->in_mmu_idx is ARMMMUIdx_Stage2
1036  *             (so this is a stage 2 page table walk),
1037  *             must be true if this is stage 2 of a stage 1+2
1038  *             walk for an EL0 access. If @ptw->in_mmu_idx is anything else,
1039  *             @s1_is_el0 is ignored.
1040  * @result: set on translation success
1041  * @fi: set to fault info if the translation fails
1042  */
1043 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
1044                                uint64_t address,
1045                                MMUAccessType access_type, bool s1_is_el0,
1046                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1047 {
1048     ARMCPU *cpu = env_archcpu(env);
1049     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
1050     bool is_secure = ptw->in_secure;
1051     /* Read an LPAE long-descriptor translation table. */
1052     ARMFaultType fault_type = ARMFault_Translation;
1053     uint32_t level;
1054     ARMVAParameters param;
1055     uint64_t ttbr;
1056     hwaddr descaddr, indexmask, indexmask_grainsize;
1057     uint32_t tableattrs;
1058     target_ulong page_size;
1059     uint32_t attrs;
1060     int32_t stride;
1061     int addrsize, inputsize, outputsize;
1062     uint64_t tcr = regime_tcr(env, mmu_idx);
1063     int ap, ns, xn, pxn;
1064     uint32_t el = regime_el(env, mmu_idx);
1065     uint64_t descaddrmask;
1066     bool aarch64 = arm_el_is_aa64(env, el);
1067     bool guarded = false;
1068 
1069     /* TODO: This code does not support shareability levels. */
1070     if (aarch64) {
1071         int ps;
1072 
1073         param = aa64_va_parameters(env, address, mmu_idx,
1074                                    access_type != MMU_INST_FETCH);
1075         level = 0;
1076 
1077         /*
1078          * If TxSZ is programmed to a value larger than the maximum,
1079          * or smaller than the effective minimum, it is IMPLEMENTATION
1080          * DEFINED whether we behave as if the field were programmed
1081          * within bounds, or if a level 0 Translation fault is generated.
1082          *
1083          * With FEAT_LVA, fault on less than minimum becomes required,
1084          * so our choice is to always raise the fault.
1085          */
1086         if (param.tsz_oob) {
1087             fault_type = ARMFault_Translation;
1088             goto do_fault;
1089         }
1090 
1091         addrsize = 64 - 8 * param.tbi;
1092         inputsize = 64 - param.tsz;
1093 
1094         /*
1095          * Bound PS by PARANGE to find the effective output address size.
1096          * ID_AA64MMFR0 is a read-only register so values outside of the
1097          * supported mappings can be considered an implementation error.
1098          */
1099         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1100         ps = MIN(ps, param.ps);
1101         assert(ps < ARRAY_SIZE(pamax_map));
1102         outputsize = pamax_map[ps];
1103     } else {
1104         param = aa32_va_parameters(env, address, mmu_idx);
1105         level = 1;
1106         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1107         inputsize = addrsize - param.tsz;
1108         outputsize = 40;
1109     }
1110 
1111     /*
1112      * We determined the region when collecting the parameters, but we
1113      * have not yet validated that the address is valid for the region.
1114      * Extract the top bits and verify that they all match select.
1115      *
1116      * For aa32, if inputsize == addrsize, then we have selected the
1117      * region by exclusion in aa32_va_parameters and there is no more
1118      * validation to do here.
1119      */
1120     if (inputsize < addrsize) {
1121         target_ulong top_bits = sextract64(address, inputsize,
1122                                            addrsize - inputsize);
1123         if (-top_bits != param.select) {
1124             /* The gap between the two regions is a Translation fault */
1125             fault_type = ARMFault_Translation;
1126             goto do_fault;
1127         }
1128     }
1129 
1130     stride = arm_granule_bits(param.gran) - 3;
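    /*
     * stride is 9, 11 or 13 for 4KB, 16KB or 64KB granules respectively:
     * the granule bit size minus the 3 bits taken by the 8-byte descriptor.
     */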
1131 
1132     /*
1133      * Note that QEMU ignores shareability and cacheability attributes,
1134      * so we don't need to do anything with the SH, ORGN, IRGN fields
1135      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1136      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1137      * implement any ASID-like capability so we can ignore it (instead
1138      * we will always flush the TLB any time the ASID is changed).
1139      */
1140     ttbr = regime_ttbr(env, mmu_idx, param.select);
1141 
1142     /*
1143      * Here we should have set up all the parameters for the translation:
1144      * inputsize, ttbr, epd, stride, tbi
1145      */
1146 
1147     if (param.epd) {
1148         /*
1149          * Translation table walk disabled => Translation fault on TLB miss
1150          * Note: This is always 0 on 64-bit EL2 and EL3.
1151          */
1152         goto do_fault;
1153     }
1154 
1155     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
1156         /*
1157          * The starting level depends on the virtual address size (which can
1158          * be up to 48 bits) and the translation granule size. It indicates
1159          * the number of strides (stride bits at a time) needed to
1160          * consume the bits of the input address. In the pseudocode this is:
1161          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1162          * where their 'inputsize' is our 'inputsize', 'grainsize' is
1163          * our 'stride + 3' and 'stride' is our 'stride'.
1164          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1165          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1166          * = 4 - (inputsize - 4) / stride;
1167          */
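        /*
         * Illustrative example (not from the original source): with
         * inputsize == 48 and a 4KB granule (stride == 9) this gives
         * level = 4 - (48 - 4) / 9 = 4 - 4 = 0, i.e. a walk that starts
         * at level 0 and uses all four levels.
         */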
1168         level = 4 - (inputsize - 4) / stride;
1169     } else {
1170         /*
1171          * For stage 2 translations the starting level is specified by the
1172          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
1173          */
1174         uint32_t sl0 = extract32(tcr, 6, 2);
1175         uint32_t sl2 = extract64(tcr, 33, 1);
1176         uint32_t startlevel;
1177         bool ok;
1178 
1179         /* SL2 is RES0 unless DS=1 & 4kb granule. */
1180         if (param.ds && stride == 9 && sl2) {
1181             if (sl0 != 0) {
1182                 level = 0;
1183                 fault_type = ARMFault_Translation;
1184                 goto do_fault;
1185             }
1186             startlevel = -1;
1187         } else if (!aarch64 || stride == 9) {
1188             /* AArch32 or 4KB pages */
1189             startlevel = 2 - sl0;
1190 
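            /*
             * With FEAT_TTST (aa64_st), SL0 == 3 selects a level 3 start
             * for the 4KB granule; the masking below folds the unsigned
             * wrap-around of 2 - 3 back onto 3.
             */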
1191             if (cpu_isar_feature(aa64_st, cpu)) {
1192                 startlevel &= 3;
1193             }
1194         } else {
1195             /* 16KB or 64KB pages */
1196             startlevel = 3 - sl0;
1197         }
1198 
1199         /* Check that the starting level is valid. */
1200         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1201                                 inputsize, stride, outputsize);
1202         if (!ok) {
1203             fault_type = ARMFault_Translation;
1204             goto do_fault;
1205         }
1206         level = startlevel;
1207     }
1208 
1209     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1210     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1211 
1212     /* Now we can extract the actual base address from the TTBR */
1213     descaddr = extract64(ttbr, 0, 48);
1214 
1215     /*
1216      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1217      *
1218      * Otherwise, if the base address is out of range, raise AddressSizeFault.
1219      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1220      * but we've just cleared the bits above 47, so simplify the test.
1221      */
1222     if (outputsize > 48) {
1223         descaddr |= extract64(ttbr, 2, 4) << 48;
1224     } else if (descaddr >> outputsize) {
1225         level = 0;
1226         fault_type = ARMFault_AddressSize;
1227         goto do_fault;
1228     }
1229 
1230     /*
1231      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1232      * and also to mask out CnP (bit 0) which could validly be non-zero.
1233      */
1234     descaddr &= ~indexmask;
1235 
1236     /*
1237      * For AArch32, the address field in the descriptor goes up to bit 39
1238      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1239      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1240      * bits as part of the address, which will be checked via outputsize.
1241      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1242      * the highest bits of a 52-bit output are placed elsewhere.
1243      */
1244     if (param.ds) {
1245         descaddrmask = MAKE_64BIT_MASK(0, 50);
1246     } else if (arm_feature(env, ARM_FEATURE_V8)) {
1247         descaddrmask = MAKE_64BIT_MASK(0, 48);
1248     } else {
1249         descaddrmask = MAKE_64BIT_MASK(0, 40);
1250     }
1251     descaddrmask &= ~indexmask_grainsize;
1252 
1253     /*
1254      * Secure accesses start with the page table in secure memory and
1255      * can be downgraded to non-secure at any step. Non-secure accesses
1256      * remain non-secure. We implement this by just ORing in the NSTable/NS
1257      * bits at each step.
1258      */
1259     tableattrs = is_secure ? 0 : (1 << 4);
1260     for (;;) {
1261         uint64_t descriptor;
1262         bool nstable;
1263 
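        /*
         * Add in this level's table index.  The shift used is
         * stride * (4 - level), i.e. 3 less than the architectural
         * per-level address shift, which folds the 8-byte descriptor
         * size into the index; the stray low bits are cleared on the
         * next line.
         */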
1264         descaddr |= (address >> (stride * (4 - level))) & indexmask;
1265         descaddr &= ~7ULL;
1266         nstable = extract32(tableattrs, 4, 1);
1267         ptw->in_secure = !nstable;
1268         descriptor = arm_ldq_ptw(env, ptw, descaddr, fi);
1269         if (fi->type != ARMFault_None) {
1270             goto do_fault;
1271         }
1272 
1273         if (!(descriptor & 1) ||
1274             (!(descriptor & 2) && (level == 3))) {
1275             /* Invalid, or the Reserved level 3 encoding */
1276             goto do_fault;
1277         }
1278 
1279         descaddr = descriptor & descaddrmask;
1280 
1281         /*
1282          * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1283          * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1284          * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1285          * raise AddressSizeFault.
1286          */
1287         if (outputsize > 48) {
1288             if (param.ds) {
1289                 descaddr |= extract64(descriptor, 8, 2) << 50;
1290             } else {
1291                 descaddr |= extract64(descriptor, 12, 4) << 48;
1292             }
1293         } else if (descaddr >> outputsize) {
1294             fault_type = ARMFault_AddressSize;
1295             goto do_fault;
1296         }
1297 
1298         if ((descriptor & 2) && (level < 3)) {
1299             /*
1300              * Table entry. The top five bits are attributes which may
1301              * propagate down through lower levels of the table (and
1302              * which are all arranged so that 0 means "no effect", so
1303              * we can gather them up by ORing in the bits at each level).
1304              */
1305             tableattrs |= extract64(descriptor, 59, 5);
1306             level++;
1307             indexmask = indexmask_grainsize;
1308             continue;
1309         }
1310         /*
1311          * Block entry at level 1 or 2, or page entry at level 3.
1312          * These are basically the same thing, although the number
1313          * of bits we pull in from the vaddr varies. Note that although
1314          * descaddrmask masks enough of the low bits of the descriptor
1315          * to give a correct page or table address, the address field
1316          * in a block descriptor is smaller; so we need to explicitly
1317          * clear the lower bits here before ORing in the low vaddr bits.
1318          */
1319         page_size = (1ULL << ((stride * (4 - level)) + 3));
1320         descaddr &= ~(hwaddr)(page_size - 1);
1321         descaddr |= (address & (page_size - 1));
1322         /* Extract attributes from the descriptor */
1323         attrs = extract64(descriptor, 2, 10)
1324             | (extract64(descriptor, 52, 12) << 10);
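        /*
         * attrs[9:0] now holds descriptor bits [11:2] (the lower
         * attributes) and attrs[21:10] holds bits [63:52] (the upper
         * attributes); later extract32(attrs, ...) offsets refer to
         * this packed layout.
         */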
1325 
1326         if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1327             /* Stage 2 table descriptors do not include any attribute fields */
1328             break;
1329         }
1330         /* Merge in attributes from table descriptors */
1331         attrs |= nstable << 3; /* NS */
1332         guarded = extract64(descriptor, 50, 1);  /* GP */
1333         if (param.hpd) {
1334             /* HPD disables all the table attributes except NSTable.  */
1335             break;
1336         }
1337         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
1338         /*
1339          * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1340          * means "force PL1 access only", which means forcing AP[1] to 0.
1341          */
1342         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
1343         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
1344         break;
1345     }
1346     /*
1347      * Here descaddr is the final physical address, and attributes
1348      * are all in attrs.
1349      */
1350     fault_type = ARMFault_AccessFlag;
1351     if ((attrs & (1 << 8)) == 0) {
1352         /* Access flag */
1353         goto do_fault;
1354     }
1355 
1356     ap = extract32(attrs, 4, 2);
1357 
1358     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1359         ns = mmu_idx == ARMMMUIdx_Stage2;
1360         xn = extract32(attrs, 11, 2);
1361         result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
1362     } else {
1363         ns = extract32(attrs, 3, 1);
1364         xn = extract32(attrs, 12, 1);
1365         pxn = extract32(attrs, 11, 1);
1366         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1367     }
1368 
1369     fault_type = ARMFault_Permission;
1370     if (!(result->f.prot & (1 << access_type))) {
1371         goto do_fault;
1372     }
1373 
1374     if (ns) {
1375         /*
1376          * The NS bit will (as required by the architecture) have no effect if
1377          * the CPU doesn't support TZ or this is a non-secure translation
1378          * regime, because the attribute will already be non-secure.
1379          */
1380         result->f.attrs.secure = false;
1381     }
1382 
1383     /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB.  */
1384     if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
1385         result->f.guarded = guarded;
1386     }
1387 
1388     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1389         result->cacheattrs.is_s2_format = true;
1390         result->cacheattrs.attrs = extract32(attrs, 0, 4);
1391     } else {
1392         /* Index into MAIR registers for cache attributes */
1393         uint8_t attrindx = extract32(attrs, 0, 3);
1394         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1395         assert(attrindx <= 7);
1396         result->cacheattrs.is_s2_format = false;
1397         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
1398     }
1399 
1400     /*
1401      * For FEAT_LPA2 and effective DS, the SH field in the attributes
1402      * was re-purposed for output address bits.  The SH attribute in
1403      * that case comes from TCR_ELx, which we extracted earlier.
1404      */
1405     if (param.ds) {
1406         result->cacheattrs.shareability = param.sh;
1407     } else {
1408         result->cacheattrs.shareability = extract32(attrs, 6, 2);
1409     }
1410 
1411     result->f.phys_addr = descaddr;
1412     result->f.lg_page_size = ctz64(page_size);
1413     return false;
1414 
1415 do_fault:
1416     fi->type = fault_type;
1417     fi->level = level;
1418     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1419     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
1420                                mmu_idx == ARMMMUIdx_Stage2_S);
1421     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1422     return true;
1423 }
1424 
1425 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1426                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1427                                  bool is_secure, GetPhysAddrResult *result,
1428                                  ARMMMUFaultInfo *fi)
1429 {
1430     int n;
1431     uint32_t mask;
1432     uint32_t base;
1433     bool is_user = regime_is_user(env, mmu_idx);
1434 
1435     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
1436         /* MPU disabled.  */
1437         result->f.phys_addr = address;
1438         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1439         return false;
1440     }
1441 
1442     result->f.phys_addr = address;
1443     for (n = 7; n >= 0; n--) {
1444         base = env->cp15.c6_region[n];
1445         if ((base & 1) == 0) {
1446             continue;
1447         }
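        /*
         * Bits [5:1] of the region register encode the region size as a
         * power of two, 2^(N+1) bytes, hence the mask built below.
         */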
1448         mask = 1 << ((base >> 1) & 0x1f);
1449         /* Keep this shift separate from the above to avoid an
1450            (undefined) << 32.  */
1451         mask = (mask << 1) - 1;
1452         if (((base ^ address) & ~mask) == 0) {
1453             break;
1454         }
1455     }
1456     if (n < 0) {
1457         fi->type = ARMFault_Background;
1458         return true;
1459     }
1460 
1461     if (access_type == MMU_INST_FETCH) {
1462         mask = env->cp15.pmsav5_insn_ap;
1463     } else {
1464         mask = env->cp15.pmsav5_data_ap;
1465     }
1466     mask = (mask >> (n * 4)) & 0xf;
1467     switch (mask) {
1468     case 0:
1469         fi->type = ARMFault_Permission;
1470         fi->level = 1;
1471         return true;
1472     case 1:
1473         if (is_user) {
1474             fi->type = ARMFault_Permission;
1475             fi->level = 1;
1476             return true;
1477         }
1478         result->f.prot = PAGE_READ | PAGE_WRITE;
1479         break;
1480     case 2:
1481         result->f.prot = PAGE_READ;
1482         if (!is_user) {
1483             result->f.prot |= PAGE_WRITE;
1484         }
1485         break;
1486     case 3:
1487         result->f.prot = PAGE_READ | PAGE_WRITE;
1488         break;
1489     case 5:
1490         if (is_user) {
1491             fi->type = ARMFault_Permission;
1492             fi->level = 1;
1493             return true;
1494         }
1495         result->f.prot = PAGE_READ;
1496         break;
1497     case 6:
1498         result->f.prot = PAGE_READ;
1499         break;
1500     default:
1501         /* Bad permission.  */
1502         fi->type = ARMFault_Permission;
1503         fi->level = 1;
1504         return true;
1505     }
1506     result->f.prot |= PAGE_EXEC;
1507     return false;
1508 }
1509 
1510 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1511                                          int32_t address, uint8_t *prot)
1512 {
1513     if (!arm_feature(env, ARM_FEATURE_M)) {
1514         *prot = PAGE_READ | PAGE_WRITE;
1515         switch (address) {
1516         case 0xF0000000 ... 0xFFFFFFFF:
1517             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1518                 /* hivecs execing is ok */
1519                 *prot |= PAGE_EXEC;
1520             }
1521             break;
1522         case 0x00000000 ... 0x7FFFFFFF:
1523             *prot |= PAGE_EXEC;
1524             break;
1525         }
1526     } else {
1527         /* Default system address map for M profile cores.
1528          * The architecture specifies which regions are execute-never;
1529          * at the MPU level no other checks are defined.
1530          */
1531         switch (address) {
1532         case 0x00000000 ... 0x1fffffff: /* ROM */
1533         case 0x20000000 ... 0x3fffffff: /* SRAM */
1534         case 0x60000000 ... 0x7fffffff: /* RAM */
1535         case 0x80000000 ... 0x9fffffff: /* RAM */
1536             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1537             break;
1538         case 0x40000000 ... 0x5fffffff: /* Peripheral */
1539         case 0xa0000000 ... 0xbfffffff: /* Device */
1540         case 0xc0000000 ... 0xdfffffff: /* Device */
1541         case 0xe0000000 ... 0xffffffff: /* System */
1542             *prot = PAGE_READ | PAGE_WRITE;
1543             break;
1544         default:
1545             g_assert_not_reached();
1546         }
1547     }
1548 }
1549 
1550 static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1551 {
1552     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1553     return arm_feature(env, ARM_FEATURE_M) &&
1554         extract32(address, 20, 12) == 0xe00;
1555 }
1556 
1557 static bool m_is_system_region(CPUARMState *env, uint32_t address)
1558 {
1559     /*
1560      * True if address is in the M profile system region
1561      * 0xe0000000 - 0xffffffff
1562      */
1563     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1564 }
1565 
1566 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1567                                          bool is_secure, bool is_user)
1568 {
1569     /*
1570      * Return true if we should use the default memory map as a
1571      * "background" region if there are no hits against any MPU regions.
1572      */
1573     CPUARMState *env = &cpu->env;
1574 
1575     if (is_user) {
1576         return false;
1577     }
1578 
1579     if (arm_feature(env, ARM_FEATURE_M)) {
1580         return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1581     } else {
1582         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1583     }
1584 }
1585 
1586 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1587                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1588                                  bool secure, GetPhysAddrResult *result,
1589                                  ARMMMUFaultInfo *fi)
1590 {
1591     ARMCPU *cpu = env_archcpu(env);
1592     int n;
1593     bool is_user = regime_is_user(env, mmu_idx);
1594 
1595     result->f.phys_addr = address;
1596     result->f.lg_page_size = TARGET_PAGE_BITS;
1597     result->f.prot = 0;
1598 
1599     if (regime_translation_disabled(env, mmu_idx, secure) ||
1600         m_is_ppb_region(env, address)) {
1601         /*
1602          * MPU disabled or M profile PPB access: use default memory map.
1603          * The other case which uses the default memory map in the
1604          * v7M ARM ARM pseudocode is exception vector reads from the vector
1605          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1606          * which always does a direct read using address_space_ldl(), rather
1607          * than going via this function, so we don't need to check that here.
1608          */
1609         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1610     } else { /* MPU enabled */
1611         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1612             /* region search */
1613             uint32_t base = env->pmsav7.drbar[n];
1614             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1615             uint32_t rmask;
1616             bool srdis = false;
1617 
1618             if (!(env->pmsav7.drsr[n] & 0x1)) {
1619                 continue;
1620             }
1621 
1622             if (!rsize) {
1623                 qemu_log_mask(LOG_GUEST_ERROR,
1624                               "DRSR[%d]: Rsize field cannot be 0\n", n);
1625                 continue;
1626             }
1627             rsize++;
1628             rmask = (1ull << rsize) - 1;
1629 
1630             if (base & rmask) {
1631                 qemu_log_mask(LOG_GUEST_ERROR,
1632                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1633                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
1634                               n, base, rmask);
1635                 continue;
1636             }
1637 
1638             if (address < base || address > base + rmask) {
1639                 /*
1640                  * Address not in this region. We must check whether the
1641                  * region covers addresses in the same page as our address.
1642                  * In that case we must not report a size that covers the
1643                  * whole page for a subsequent hit against a different MPU
1644                  * region or the background region, because it would result in
1645                  * incorrect TLB hits for subsequent accesses to addresses that
1646                  * are in this MPU region.
1647                  */
1648                 if (ranges_overlap(base, rmask,
1649                                    address & TARGET_PAGE_MASK,
1650                                    TARGET_PAGE_SIZE)) {
1651                     result->f.lg_page_size = 0;
1652                 }
1653                 continue;
1654             }
1655 
1656             /* Region matched */
1657 
1658             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1659                 int i, snd;
1660                 uint32_t srdis_mask;
1661 
1662                 rsize -= 3; /* sub region size (power of 2) */
1663                 snd = ((address - base) >> rsize) & 0x7;
1664                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1665 
1666                 srdis_mask = srdis ? 0x3 : 0x0;
1667                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1668                     /*
1669                      * This will check in groups of 2, 4 and then 8, whether
1670                      * the subregion bits are consistent. rsize is incremented
1671                      * back up to give the region size, considering consistent
1672                      * adjacent subregions as one region. Stop testing if rsize
1673                      * is already big enough for an entire QEMU page.
1674                      */
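                    /*
                     * Illustration: for a 1KB region (rsize 10) split into
                     * 128-byte subregions (rsize 7 at this point), a pair of
                     * adjacent subregions with matching SRD bits grows rsize
                     * back to 8 (256 bytes), a consistent aligned group of
                     * four grows it to 9, and all eight matching restores
                     * the full rsize of 10.
                     */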
1675                     int snd_rounded = snd & ~(i - 1);
1676                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1677                                                      snd_rounded + 8, i);
1678                     if (srdis_mask ^ srdis_multi) {
1679                         break;
1680                     }
1681                     srdis_mask = (srdis_mask << i) | srdis_mask;
1682                     rsize++;
1683                 }
1684             }
1685             if (srdis) {
1686                 continue;
1687             }
1688             if (rsize < TARGET_PAGE_BITS) {
1689                 result->f.lg_page_size = rsize;
1690             }
1691             break;
1692         }
1693 
1694         if (n == -1) { /* no hits */
1695             if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1696                 /* background fault */
1697                 fi->type = ARMFault_Background;
1698                 return true;
1699             }
1700             get_phys_addr_pmsav7_default(env, mmu_idx, address,
1701                                          &result->f.prot);
1702         } else { /* an MPU hit! */
1703             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1704             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1705 
1706             if (m_is_system_region(env, address)) {
1707                 /* System space is always execute never */
1708                 xn = 1;
1709             }
1710 
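            /*
             * AP field decode, as implemented by the switches below:
             *   0 -> no access; 1 -> priv RW only; 2 -> priv RW, user RO;
             *   3 -> RW for all; 5 -> priv RO only; 6 -> RO for all;
             *   7 -> RO for all on v7M (reserved for R profile).
             */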
1711             if (is_user) { /* User mode AP bit decoding */
1712                 switch (ap) {
1713                 case 0:
1714                 case 1:
1715                 case 5:
1716                     break; /* no access */
1717                 case 3:
1718                     result->f.prot |= PAGE_WRITE;
1719                     /* fall through */
1720                 case 2:
1721                 case 6:
1722                     result->f.prot |= PAGE_READ | PAGE_EXEC;
1723                     break;
1724                 case 7:
1725                     /* for v7M, same as 6; for R profile a reserved value */
1726                     if (arm_feature(env, ARM_FEATURE_M)) {
1727                         result->f.prot |= PAGE_READ | PAGE_EXEC;
1728                         break;
1729                     }
1730                     /* fall through */
1731                 default:
1732                     qemu_log_mask(LOG_GUEST_ERROR,
1733                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1734                                   PRIx32 "\n", n, ap);
1735                 }
1736             } else { /* Priv. mode AP bits decoding */
1737                 switch (ap) {
1738                 case 0:
1739                     break; /* no access */
1740                 case 1:
1741                 case 2:
1742                 case 3:
1743                     result->f.prot |= PAGE_WRITE;
1744                     /* fall through */
1745                 case 5:
1746                 case 6:
1747                     result->f.prot |= PAGE_READ | PAGE_EXEC;
1748                     break;
1749                 case 7:
1750                     /* for v7M, same as 6; for R profile a reserved value */
1751                     if (arm_feature(env, ARM_FEATURE_M)) {
1752                         result->f.prot |= PAGE_READ | PAGE_EXEC;
1753                         break;
1754                     }
1755                     /* fall through */
1756                 default:
1757                     qemu_log_mask(LOG_GUEST_ERROR,
1758                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1759                                   PRIx32 "\n", n, ap);
1760                 }
1761             }
1762 
1763             /* execute never */
1764             if (xn) {
1765                 result->f.prot &= ~PAGE_EXEC;
1766             }
1767         }
1768     }
1769 
1770     fi->type = ARMFault_Permission;
1771     fi->level = 1;
1772     return !(result->f.prot & (1 << access_type));
1773 }
1774 
1775 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1776                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1777                        bool secure, GetPhysAddrResult *result,
1778                        ARMMMUFaultInfo *fi, uint32_t *mregion)
1779 {
1780     /*
1781      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1782      * that a full phys-to-virt translation does).
1783      * mregion is (if not NULL) set to the region number which matched,
1784      * or -1 if no region number is returned (MPU off, address did not
1785      * hit a region, address hit in multiple regions).
1786      * If the region hit doesn't cover the entire TARGET_PAGE that the
1787      * address is within, then we set result->f.lg_page_size to 0 to force
1788      * the memory system to use a subpage.
1789      */
1790     ARMCPU *cpu = env_archcpu(env);
1791     bool is_user = regime_is_user(env, mmu_idx);
1792     int n;
1793     int matchregion = -1;
1794     bool hit = false;
1795     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1796     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1797 
1798     result->f.lg_page_size = TARGET_PAGE_BITS;
1799     result->f.phys_addr = address;
1800     result->f.prot = 0;
1801     if (mregion) {
1802         *mregion = -1;
1803     }
1804 
1805     /*
1806      * Unlike the ARM ARM pseudocode, we don't need to check whether this
1807      * was an exception vector read from the vector table (which is always
1808      * done using the default system address map), because those accesses
1809      * are done in arm_v7m_load_vector(), which always does a direct
1810      * read using address_space_ldl(), rather than going via this function.
1811      */
1812     if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
1813         hit = true;
1814     } else if (m_is_ppb_region(env, address)) {
1815         hit = true;
1816     } else {
1817         if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1818             hit = true;
1819         }
1820 
1821         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1822             /* region search */
1823             /*
1824              * Note that the base address is bits [31:5] from the register
1825              * with bits [4:0] all zeroes, but the limit address is bits
1826              * [31:5] from the register with bits [4:0] all ones.
1827              */
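            /*
             * For example, RBAR 0x20000000 with RLAR 0x200003e1 (enable
             * bit set) describes the 1KB region 0x20000000..0x200003ff.
             */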
1828             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1829             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
1830 
1831             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1832                 /* Region disabled */
1833                 continue;
1834             }
1835 
1836             if (address < base || address > limit) {
1837                 /*
1838                  * Address not in this region. We must check whether the
1839                  * region covers addresses in the same page as our address.
1840                  * In that case we must not report a size that covers the
1841                  * whole page for a subsequent hit against a different MPU
1842                  * region or the background region, because it would result in
1843                  * incorrect TLB hits for subsequent accesses to addresses that
1844                  * are in this MPU region.
1845                  */
1846                 if (limit >= base &&
1847                     ranges_overlap(base, limit - base + 1,
1848                                    addr_page_base,
1849                                    TARGET_PAGE_SIZE)) {
1850                     result->f.lg_page_size = 0;
1851                 }
1852                 continue;
1853             }
1854 
1855             if (base > addr_page_base || limit < addr_page_limit) {
1856                 result->f.lg_page_size = 0;
1857             }
1858 
1859             if (matchregion != -1) {
1860                 /*
1861                  * Multiple regions match -- always a failure (unlike
1862                  * PMSAv7 where highest-numbered-region wins)
1863                  */
1864                 fi->type = ARMFault_Permission;
1865                 fi->level = 1;
1866                 return true;
1867             }
1868 
1869             matchregion = n;
1870             hit = true;
1871         }
1872     }
1873 
1874     if (!hit) {
1875         /* background fault */
1876         fi->type = ARMFault_Background;
1877         return true;
1878     }
1879 
1880     if (matchregion == -1) {
1881         /* hit using the background region */
1882         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1883     } else {
1884         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
1885         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
1886         bool pxn = false;
1887 
1888         if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1889             pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
1890         }
1891 
1892         if (m_is_system_region(env, address)) {
1893             /* System space is always execute never */
1894             xn = 1;
1895         }
1896 
1897         result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
1898         if (result->f.prot && !xn && !(pxn && !is_user)) {
1899             result->f.prot |= PAGE_EXEC;
1900         }
1901         /*
1902          * We don't need to look the attribute up in the MAIR0/MAIR1
1903          * registers because that only tells us about cacheability.
1904          */
1905         if (mregion) {
1906             *mregion = matchregion;
1907         }
1908     }
1909 
1910     fi->type = ARMFault_Permission;
1911     fi->level = 1;
1912     return !(result->f.prot & (1 << access_type));
1913 }
1914 
1915 static bool v8m_is_sau_exempt(CPUARMState *env,
1916                               uint32_t address, MMUAccessType access_type)
1917 {
1918     /*
1919      * The architecture specifies that certain address ranges are
1920      * exempt from v8M SAU/IDAU checks.
1921      */
1922     return
1923         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
1924         (address >= 0xe0000000 && address <= 0xe0002fff) ||
1925         (address >= 0xe000e000 && address <= 0xe000efff) ||
1926         (address >= 0xe002e000 && address <= 0xe002efff) ||
1927         (address >= 0xe0040000 && address <= 0xe0041fff) ||
1928         (address >= 0xe00ff000 && address <= 0xe00fffff);
1929 }
1930 
1931 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1932                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1933                          bool is_secure, V8M_SAttributes *sattrs)
1934 {
1935     /*
1936      * Look up the security attributes for this address. Compare the
1937      * pseudocode SecurityCheck() function.
1938      * We assume the caller has zero-initialized *sattrs.
1939      */
1940     ARMCPU *cpu = env_archcpu(env);
1941     int r;
1942     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
1943     int idau_region = IREGION_NOTVALID;
1944     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1945     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1946 
1947     if (cpu->idau) {
1948         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
1949         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
1950 
1951         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
1952                    &idau_nsc);
1953     }
1954 
1955     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
1956         /* 0xf0000000..0xffffffff is always S for insn fetches */
1957         return;
1958     }
1959 
1960     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
1961         sattrs->ns = !is_secure;
1962         return;
1963     }
1964 
1965     if (idau_region != IREGION_NOTVALID) {
1966         sattrs->irvalid = true;
1967         sattrs->iregion = idau_region;
1968     }
1969 
1970     switch (env->sau.ctrl & 3) {
1971     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
1972         break;
1973     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
1974         sattrs->ns = true;
1975         break;
1976     default: /* SAU.ENABLE == 1 */
1977         for (r = 0; r < cpu->sau_sregion; r++) {
1978             if (env->sau.rlar[r] & 1) {
1979                 uint32_t base = env->sau.rbar[r] & ~0x1f;
1980                 uint32_t limit = env->sau.rlar[r] | 0x1f;
1981 
1982                 if (base <= address && limit >= address) {
1983                     if (base > addr_page_base || limit < addr_page_limit) {
1984                         sattrs->subpage = true;
1985                     }
1986                     if (sattrs->srvalid) {
1987                         /*
1988                          * If we hit in more than one region then we must report
1989                          * as Secure, not NS-Callable, with no valid region
1990                          * number info.
1991                          */
1992                         sattrs->ns = false;
1993                         sattrs->nsc = false;
1994                         sattrs->sregion = 0;
1995                         sattrs->srvalid = false;
1996                         break;
1997                     } else {
1998                         if (env->sau.rlar[r] & 2) {
1999                             sattrs->nsc = true;
2000                         } else {
2001                             sattrs->ns = true;
2002                         }
2003                         sattrs->srvalid = true;
2004                         sattrs->sregion = r;
2005                     }
2006                 } else {
2007                     /*
2008                      * Address not in this region. We must check whether the
2009                      * region covers addresses in the same page as our address.
2010                      * In that case we must not report a size that covers the
2011                      * whole page for a subsequent hit against a different MPU
2012                      * region or the background region, because it would result
2013                      * in incorrect TLB hits for subsequent accesses to
2014                      * addresses that are in this MPU region.
2015                      */
2016                     if (limit >= base &&
2017                         ranges_overlap(base, limit - base + 1,
2018                                        addr_page_base,
2019                                        TARGET_PAGE_SIZE)) {
2020                         sattrs->subpage = true;
2021                     }
2022                 }
2023             }
2024         }
2025         break;
2026     }
2027 
2028     /*
2029      * The IDAU will override the SAU lookup results if it specifies
2030      * higher security than the SAU does.
2031      */
2032     if (!idau_ns) {
2033         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
2034             sattrs->ns = false;
2035             sattrs->nsc = idau_nsc;
2036         }
2037     }
2038 }
2039 
2040 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
2041                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
2042                                  bool secure, GetPhysAddrResult *result,
2043                                  ARMMMUFaultInfo *fi)
2044 {
2045     V8M_SAttributes sattrs = {};
2046     bool ret;
2047 
2048     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2049         v8m_security_lookup(env, address, access_type, mmu_idx,
2050                             secure, &sattrs);
2051         if (access_type == MMU_INST_FETCH) {
2052             /*
2053              * Instruction fetches always use the MMU bank and the
2054              * transaction attribute determined by the fetch address,
2055              * regardless of CPU state. This is painful for QEMU
2056              * to handle, because it would mean we need to encode
2057              * into the mmu_idx not just the (user, negpri) information
2058              * for the current security state but also that for the
2059              * other security state, which would balloon the number
2060              * of mmu_idx values needed alarmingly.
2061              * Fortunately we can avoid this because it's not actually
2062              * possible to arbitrarily execute code from memory with
2063              * the wrong security attribute: it will always generate
2064              * an exception of some kind or another, apart from the
2065              * special case of an NS CPU executing an SG instruction
2066              * in S&NSC memory. So we always just fail the translation
2067              * here and sort things out in the exception handler
2068              * (including possibly emulating an SG instruction).
2069              */
2070             if (sattrs.ns != !secure) {
2071                 if (sattrs.nsc) {
2072                     fi->type = ARMFault_QEMU_NSCExec;
2073                 } else {
2074                     fi->type = ARMFault_QEMU_SFault;
2075                 }
2076                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2077                 result->f.phys_addr = address;
2078                 result->f.prot = 0;
2079                 return true;
2080             }
2081         } else {
2082             /*
2083              * For data accesses we always use the MMU bank indicated
2084              * by the current CPU state, but the security attributes
2085              * might downgrade a secure access to nonsecure.
2086              */
2087             if (sattrs.ns) {
2088                 result->f.attrs.secure = false;
2089             } else if (!secure) {
2090                 /*
2091                  * NS access to S memory must fault.
2092                  * Architecturally we should first check whether the
2093                  * MPU information for this address indicates that we
2094                  * are doing an unaligned access to Device memory, which
2095                  * should generate a UsageFault instead. QEMU does not
2096                  * currently check for that kind of unaligned access though.
2097                  * If we added it we would need to do so as a special case
2098                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2099                  */
2100                 fi->type = ARMFault_QEMU_SFault;
2101                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2102                 result->f.phys_addr = address;
2103                 result->f.prot = 0;
2104                 return true;
2105             }
2106         }
2107     }
2108 
2109     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2110                             result, fi, NULL);
2111     if (sattrs.subpage) {
2112         result->f.lg_page_size = 0;
2113     }
2114     return ret;
2115 }
2116 
2117 /*
2118  * Translate from the 4-bit stage 2 representation of
2119  * memory attributes (without cache-allocation hints) to
2120  * the 8-bit representation of the stage 1 MAIR registers
2121  * (which includes allocation hints).
2122  *
2123  * ref: shared/translation/attrs/S2AttrDecode()
2124  *      .../S2ConvertAttrsHints()
2125  */
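/*
 * For example, assuming HCR_EL2.CD is clear, a stage 2 attribute of 0xf
 * (Outer and Inner Write-Back) converts to MAIR 0xff (Write-Back,
 * RW-allocate in both halves), while 0x5 (Outer and Inner Non-cacheable)
 * converts to 0x44.
 */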
2126 static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2127 {
2128     uint8_t hiattr = extract32(s2attrs, 2, 2);
2129     uint8_t loattr = extract32(s2attrs, 0, 2);
2130     uint8_t hihint = 0, lohint = 0;
2131 
2132     if (hiattr != 0) { /* normal memory */
2133         if (hcr & HCR_CD) { /* cache disabled */
2134             hiattr = loattr = 1; /* non-cacheable */
2135         } else {
2136             if (hiattr != 1) { /* Write-through or write-back */
2137                 hihint = 3; /* RW allocate */
2138             }
2139             if (loattr != 1) { /* Write-through or write-back */
2140                 lohint = 3; /* RW allocate */
2141             }
2142         }
2143     }
2144 
2145     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2146 }
2147 
2148 /*
2149  * Combine either inner or outer cacheability attributes for normal
2150  * memory, according to table D4-42 and pseudocode procedure
2151  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2152  *
2153  * NB: only stage 1 includes allocation hints (RW bits), leading to
2154  * some asymmetry.
2155  */
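/*
 * For example, a stage 1 nibble of 0xf (Write-Back, RW-allocate) combined
 * with a stage 2 nibble of 0x8 (Write-Through, no allocation hints) yields
 * 0xb: Write-Through, but keeping the stage 1 RW-allocate hint.
 */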
2156 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2157 {
2158     if (s1 == 4 || s2 == 4) {
2159         /* non-cacheable has precedence */
2160         return 4;
2161     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2162         /* stage 1 write-through takes precedence */
2163         return s1;
2164     } else if (extract32(s2, 2, 2) == 2) {
2165         /* stage 2 write-through takes precedence, but the allocation hint
2166          * is still taken from stage 1
2167          */
2168         return (2 << 2) | extract32(s1, 0, 2);
2169     } else { /* write-back */
2170         return s1;
2171     }
2172 }
2173 
2174 /*
2175  * Combine the memory type and cacheability attributes of
2176  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2177  * combined attributes in MAIR_EL1 format.
2178  */
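/*
 * For example, stage 1 Normal Write-Back (0xff) combined with a stage 2
 * Device nGnRnE mapping yields Device nGnRnE (0x00): Device always wins,
 * and the strictest Device subtype wins.
 */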
2179 static uint8_t combined_attrs_nofwb(uint64_t hcr,
2180                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
2181 {
2182     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2183 
2184     s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2185 
2186     s1lo = extract32(s1.attrs, 0, 4);
2187     s2lo = extract32(s2_mair_attrs, 0, 4);
2188     s1hi = extract32(s1.attrs, 4, 4);
2189     s2hi = extract32(s2_mair_attrs, 4, 4);
2190 
2191     /* Combine memory type and cacheability attributes */
2192     if (s1hi == 0 || s2hi == 0) {
2193         /* Device has precedence over normal */
2194         if (s1lo == 0 || s2lo == 0) {
2195             /* nGnRnE has precedence over anything */
2196             ret_attrs = 0;
2197         } else if (s1lo == 4 || s2lo == 4) {
2198             /* non-Reordering has precedence over Reordering */
2199             ret_attrs = 4;  /* nGnRE */
2200         } else if (s1lo == 8 || s2lo == 8) {
2201             /* non-Gathering has precedence over Gathering */
2202             ret_attrs = 8;  /* nGRE */
2203         } else {
2204             ret_attrs = 0xc; /* GRE */
2205         }
2206     } else { /* Normal memory */
2207         /* Outer/inner cacheability combine independently */
2208         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2209                   | combine_cacheattr_nibble(s1lo, s2lo);
2210     }
2211     return ret_attrs;
2212 }
2213 
2214 static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2215 {
2216     /*
2217      * Given the 4 bits specifying the outer or inner cacheability
2218      * in MAIR format, return a value specifying Normal Write-Back,
2219      * with the allocation and transient hints taken from the input
2220      * if the input specified some kind of cacheable attribute.
2221      */
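    /*
     * For example, 0x4 (Non-cacheable) becomes 0xf (Write-Back, RW
     * allocate), while 0xa (Write-Through, Read-allocate) becomes 0xe
     * (Write-Back, keeping the Read-allocate hint).
     */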
2222     if (attr == 0 || attr == 4) {
2223         /*
2224          * 0 == an UNPREDICTABLE encoding
2225          * 4 == Non-cacheable
2226          * Either way, force Write-Back RW allocate non-transient
2227          */
2228         return 0xf;
2229     }
2230     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2231     return attr | 4;
2232 }
2233 
2234 /*
2235  * Combine the memory type and cacheability attributes of
2236  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2237  * combined attributes in MAIR_EL1 format.
2238  */
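/*
 * For example, S2 attrs of 6 force Write-Back: Device S1 attrs become 0xff,
 * while Normal S1 attrs keep their allocation hints. S2 attrs of 0..3 force
 * the matching Device type, e.g. 1 yields 0x04 (Device nGnRE).
 */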
2239 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
2240 {
2241     switch (s2.attrs) {
2242     case 7:
2243         /* Use stage 1 attributes */
2244         return s1.attrs;
2245     case 6:
2246         /*
2247          * Force Normal Write-Back. Note that if S1 is Normal cacheable
2248          * then we take the allocation hints from it; otherwise it is
2249          * RW allocate, non-transient.
2250          */
2251         if ((s1.attrs & 0xf0) == 0) {
2252             /* S1 is Device */
2253             return 0xff;
2254         }
2255         /* Need to check the Inner and Outer nibbles separately */
2256         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2257             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2258     case 5:
2259         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2260         if ((s1.attrs & 0xf0) == 0) {
2261             return s1.attrs;
2262         }
2263         return 0x44;
2264     case 0 ... 3:
2265         /* Force Device, of subtype specified by S2 */
2266         return s2.attrs << 2;
2267     default:
2268         /*
2269          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2270          * arbitrarily force Device.
2271          */
2272         return 0;
2273     }
2274 }
2275 
2276 /*
2277  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2278  * and CombineS1S2Desc()
2279  *
2280  * @env:     CPUARMState
2281  * @s1:      Attributes from stage 1 walk
2282  * @s2:      Attributes from stage 2 walk
2283  */
2284 static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
2285                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
2286 {
2287     ARMCacheAttrs ret;
2288     bool tagged = false;
2289 
2290     assert(s2.is_s2_format && !s1.is_s2_format);
2291     ret.is_s2_format = false;
2292 
2293     if (s1.attrs == 0xf0) {
2294         tagged = true;
2295         s1.attrs = 0xff;
2296     }
2297 
2298     /* Combine shareability attributes (table D4-43) */
2299     if (s1.shareability == 2 || s2.shareability == 2) {
2300         /* if either are outer-shareable, the result is outer-shareable */
2301         ret.shareability = 2;
2302     } else if (s1.shareability == 3 || s2.shareability == 3) {
2303         /* if either are inner-shareable, the result is inner-shareable */
2304         ret.shareability = 3;
2305     } else {
2306         /* both non-shareable */
2307         ret.shareability = 0;
2308     }
2309 
2310     /* Combine memory type and cacheability attributes */
2311     if (hcr & HCR_FWB) {
2312         ret.attrs = combined_attrs_fwb(s1, s2);
2313     } else {
2314         ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
2315     }
2316 
2317     /*
2318      * Any location for which the resultant memory type is any
2319      * type of Device memory is always treated as Outer Shareable.
2320      * Any location for which the resultant memory type is Normal
2321      * Inner Non-cacheable, Outer Non-cacheable is always treated
2322      * as Outer Shareable.
2323      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2324      */
2325     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2326         ret.shareability = 2;
2327     }
2328 
2329     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2330     if (tagged && ret.attrs == 0xff) {
2331         ret.attrs = 0xf0;
2332     }
2333 
2334     return ret;
2335 }
2336 
2337 /*
2338  * MMU disabled.  S1 addresses within aa64 translation regimes are
2339  * still checked for bounds -- see AArch64.S1DisabledOutput().
2340  */
2341 static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2342                                    MMUAccessType access_type,
2343                                    ARMMMUIdx mmu_idx, bool is_secure,
2344                                    GetPhysAddrResult *result,
2345                                    ARMMMUFaultInfo *fi)
2346 {
2347     uint8_t memattr = 0x00;    /* Device nGnRnE */
2348     uint8_t shareability = 0;  /* non-shareable */
2349     int r_el;
2350 
2351     switch (mmu_idx) {
2352     case ARMMMUIdx_Stage2:
2353     case ARMMMUIdx_Stage2_S:
2354     case ARMMMUIdx_Phys_NS:
2355     case ARMMMUIdx_Phys_S:
2356         break;
2357 
2358     default:
2359         r_el = regime_el(env, mmu_idx);
2360         if (arm_el_is_aa64(env, r_el)) {
2361             int pamax = arm_pamax(env_archcpu(env));
2362             uint64_t tcr = env->cp15.tcr_el[r_el];
2363             int addrtop, tbi;
2364 
2365             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2366             if (access_type == MMU_INST_FETCH) {
2367                 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2368             }
2369             tbi = (tbi >> extract64(address, 55, 1)) & 1;
2370             addrtop = (tbi ? 55 : 63);
2371 
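            /*
             * For example, with a 40-bit PAMax and TBI disabled, any
             * nonzero bit in [63:40] of the address faults here.
             */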
2372             if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2373                 fi->type = ARMFault_AddressSize;
2374                 fi->level = 0;
2375                 fi->stage2 = false;
2376                 return true;
2377             }
2378 
2379             /*
2380              * When TBI is disabled, we've just validated that all of the
2381              * bits above PAMax are zero, so logically we only need to
2382              * clear the top byte for TBI.  But it's clearer to follow
2383              * the pseudocode set of addrdesc.paddress.
2384              */
2385             address = extract64(address, 0, 52);
2386         }
2387 
2388         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2389         if (r_el == 1) {
2390             uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2391             if (hcr & HCR_DC) {
2392                 if (hcr & HCR_DCT) {
2393                     memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2394                 } else {
2395                     memattr = 0xff;  /* Normal, WB, RWA */
2396                 }
2397             }
2398         }
2399         if (memattr == 0 && access_type == MMU_INST_FETCH) {
2400             if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2401                 memattr = 0xee;  /* Normal, WT, RA, NT */
2402             } else {
2403                 memattr = 0x44;  /* Normal, NC, No */
2404             }
2405             shareability = 2; /* outer sharable */
2406             shareability = 2; /* outer shareable */
2407         result->cacheattrs.is_s2_format = false;
2408         break;
2409     }
2410 
2411     result->f.phys_addr = address;
2412     result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2413     result->f.lg_page_size = TARGET_PAGE_BITS;
2414     result->cacheattrs.shareability = shareability;
2415     result->cacheattrs.attrs = memattr;
2416     return false;
2417 }
2418 
2419 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
2420                                    target_ulong address,
2421                                    MMUAccessType access_type,
2422                                    GetPhysAddrResult *result,
2423                                    ARMMMUFaultInfo *fi)
2424 {
2425     hwaddr ipa;
2426     int s1_prot;
2427     bool is_secure = ptw->in_secure;
2428     bool ret, ipa_secure, s2walk_secure;
2429     ARMCacheAttrs cacheattrs1;
2430     bool is_el0;
2431     uint64_t hcr;
2432 
2433     ret = get_phys_addr_with_struct(env, ptw, address, access_type, result, fi);
2434 
2435     /* If S1 fails or S2 is disabled, return early.  */
2436     if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
2437         return ret;
2438     }
2439 
2440     ipa = result->f.phys_addr;
2441     ipa_secure = result->f.attrs.secure;
2442     if (is_secure) {
2443         /* Select TCR based on the NS bit from the S1 walk. */
2444         s2walk_secure = !(ipa_secure
2445                           ? env->cp15.vstcr_el2 & VSTCR_SW
2446                           : env->cp15.vtcr_el2 & VTCR_NSW);
2447     } else {
2448         assert(!ipa_secure);
2449         s2walk_secure = false;
2450     }
2451 
2452     is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
2453     ptw->in_mmu_idx = s2walk_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2454     ptw->in_secure = s2walk_secure;
2455 
2456     /*
2457      * S1 is done, now do S2 translation.
2458      * Save the stage1 results so that we may merge prot and cacheattrs later.
2459      */
2460     s1_prot = result->f.prot;
2461     cacheattrs1 = result->cacheattrs;
2462     memset(result, 0, sizeof(*result));
2463 
2464     ret = get_phys_addr_lpae(env, ptw, ipa, access_type, is_el0, result, fi);
2465     fi->s2addr = ipa;
2466 
2467     /* Combine the S1 and S2 perms.  */
2468     result->f.prot &= s1_prot;
2469 
2470     /* If S2 fails, return early.  */
2471     if (ret) {
2472         return ret;
2473     }
2474 
2475     /* Combine the S1 and S2 cache attributes. */
2476     hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2477     if (hcr & HCR_DC) {
2478         /*
2479          * HCR.DC forces the first stage attributes to
2480          *  Normal Non-Shareable,
2481          *  Inner Write-Back Read-Allocate Write-Allocate,
2482          *  Outer Write-Back Read-Allocate Write-Allocate.
2483          * Do not overwrite Tagged within attrs.
2484          */
2485         if (cacheattrs1.attrs != 0xf0) {
2486             cacheattrs1.attrs = 0xff;
2487         }
2488         cacheattrs1.shareability = 0;
2489     }
2490     result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
2491                                             result->cacheattrs);
2492 
2493     /*
2494      * Check if IPA translates to secure or non-secure PA space.
2495      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
2496      */
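    /*
     * For example, a Secure IPA stays Secure only while VSTCR.{SA,SW} are
     * both zero; a Non-secure IPA can only yield a Secure PA if
     * VTCR.{NSA,NSW} are also both zero.
     */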
2497     result->f.attrs.secure =
2498         (is_secure
2499          && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
2500          && (ipa_secure
2501              || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
2502 
2503     return false;
2504 }
2505 
2506 static bool get_phys_addr_with_struct(CPUARMState *env, S1Translate *ptw,
2507                                       target_ulong address,
2508                                       MMUAccessType access_type,
2509                                       GetPhysAddrResult *result,
2510                                       ARMMMUFaultInfo *fi)
2511 {
2512     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2513     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
2514     bool is_secure = ptw->in_secure;
2515 
2516     if (mmu_idx != s1_mmu_idx) {
2517         /*
2518          * Call ourselves recursively to do the stage 1 and then stage 2
2519          * translations if mmu_idx is a two-stage regime and EL2 is present.
2520          * Otherwise, a stage1+stage2 translation is just stage 1.
2521          */
2522         ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
2523         if (arm_feature(env, ARM_FEATURE_EL2)) {
2524             return get_phys_addr_twostage(env, ptw, address, access_type,
2525                                           result, fi);
2526         }
2527     }
2528 
2529     /*
2530      * The page table entries may downgrade secure to non-secure, but
2531      * cannot upgrade a non-secure translation regime's attributes
2532      * to secure.
2533      */
2534     result->f.attrs.secure = is_secure;
2535     result->f.attrs.user = regime_is_user(env, mmu_idx);
2536 
2537     /*
2538      * Fast Context Switch Extension. This doesn't exist at all in v8.
2539      * In v7 and earlier it affects all stage 1 translations.
2540      */
2541     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2542         && !arm_feature(env, ARM_FEATURE_V8)) {
2543         if (regime_el(env, mmu_idx) == 3) {
2544             address += env->cp15.fcseidr_s;
2545         } else {
2546             address += env->cp15.fcseidr_ns;
2547         }
2548     }
2549 
2550     if (arm_feature(env, ARM_FEATURE_PMSA)) {
2551         bool ret;
2552         result->f.lg_page_size = TARGET_PAGE_BITS;
2553 
2554         if (arm_feature(env, ARM_FEATURE_V8)) {
2555             /* PMSAv8 */
2556             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2557                                        is_secure, result, fi);
2558         } else if (arm_feature(env, ARM_FEATURE_V7)) {
2559             /* PMSAv7 */
2560             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2561                                        is_secure, result, fi);
2562         } else {
2563             /* Pre-v7 MPU */
2564             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2565                                        is_secure, result, fi);
2566         }
2567         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2568                       " mmu_idx %u -> %s (prot %c%c%c)\n",
2569                       access_type == MMU_DATA_LOAD ? "reading" :
2570                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
2571                       (uint32_t)address, mmu_idx,
2572                       ret ? "Miss" : "Hit",
2573                       result->f.prot & PAGE_READ ? 'r' : '-',
2574                       result->f.prot & PAGE_WRITE ? 'w' : '-',
2575                       result->f.prot & PAGE_EXEC ? 'x' : '-');
2576 
2577         return ret;
2578     }
2579 
2580     /* Definitely a real MMU, not an MPU */
2581 
2582     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
2583         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2584                                       is_secure, result, fi);
2585     }
2586 
2587     if (regime_using_lpae_format(env, mmu_idx)) {
2588         return get_phys_addr_lpae(env, ptw, address, access_type, false,
2589                                   result, fi);
2590     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2591         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
2592     } else {
2593         return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
2594     }
2595 }
2596 
2597 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
2598                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
2599                                bool is_secure, GetPhysAddrResult *result,
2600                                ARMMMUFaultInfo *fi)
2601 {
2602     S1Translate ptw = {
2603         .in_mmu_idx = mmu_idx,
2604         .in_secure = is_secure,
2605     };
2606     return get_phys_addr_with_struct(env, &ptw, address, access_type,
2607                                      result, fi);
2608 }
2609 
2610 bool get_phys_addr(CPUARMState *env, target_ulong address,
2611                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
2612                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2613 {
2614     bool is_secure;
2615 
2616     switch (mmu_idx) {
2617     case ARMMMUIdx_E10_0:
2618     case ARMMMUIdx_E10_1:
2619     case ARMMMUIdx_E10_1_PAN:
2620     case ARMMMUIdx_E20_0:
2621     case ARMMMUIdx_E20_2:
2622     case ARMMMUIdx_E20_2_PAN:
2623     case ARMMMUIdx_Stage1_E0:
2624     case ARMMMUIdx_Stage1_E1:
2625     case ARMMMUIdx_Stage1_E1_PAN:
2626     case ARMMMUIdx_E2:
2627         is_secure = arm_is_secure_below_el3(env);
2628         break;
2629     case ARMMMUIdx_Stage2:
2630     case ARMMMUIdx_Phys_NS:
2631     case ARMMMUIdx_MPrivNegPri:
2632     case ARMMMUIdx_MUserNegPri:
2633     case ARMMMUIdx_MPriv:
2634     case ARMMMUIdx_MUser:
2635         is_secure = false;
2636         break;
2637     case ARMMMUIdx_E3:
2638     case ARMMMUIdx_Stage2_S:
2639     case ARMMMUIdx_Phys_S:
2640     case ARMMMUIdx_MSPrivNegPri:
2641     case ARMMMUIdx_MSUserNegPri:
2642     case ARMMMUIdx_MSPriv:
2643     case ARMMMUIdx_MSUser:
2644         is_secure = true;
2645         break;
2646     default:
2647         g_assert_not_reached();
2648     }
2649     return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
2650                                      is_secure, result, fi);
2651 }
2652 
2653 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2654                                          MemTxAttrs *attrs)
2655 {
2656     ARMCPU *cpu = ARM_CPU(cs);
2657     CPUARMState *env = &cpu->env;
2658     S1Translate ptw = {
2659         .in_mmu_idx = arm_mmu_idx(env),
2660         .in_secure = arm_is_secure(env),
2661         .in_debug = true,
2662     };
2663     GetPhysAddrResult res = {};
2664     ARMMMUFaultInfo fi = {};
2665     bool ret;
2666 
2667     ret = get_phys_addr_with_struct(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
2668     *attrs = res.f.attrs;
2669 
2670     if (ret) {
2671         return -1;
2672     }
2673     return res.f.phys_addr;
2674 }
2675