1 /*
2  * ARM page table walking.
3  *
4  * This code is licensed under the GNU GPL v2 or later.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qemu/range.h"
12 #include "cpu.h"
13 #include "internals.h"
14 #include "idau.h"
15 
16 
17 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
18                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
19                                bool is_secure, bool s1_is_el0,
20                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
21     __attribute__((nonnull));
22 
23 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
24 static const uint8_t pamax_map[] = {
25     [0] = 32,
26     [1] = 36,
27     [2] = 40,
28     [3] = 42,
29     [4] = 44,
30     [5] = 48,
31     [6] = 52,
32 };
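
/*
 * For example, a PARANGE value of 5 corresponds to a 48-bit physical
 * address range, so arm_pamax() below returns 48 for such a CPU.
 */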
33 
34 /* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
35 unsigned int arm_pamax(ARMCPU *cpu)
36 {
37     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
38         unsigned int parange =
39             FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
40 
41         /*
42          * id_aa64mmfr0 is a read-only register so values outside of the
43          * supported mappings can be considered an implementation error.
44          */
45         assert(parange < ARRAY_SIZE(pamax_map));
46         return pamax_map[parange];
47     }
48 
49     /*
50      * In machvirt_init, we call arm_pamax on a cpu that is not fully
51      * initialized, so we can't rely on the propagation done in realize.
52      */
53     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
54         arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
55         /* v7 with LPAE */
56         return 40;
57     }
58     /* Anything else */
59     return 32;
60 }
61 
62 /*
63  * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
64  */
65 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
66 {
67     switch (mmu_idx) {
68     case ARMMMUIdx_E10_0:
69         return ARMMMUIdx_Stage1_E0;
70     case ARMMMUIdx_E10_1:
71         return ARMMMUIdx_Stage1_E1;
72     case ARMMMUIdx_E10_1_PAN:
73         return ARMMMUIdx_Stage1_E1_PAN;
74     default:
75         return mmu_idx;
76     }
77 }
78 
79 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
80 {
81     return stage_1_mmu_idx(arm_mmu_idx(env));
82 }
83 
84 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
85 {
86     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
87 }
88 
89 static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
90 {
91     switch (mmu_idx) {
92     case ARMMMUIdx_E20_0:
93     case ARMMMUIdx_Stage1_E0:
94     case ARMMMUIdx_MUser:
95     case ARMMMUIdx_MSUser:
96     case ARMMMUIdx_MUserNegPri:
97     case ARMMMUIdx_MSUserNegPri:
98         return true;
99     default:
100         return false;
101     case ARMMMUIdx_E10_0:
102     case ARMMMUIdx_E10_1:
103     case ARMMMUIdx_E10_1_PAN:
104         g_assert_not_reached();
105     }
106 }
107 
108 /* Return the TTBR associated with this translation regime */
109 static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
110 {
111     if (mmu_idx == ARMMMUIdx_Stage2) {
112         return env->cp15.vttbr_el2;
113     }
114     if (mmu_idx == ARMMMUIdx_Stage2_S) {
115         return env->cp15.vsttbr_el2;
116     }
117     if (ttbrn == 0) {
118         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
119     } else {
120         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
121     }
122 }
123 
124 /* Return true if the specified stage of address translation is disabled */
125 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
126                                         bool is_secure)
127 {
128     uint64_t hcr_el2;
129 
130     if (arm_feature(env, ARM_FEATURE_M)) {
131         switch (env->v7m.mpu_ctrl[is_secure] &
132                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
133         case R_V7M_MPU_CTRL_ENABLE_MASK:
134             /* Enabled, but not for HardFault and NMI */
135             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
136         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
137             /* Enabled for all cases */
138             return false;
139         case 0:
140         default:
141             /*
142              * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
143              * we warned about that in armv7m_nvic.c when the guest set it.
144              */
145             return true;
146         }
147     }
148 
149     hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
150 
151     switch (mmu_idx) {
152     case ARMMMUIdx_Stage2:
153     case ARMMMUIdx_Stage2_S:
154         /* HCR.DC means HCR.VM behaves as 1 */
155         return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
156 
157     case ARMMMUIdx_E10_0:
158     case ARMMMUIdx_E10_1:
159     case ARMMMUIdx_E10_1_PAN:
160         /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
161         if (hcr_el2 & HCR_TGE) {
162             return true;
163         }
164         break;
165 
166     case ARMMMUIdx_Stage1_E0:
167     case ARMMMUIdx_Stage1_E1:
168     case ARMMMUIdx_Stage1_E1_PAN:
169         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
170         if (hcr_el2 & HCR_DC) {
171             return true;
172         }
173         break;
174 
175     case ARMMMUIdx_E20_0:
176     case ARMMMUIdx_E20_2:
177     case ARMMMUIdx_E20_2_PAN:
178     case ARMMMUIdx_E2:
179     case ARMMMUIdx_E3:
180         break;
181 
182     default:
183         g_assert_not_reached();
184     }
185 
186     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
187 }
188 
189 static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
190 {
191     /*
192      * For an S1 page table walk, the stage 1 attributes are always
193      * some form of "this is Normal memory". The combined S1+S2
194      * attributes are therefore only Device if stage 2 specifies Device.
195      * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
196      * ie when cacheattrs.attrs bits [3:2] are 0b00.
197      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
198      * when cacheattrs.attrs bit [2] is 0.
199      */
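    /*
     * For example, with FWB clear a stage 2 MemAttr of 0b0001
     * (Device-nGnRE) is reported as Device here, while 0b1111
     * (Normal, Write-Back cacheable) is not.
     */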
200     assert(cacheattrs.is_s2_format);
201     if (hcr & HCR_FWB) {
202         return (cacheattrs.attrs & 0x4) == 0;
203     } else {
204         return (cacheattrs.attrs & 0xc) == 0;
205     }
206 }
207 
208 /* Translate a S1 pagetable walk through S2 if needed.  */
209 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
210                                hwaddr addr, bool *is_secure_ptr,
211                                ARMMMUFaultInfo *fi)
212 {
213     bool is_secure = *is_secure_ptr;
214     ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
215 
216     if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
217         !regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
218         GetPhysAddrResult s2 = {};
219         uint64_t hcr;
220         int ret;
221 
222         ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
223                                  is_secure, false, &s2, fi);
224         if (ret) {
225             assert(fi->type != ARMFault_None);
226             fi->s2addr = addr;
227             fi->stage2 = true;
228             fi->s1ptw = true;
229             fi->s1ns = !is_secure;
230             return ~0;
231         }
232 
233         hcr = arm_hcr_el2_eff_secstate(env, is_secure);
234         if ((hcr & HCR_PTW) && ptw_attrs_are_device(hcr, s2.cacheattrs)) {
235             /*
236              * PTW set and S1 walk touched S2 Device memory:
237              * generate Permission fault.
238              */
239             fi->type = ARMFault_Permission;
240             fi->s2addr = addr;
241             fi->stage2 = true;
242             fi->s1ptw = true;
243             fi->s1ns = !is_secure;
244             return ~0;
245         }
246 
247         if (arm_is_secure_below_el3(env)) {
248             /* Check if page table walk is to secure or non-secure PA space. */
249             if (is_secure) {
250                 is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
251             } else {
252                 is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
253             }
254             *is_secure_ptr = is_secure;
255         } else {
256             assert(!is_secure);
257         }
258 
259         addr = s2.f.phys_addr;
260     }
261     return addr;
262 }
263 
264 /* All loads done in the course of a page table walk go through here. */
265 static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
266                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
267 {
268     CPUState *cs = env_cpu(env);
269     MemTxAttrs attrs = {};
270     MemTxResult result = MEMTX_OK;
271     AddressSpace *as;
272     uint32_t data;
273 
274     addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
275     attrs.secure = is_secure;
276     as = arm_addressspace(cs, attrs);
277     if (fi->s1ptw) {
278         return 0;
279     }
280     if (regime_translation_big_endian(env, mmu_idx)) {
281         data = address_space_ldl_be(as, addr, attrs, &result);
282     } else {
283         data = address_space_ldl_le(as, addr, attrs, &result);
284     }
285     if (result == MEMTX_OK) {
286         return data;
287     }
288     fi->type = ARMFault_SyncExternalOnWalk;
289     fi->ea = arm_extabort_type(result);
290     return 0;
291 }
292 
293 static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
294                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
295 {
296     CPUState *cs = env_cpu(env);
297     MemTxAttrs attrs = {};
298     MemTxResult result = MEMTX_OK;
299     AddressSpace *as;
300     uint64_t data;
301 
302     addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
303     attrs.secure = is_secure;
304     as = arm_addressspace(cs, attrs);
305     if (fi->s1ptw) {
306         return 0;
307     }
308     if (regime_translation_big_endian(env, mmu_idx)) {
309         data = address_space_ldq_be(as, addr, attrs, &result);
310     } else {
311         data = address_space_ldq_le(as, addr, attrs, &result);
312     }
313     if (result == MEMTX_OK) {
314         return data;
315     }
316     fi->type = ARMFault_SyncExternalOnWalk;
317     fi->ea = arm_extabort_type(result);
318     return 0;
319 }
320 
321 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
322                                      uint32_t *table, uint32_t address)
323 {
324     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
325     uint64_t tcr = regime_tcr(env, mmu_idx);
326     int maskshift = extract32(tcr, 0, 3);
327     uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
328     uint32_t base_mask;
329 
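    /*
     * For example, with TTBCR.N == 2 the mask above is 0xc0000000, so
     * virtual addresses at or above 0x40000000 go to TTBR1, and the
     * TTBR0 base is masked with base_mask == 0xfffff000.
     */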
330     if (address & mask) {
331         if (tcr & TTBCR_PD1) {
332             /* Translation table walk disabled for TTBR1 */
333             return false;
334         }
335         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
336     } else {
337         if (tcr & TTBCR_PD0) {
338             /* Translation table walk disabled for TTBR0 */
339             return false;
340         }
341         base_mask = ~((uint32_t)0x3fffu >> maskshift);
342         *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
343     }
344     *table |= (address >> 18) & 0x3ffc;
345     return true;
346 }
347 
348 /*
349  * Translate section/page access permissions to page R/W protection flags
350  * @env:         CPUARMState
351  * @mmu_idx:     MMU index indicating required translation regime
352  * @ap:          The 3-bit access permissions (AP[2:0])
353  * @domain_prot: The 2-bit domain access permissions
354  */
355 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
356                          int ap, int domain_prot)
357 {
358     bool is_user = regime_is_user(env, mmu_idx);
359 
360     if (domain_prot == 3) {
361         return PAGE_READ | PAGE_WRITE;
362     }
363 
364     switch (ap) {
365     case 0:
366         if (arm_feature(env, ARM_FEATURE_V7)) {
367             return 0;
368         }
369         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
370         case SCTLR_S:
371             return is_user ? 0 : PAGE_READ;
372         case SCTLR_R:
373             return PAGE_READ;
374         default:
375             return 0;
376         }
377     case 1:
378         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
379     case 2:
380         if (is_user) {
381             return PAGE_READ;
382         } else {
383             return PAGE_READ | PAGE_WRITE;
384         }
385     case 3:
386         return PAGE_READ | PAGE_WRITE;
387     case 4: /* Reserved.  */
388         return 0;
389     case 5:
390         return is_user ? 0 : PAGE_READ;
391     case 6:
392         return PAGE_READ;
393     case 7:
394         if (!arm_feature(env, ARM_FEATURE_V6K)) {
395             return 0;
396         }
397         return PAGE_READ;
398     default:
399         g_assert_not_reached();
400     }
401 }
402 
403 /*
404  * Translate section/page access permissions to page R/W protection flags.
405  * @ap:      The 2-bit simple AP (AP[2:1])
406  * @is_user: TRUE if accessing from PL0
407  */
408 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
409 {
410     switch (ap) {
411     case 0:
412         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
413     case 1:
414         return PAGE_READ | PAGE_WRITE;
415     case 2:
416         return is_user ? 0 : PAGE_READ;
417     case 3:
418         return PAGE_READ;
419     default:
420         g_assert_not_reached();
421     }
422 }
423 
424 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
425 {
426     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
427 }
428 
429 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
430                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
431                              bool is_secure, GetPhysAddrResult *result,
432                              ARMMMUFaultInfo *fi)
433 {
434     int level = 1;
435     uint32_t table;
436     uint32_t desc;
437     int type;
438     int ap;
439     int domain = 0;
440     int domain_prot;
441     hwaddr phys_addr;
442     uint32_t dacr;
443 
444     /* Pagetable walk.  */
445     /* Lookup l1 descriptor.  */
446     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
447         /* Section translation fault if page walk is disabled by PD0 or PD1 */
448         fi->type = ARMFault_Translation;
449         goto do_fault;
450     }
451     desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
452     if (fi->type != ARMFault_None) {
453         goto do_fault;
454     }
455     type = (desc & 3);
456     domain = (desc >> 5) & 0x0f;
457     if (regime_el(env, mmu_idx) == 1) {
458         dacr = env->cp15.dacr_ns;
459     } else {
460         dacr = env->cp15.dacr_s;
461     }
462     domain_prot = (dacr >> (domain * 2)) & 3;
463     if (type == 0) {
464         /* Section translation fault.  */
465         fi->type = ARMFault_Translation;
466         goto do_fault;
467     }
468     if (type != 2) {
469         level = 2;
470     }
471     if (domain_prot == 0 || domain_prot == 2) {
472         fi->type = ARMFault_Domain;
473         goto do_fault;
474     }
475     if (type == 2) {
476         /* 1Mb section.  */
477         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
478         ap = (desc >> 10) & 3;
479         result->f.lg_page_size = 20; /* 1MB */
480     } else {
481         /* Lookup l2 entry.  */
482         if (type == 1) {
483             /* Coarse pagetable.  */
484             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
485         } else {
486             /* Fine pagetable.  */
487             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
488         }
489         desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
490         if (fi->type != ARMFault_None) {
491             goto do_fault;
492         }
493         switch (desc & 3) {
494         case 0: /* Page translation fault.  */
495             fi->type = ARMFault_Translation;
496             goto do_fault;
497         case 1: /* 64k page.  */
498             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
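            /*
             * Each of the four 16KB subpages of a 64KB large page has its
             * own AP[1:0] field; address bits [15:14] select which pair of
             * descriptor bits the shift below picks out.
             */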
499             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
500             result->f.lg_page_size = 16;
501             break;
502         case 2: /* 4k page.  */
503             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
504             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
505             result->f.lg_page_size = 12;
506             break;
507         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
508             if (type == 1) {
509                 /* ARMv6/XScale extended small page format */
510                 if (arm_feature(env, ARM_FEATURE_XSCALE)
511                     || arm_feature(env, ARM_FEATURE_V6)) {
512                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
513                     result->f.lg_page_size = 12;
514                 } else {
515                     /*
516                      * UNPREDICTABLE in ARMv5; we choose to take a
517                      * page translation fault.
518                      */
519                     fi->type = ARMFault_Translation;
520                     goto do_fault;
521                 }
522             } else {
523                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
524                 result->f.lg_page_size = 10;
525             }
526             ap = (desc >> 4) & 3;
527             break;
528         default:
529             /* Never happens, but compiler isn't smart enough to tell.  */
530             g_assert_not_reached();
531         }
532     }
533     result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
534     result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
535     if (!(result->f.prot & (1 << access_type))) {
536         /* Access permission fault.  */
537         fi->type = ARMFault_Permission;
538         goto do_fault;
539     }
540     result->f.phys_addr = phys_addr;
541     return false;
542 do_fault:
543     fi->domain = domain;
544     fi->level = level;
545     return true;
546 }
547 
548 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
549                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
550                              bool is_secure, GetPhysAddrResult *result,
551                              ARMMMUFaultInfo *fi)
552 {
553     ARMCPU *cpu = env_archcpu(env);
554     int level = 1;
555     uint32_t table;
556     uint32_t desc;
557     uint32_t xn;
558     uint32_t pxn = 0;
559     int type;
560     int ap;
561     int domain = 0;
562     int domain_prot;
563     hwaddr phys_addr;
564     uint32_t dacr;
565     bool ns;
566 
567     /* Pagetable walk.  */
568     /* Lookup l1 descriptor.  */
569     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
570         /* Section translation fault if page walk is disabled by PD0 or PD1 */
571         fi->type = ARMFault_Translation;
572         goto do_fault;
573     }
574     desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
575     if (fi->type != ARMFault_None) {
576         goto do_fault;
577     }
578     type = (desc & 3);
579     if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /*
         * Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
583         fi->type = ARMFault_Translation;
584         goto do_fault;
585     }
586     if ((type == 1) || !(desc & (1 << 18))) {
587         /* Page or Section.  */
588         domain = (desc >> 5) & 0x0f;
589     }
590     if (regime_el(env, mmu_idx) == 1) {
591         dacr = env->cp15.dacr_ns;
592     } else {
593         dacr = env->cp15.dacr_s;
594     }
595     if (type == 1) {
596         level = 2;
597     }
598     domain_prot = (dacr >> (domain * 2)) & 3;
599     if (domain_prot == 0 || domain_prot == 2) {
600         /* Section or Page domain fault */
601         fi->type = ARMFault_Domain;
602         goto do_fault;
603     }
604     if (type != 1) {
605         if (desc & (1 << 18)) {
606             /* Supersection.  */
607             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
608             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
609             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
610             result->f.lg_page_size = 24;  /* 16MB */
611         } else {
612             /* Section.  */
613             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
614             result->f.lg_page_size = 20;  /* 1MB */
615         }
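        /*
         * For sections and supersections, AP[1:0] live at descriptor bits
         * [11:10] and AP[2] (APX) at bit 15; combine them into the 3-bit
         * value used by the permission helpers below.
         */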
616         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
617         xn = desc & (1 << 4);
618         pxn = desc & 1;
619         ns = extract32(desc, 19, 1);
620     } else {
621         if (cpu_isar_feature(aa32_pxn, cpu)) {
622             pxn = (desc >> 2) & 1;
623         }
624         ns = extract32(desc, 3, 1);
625         /* Lookup l2 entry.  */
626         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
627         desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
628         if (fi->type != ARMFault_None) {
629             goto do_fault;
630         }
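        /*
         * For pages, AP[1:0] are descriptor bits [5:4] and AP[2] (APX)
         * is descriptor bit 9.
         */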
631         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
632         switch (desc & 3) {
633         case 0: /* Page translation fault.  */
634             fi->type = ARMFault_Translation;
635             goto do_fault;
636         case 1: /* 64k page.  */
637             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
638             xn = desc & (1 << 15);
639             result->f.lg_page_size = 16;
640             break;
641         case 2: case 3: /* 4k page.  */
642             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
643             xn = desc & 1;
644             result->f.lg_page_size = 12;
645             break;
646         default:
647             /* Never happens, but compiler isn't smart enough to tell.  */
648             g_assert_not_reached();
649         }
650     }
651     if (domain_prot == 3) {
652         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
653     } else {
654         if (pxn && !regime_is_user(env, mmu_idx)) {
655             xn = 1;
656         }
657         if (xn && access_type == MMU_INST_FETCH) {
658             fi->type = ARMFault_Permission;
659             goto do_fault;
660         }
661 
662         if (arm_feature(env, ARM_FEATURE_V6K) &&
663                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
664             /* The simplified model uses AP[0] as an access control bit.  */
665             if ((ap & 1) == 0) {
666                 /* Access flag fault.  */
667                 fi->type = ARMFault_AccessFlag;
668                 goto do_fault;
669             }
670             result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
671         } else {
672             result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
673         }
674         if (result->f.prot && !xn) {
675             result->f.prot |= PAGE_EXEC;
676         }
677         if (!(result->f.prot & (1 << access_type))) {
678             /* Access permission fault.  */
679             fi->type = ARMFault_Permission;
680             goto do_fault;
681         }
682     }
683     if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
688         result->f.attrs.secure = false;
689     }
690     result->f.phys_addr = phys_addr;
691     return false;
692 do_fault:
693     fi->domain = domain;
694     fi->level = level;
695     return true;
696 }
697 
698 /*
699  * Translate S2 section/page access permissions to protection flags
700  * @env:     CPUARMState
701  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
702  * @xn:      XN (execute-never) bits
703  * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
704  */
705 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
706 {
707     int prot = 0;
708 
709     if (s2ap & 1) {
710         prot |= PAGE_READ;
711     }
712     if (s2ap & 2) {
713         prot |= PAGE_WRITE;
714     }
715 
716     if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
717         switch (xn) {
718         case 0:
719             prot |= PAGE_EXEC;
720             break;
721         case 1:
722             if (s1_is_el0) {
723                 prot |= PAGE_EXEC;
724             }
725             break;
726         case 2:
727             break;
728         case 3:
729             if (!s1_is_el0) {
730                 prot |= PAGE_EXEC;
731             }
732             break;
733         default:
734             g_assert_not_reached();
735         }
736     } else {
737         if (!extract32(xn, 1, 1)) {
738             if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
739                 prot |= PAGE_EXEC;
740             }
741         }
742     }
743     return prot;
744 }
745 
746 /*
747  * Translate section/page access permissions to protection flags
748  * @env:     CPUARMState
749  * @mmu_idx: MMU index indicating required translation regime
750  * @is_aa64: TRUE if AArch64
751  * @ap:      The 2-bit simple AP (AP[2:1])
752  * @ns:      NS (non-secure) bit
753  * @xn:      XN (execute-never) bit
754  * @pxn:     PXN (privileged execute-never) bit
755  */
756 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
757                       int ap, int ns, int xn, int pxn)
758 {
759     bool is_user = regime_is_user(env, mmu_idx);
760     int prot_rw, user_rw;
761     bool have_wxn;
762     int wxn = 0;
763 
764     assert(mmu_idx != ARMMMUIdx_Stage2);
765     assert(mmu_idx != ARMMMUIdx_Stage2_S);
766 
767     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
768     if (is_user) {
769         prot_rw = user_rw;
770     } else {
771         if (user_rw && regime_is_pan(env, mmu_idx)) {
772             /* PAN forbids data accesses but doesn't affect insn fetch */
773             prot_rw = 0;
774         } else {
775             prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
776         }
777     }
778 
779     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
780         return prot_rw;
781     }
782 
    /*
     * TODO: have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
788     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
789 
790     if (have_wxn) {
791         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
792     }
793 
794     if (is_aa64) {
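        /*
         * For the privileged half of a two-range regime, a page that is
         * writable at EL0 is never privileged-executable (it is treated
         * as if PXN were set), which is what the check below implements.
         */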
795         if (regime_has_2_ranges(mmu_idx) && !is_user) {
796             xn = pxn || (user_rw & PAGE_WRITE);
797         }
798     } else if (arm_feature(env, ARM_FEATURE_V7)) {
799         switch (regime_el(env, mmu_idx)) {
800         case 1:
801         case 3:
802             if (is_user) {
803                 xn = xn || !(user_rw & PAGE_READ);
804             } else {
805                 int uwxn = 0;
806                 if (have_wxn) {
807                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
808                 }
809                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
810                      (uwxn && (user_rw & PAGE_WRITE));
811             }
812             break;
813         case 2:
814             break;
815         }
816     } else {
817         xn = wxn = 0;
818     }
819 
820     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
821         return prot_rw;
822     }
823     return prot_rw | PAGE_EXEC;
824 }
825 
826 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
827                                           ARMMMUIdx mmu_idx)
828 {
829     uint64_t tcr = regime_tcr(env, mmu_idx);
830     uint32_t el = regime_el(env, mmu_idx);
831     int select, tsz;
832     bool epd, hpd;
833 
834     assert(mmu_idx != ARMMMUIdx_Stage2_S);
835 
836     if (mmu_idx == ARMMMUIdx_Stage2) {
837         /* VTCR */
838         bool sext = extract32(tcr, 4, 1);
839         bool sign = extract32(tcr, 3, 1);
840 
841         /*
842          * If the sign-extend bit is not the same as t0sz[3], the result
843          * is unpredictable. Flag this as a guest error.
844          */
845         if (sign != sext) {
846             qemu_log_mask(LOG_GUEST_ERROR,
847                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
848         }
849         tsz = sextract32(tcr, 0, 4) + 8;
850         select = 0;
851         hpd = false;
852         epd = false;
853     } else if (el == 2) {
854         /* HTCR */
855         tsz = extract32(tcr, 0, 3);
856         select = 0;
857         hpd = extract64(tcr, 24, 1);
858         epd = false;
859     } else {
860         int t0sz = extract32(tcr, 0, 3);
861         int t1sz = extract32(tcr, 16, 3);
862 
863         if (t1sz == 0) {
864             select = va > (0xffffffffu >> t0sz);
865         } else {
866             /* Note that we will detect errors later.  */
867             select = va >= ~(0xffffffffu >> t1sz);
868         }
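        /*
         * For example, with T0SZ == 2 and T1SZ == 0, addresses up to
         * 0x3fffffff use the TTBR0 range and anything above selects TTBR1.
         */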
869         if (!select) {
870             tsz = t0sz;
871             epd = extract32(tcr, 7, 1);
872             hpd = extract64(tcr, 41, 1);
873         } else {
874             tsz = t1sz;
875             epd = extract32(tcr, 23, 1);
876             hpd = extract64(tcr, 42, 1);
877         }
878         /* For aarch32, hpd0 is not enabled without t2e as well.  */
879         hpd &= extract32(tcr, 6, 1);
880     }
881 
882     return (ARMVAParameters) {
883         .tsz = tsz,
884         .select = select,
885         .epd = epd,
886         .hpd = hpd,
887     };
888 }
889 
890 /*
891  * check_s2_mmu_setup
892  * @cpu:        ARMCPU
893  * @is_aa64:    True if the translation regime is in AArch64 state
 * @level:      Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 * @outputsize: Bitsize of PAs
897  *
898  * Returns true if the suggested S2 translation parameters are OK and
899  * false otherwise.
900  */
901 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
902                                int inputsize, int stride, int outputsize)
903 {
904     const int grainsize = stride + 3;
905     int startsizecheck;
906 
907     /*
908      * Negative levels are usually not allowed...
909      * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
910      * begins with level -1.  Note that previous feature tests will have
911      * eliminated this combination if it is not enabled.
912      */
913     if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
914         return false;
915     }
916 
917     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
918     if (startsizecheck < 1 || startsizecheck > stride + 4) {
919         return false;
920     }
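    /*
     * For example, with a 4KB granule (stride 9, grainsize 12), a 40-bit
     * IPA space and a suggested starting level of 1, startsizecheck is
     * 40 - (2 * 9 + 12) = 10, inside the permitted [1, stride + 4] range.
     */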
921 
922     if (is_aa64) {
923         switch (stride) {
924         case 13: /* 64KB Pages.  */
925             if (level == 0 || (level == 1 && outputsize <= 42)) {
926                 return false;
927             }
928             break;
929         case 11: /* 16KB Pages.  */
930             if (level == 0 || (level == 1 && outputsize <= 40)) {
931                 return false;
932             }
933             break;
934         case 9: /* 4KB Pages.  */
935             if (level == 0 && outputsize <= 42) {
936                 return false;
937             }
938             break;
939         default:
940             g_assert_not_reached();
941         }
942 
943         /* Inputsize checks.  */
944         if (inputsize > outputsize &&
945             (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
946             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
947             return false;
948         }
949     } else {
950         /* AArch32 only supports 4KB pages. Assert on that.  */
951         assert(stride == 9);
952 
953         if (level == 0) {
954             return false;
955         }
956     }
957     return true;
958 }
959 
960 /**
961  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
962  *
 * Returns false if the translation was successful. Otherwise, the fields
 * of @result may not be filled in, and @fi is populated with information
 * about why the translation aborted, in the format of a long-format
 * DFSR/IFSR fault register, with the following caveat: the WnR bit is
 * never set (the caller must do this).
968  *
969  * @env: CPUARMState
970  * @address: virtual address to get physical address for
971  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @mmu_idx: MMU index indicating required translation regime
 * @is_secure: whether the translation regime is secure
 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
 *             table walk), must be true if this is stage 2 of a stage 1+2
 *             walk for an EL0 access. If @mmu_idx is anything else,
 *             @s1_is_el0 is ignored.
 * @result: set on translation success
 * @fi: set to fault info if the translation fails
979  */
980 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
981                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
982                                bool is_secure, bool s1_is_el0,
983                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
984 {
985     ARMCPU *cpu = env_archcpu(env);
986     /* Read an LPAE long-descriptor translation table. */
987     ARMFaultType fault_type = ARMFault_Translation;
988     uint32_t level;
989     ARMVAParameters param;
990     uint64_t ttbr;
991     hwaddr descaddr, indexmask, indexmask_grainsize;
992     uint32_t tableattrs;
993     target_ulong page_size;
994     uint32_t attrs;
995     int32_t stride;
996     int addrsize, inputsize, outputsize;
997     uint64_t tcr = regime_tcr(env, mmu_idx);
998     int ap, ns, xn, pxn;
999     uint32_t el = regime_el(env, mmu_idx);
1000     uint64_t descaddrmask;
1001     bool aarch64 = arm_el_is_aa64(env, el);
1002     bool guarded = false;
1003 
1004     /* TODO: This code does not support shareability levels. */
1005     if (aarch64) {
1006         int ps;
1007 
1008         param = aa64_va_parameters(env, address, mmu_idx,
1009                                    access_type != MMU_INST_FETCH);
1010         level = 0;
1011 
1012         /*
1013          * If TxSZ is programmed to a value larger than the maximum,
1014          * or smaller than the effective minimum, it is IMPLEMENTATION
1015          * DEFINED whether we behave as if the field were programmed
1016          * within bounds, or if a level 0 Translation fault is generated.
1017          *
1018          * With FEAT_LVA, fault on less than minimum becomes required,
1019          * so our choice is to always raise the fault.
1020          */
1021         if (param.tsz_oob) {
1022             fault_type = ARMFault_Translation;
1023             goto do_fault;
1024         }
1025 
1026         addrsize = 64 - 8 * param.tbi;
1027         inputsize = 64 - param.tsz;
1028 
1029         /*
1030          * Bound PS by PARANGE to find the effective output address size.
1031          * ID_AA64MMFR0 is a read-only register so values outside of the
1032          * supported mappings can be considered an implementation error.
1033          */
1034         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1035         ps = MIN(ps, param.ps);
1036         assert(ps < ARRAY_SIZE(pamax_map));
1037         outputsize = pamax_map[ps];
1038     } else {
1039         param = aa32_va_parameters(env, address, mmu_idx);
1040         level = 1;
1041         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1042         inputsize = addrsize - param.tsz;
1043         outputsize = 40;
1044     }
1045 
1046     /*
1047      * We determined the region when collecting the parameters, but we
1048      * have not yet validated that the address is valid for the region.
1049      * Extract the top bits and verify that they all match select.
1050      *
1051      * For aa32, if inputsize == addrsize, then we have selected the
1052      * region by exclusion in aa32_va_parameters and there is no more
1053      * validation to do here.
1054      */
1055     if (inputsize < addrsize) {
1056         target_ulong top_bits = sextract64(address, inputsize,
1057                                            addrsize - inputsize);
1058         if (-top_bits != param.select) {
1059             /* The gap between the two regions is a Translation fault */
1060             fault_type = ARMFault_Translation;
1061             goto do_fault;
1062         }
1063     }
1064 
1065     stride = arm_granule_bits(param.gran) - 3;
1066 
1067     /*
1068      * Note that QEMU ignores shareability and cacheability attributes,
1069      * so we don't need to do anything with the SH, ORGN, IRGN fields
1070      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1071      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1072      * implement any ASID-like capability so we can ignore it (instead
1073      * we will always flush the TLB any time the ASID is changed).
1074      */
1075     ttbr = regime_ttbr(env, mmu_idx, param.select);
1076 
1077     /*
1078      * Here we should have set up all the parameters for the translation:
1079      * inputsize, ttbr, epd, stride, tbi
1080      */
1081 
1082     if (param.epd) {
1083         /*
1084          * Translation table walk disabled => Translation fault on TLB miss
1085          * Note: This is always 0 on 64-bit EL2 and EL3.
1086          */
1087         goto do_fault;
1088     }
1089 
1090     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
1091         /*
1092          * The starting level depends on the virtual address size (which can
1093          * be up to 48 bits) and the translation granule size. It indicates
1094          * the number of strides (stride bits at a time) needed to
1095          * consume the bits of the input address. In the pseudocode this is:
1096          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1097          * where their 'inputsize' is our 'inputsize', 'grainsize' is
1098          * our 'stride + 3' and 'stride' is our 'stride'.
1099          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1100          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1101          * = 4 - (inputsize - 4) / stride;
1102          */
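        /*
         * For example, a 48-bit input range with 4KB granules (stride 9)
         * gives level = 4 - (48 - 4) / 9 = 0, while a 39-bit range gives
         * level = 4 - 35 / 9 = 1.
         */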
1103         level = 4 - (inputsize - 4) / stride;
1104     } else {
1105         /*
1106          * For stage 2 translations the starting level is specified by the
1107          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
1108          */
1109         uint32_t sl0 = extract32(tcr, 6, 2);
1110         uint32_t sl2 = extract64(tcr, 33, 1);
1111         uint32_t startlevel;
1112         bool ok;
1113 
1114         /* SL2 is RES0 unless DS=1 & 4kb granule. */
1115         if (param.ds && stride == 9 && sl2) {
1116             if (sl0 != 0) {
1117                 level = 0;
1118                 fault_type = ARMFault_Translation;
1119                 goto do_fault;
1120             }
1121             startlevel = -1;
1122         } else if (!aarch64 || stride == 9) {
1123             /* AArch32 or 4KB pages */
1124             startlevel = 2 - sl0;
1125 
1126             if (cpu_isar_feature(aa64_st, cpu)) {
1127                 startlevel &= 3;
1128             }
1129         } else {
1130             /* 16KB or 64KB pages */
1131             startlevel = 3 - sl0;
1132         }
1133 
1134         /* Check that the starting level is valid. */
1135         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1136                                 inputsize, stride, outputsize);
1137         if (!ok) {
1138             fault_type = ARMFault_Translation;
1139             goto do_fault;
1140         }
1141         level = startlevel;
1142     }
1143 
1144     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1145     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1146 
1147     /* Now we can extract the actual base address from the TTBR */
1148     descaddr = extract64(ttbr, 0, 48);
1149 
1150     /*
1151      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1152      *
1153      * Otherwise, if the base address is out of range, raise AddressSizeFault.
1154      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1155      * but we've just cleared the bits above 47, so simplify the test.
1156      */
1157     if (outputsize > 48) {
1158         descaddr |= extract64(ttbr, 2, 4) << 48;
1159     } else if (descaddr >> outputsize) {
1160         level = 0;
1161         fault_type = ARMFault_AddressSize;
1162         goto do_fault;
1163     }
1164 
1165     /*
1166      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1167      * and also to mask out CnP (bit 0) which could validly be non-zero.
1168      */
1169     descaddr &= ~indexmask;
1170 
1171     /*
1172      * For AArch32, the address field in the descriptor goes up to bit 39
1173      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1174      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1175      * bits as part of the address, which will be checked via outputsize.
1176      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1177      * the highest bits of a 52-bit output are placed elsewhere.
1178      */
1179     if (param.ds) {
1180         descaddrmask = MAKE_64BIT_MASK(0, 50);
1181     } else if (arm_feature(env, ARM_FEATURE_V8)) {
1182         descaddrmask = MAKE_64BIT_MASK(0, 48);
1183     } else {
1184         descaddrmask = MAKE_64BIT_MASK(0, 40);
1185     }
1186     descaddrmask &= ~indexmask_grainsize;
1187 
1188     /*
1189      * Secure accesses start with the page table in secure memory and
1190      * can be downgraded to non-secure at any step. Non-secure accesses
1191      * remain non-secure. We implement this by just ORing in the NSTable/NS
1192      * bits at each step.
1193      */
1194     tableattrs = is_secure ? 0 : (1 << 4);
1195     for (;;) {
1196         uint64_t descriptor;
1197         bool nstable;
1198 
1199         descaddr |= (address >> (stride * (4 - level))) & indexmask;
1200         descaddr &= ~7ULL;
1201         nstable = extract32(tableattrs, 4, 1);
1202         descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
1203         if (fi->type != ARMFault_None) {
1204             goto do_fault;
1205         }
1206 
1207         if (!(descriptor & 1) ||
1208             (!(descriptor & 2) && (level == 3))) {
1209             /* Invalid, or the Reserved level 3 encoding */
1210             goto do_fault;
1211         }
1212 
1213         descaddr = descriptor & descaddrmask;
1214 
1215         /*
1216          * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1217          * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1218          * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1219          * raise AddressSizeFault.
1220          */
1221         if (outputsize > 48) {
1222             if (param.ds) {
1223                 descaddr |= extract64(descriptor, 8, 2) << 50;
1224             } else {
1225                 descaddr |= extract64(descriptor, 12, 4) << 48;
1226             }
1227         } else if (descaddr >> outputsize) {
1228             fault_type = ARMFault_AddressSize;
1229             goto do_fault;
1230         }
1231 
1232         if ((descriptor & 2) && (level < 3)) {
1233             /*
1234              * Table entry. The top five bits are attributes which may
1235              * propagate down through lower levels of the table (and
1236              * which are all arranged so that 0 means "no effect", so
1237              * we can gather them up by ORing in the bits at each level).
1238              */
1239             tableattrs |= extract64(descriptor, 59, 5);
1240             level++;
1241             indexmask = indexmask_grainsize;
1242             continue;
1243         }
1244         /*
1245          * Block entry at level 1 or 2, or page entry at level 3.
1246          * These are basically the same thing, although the number
1247          * of bits we pull in from the vaddr varies. Note that although
1248          * descaddrmask masks enough of the low bits of the descriptor
1249          * to give a correct page or table address, the address field
1250          * in a block descriptor is smaller; so we need to explicitly
1251          * clear the lower bits here before ORing in the low vaddr bits.
1252          */
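        /*
         * For example, with a 4KB granule (stride 9) a level 3 entry maps
         * 1 << 12 bytes (a 4KB page), a level 2 block maps 1 << 21 bytes
         * (2MB) and a level 1 block maps 1 << 30 bytes (1GB).
         */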
1253         page_size = (1ULL << ((stride * (4 - level)) + 3));
1254         descaddr &= ~(hwaddr)(page_size - 1);
1255         descaddr |= (address & (page_size - 1));
1256         /* Extract attributes from the descriptor */
1257         attrs = extract64(descriptor, 2, 10)
1258             | (extract64(descriptor, 52, 12) << 10);
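        /*
         * attrs[9:0] are descriptor bits [11:2] (the lower attributes:
         * AttrIndx/MemAttr, NS, AP/S2AP, SH, AF, nG) and attrs[21:10] are
         * descriptor bits [63:52] (the upper attributes, including PXN and
         * XN/UXN); e.g. the Access Flag check below tests attrs bit 8,
         * which is descriptor bit 10.
         */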
1259 
1260         if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1261             /* Stage 2 table descriptors do not include any attribute fields */
1262             break;
1263         }
1264         /* Merge in attributes from table descriptors */
1265         attrs |= nstable << 3; /* NS */
1266         guarded = extract64(descriptor, 50, 1);  /* GP */
1267         if (param.hpd) {
1268             /* HPD disables all the table attributes except NSTable.  */
1269             break;
1270         }
1271         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
1272         /*
1273          * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1274          * means "force PL1 access only", which means forcing AP[1] to 0.
1275          */
1276         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
1277         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
1278         break;
1279     }
1280     /*
1281      * Here descaddr is the final physical address, and attributes
1282      * are all in attrs.
1283      */
1284     fault_type = ARMFault_AccessFlag;
1285     if ((attrs & (1 << 8)) == 0) {
1286         /* Access flag */
1287         goto do_fault;
1288     }
1289 
1290     ap = extract32(attrs, 4, 2);
1291 
1292     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1293         ns = mmu_idx == ARMMMUIdx_Stage2;
1294         xn = extract32(attrs, 11, 2);
1295         result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
1296     } else {
1297         ns = extract32(attrs, 3, 1);
1298         xn = extract32(attrs, 12, 1);
1299         pxn = extract32(attrs, 11, 1);
1300         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1301     }
1302 
1303     fault_type = ARMFault_Permission;
1304     if (!(result->f.prot & (1 << access_type))) {
1305         goto do_fault;
1306     }
1307 
1308     if (ns) {
1309         /*
1310          * The NS bit will (as required by the architecture) have no effect if
1311          * the CPU doesn't support TZ or this is a non-secure translation
1312          * regime, because the attribute will already be non-secure.
1313          */
1314         result->f.attrs.secure = false;
1315     }
1316     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
1317     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
1318         arm_tlb_bti_gp(&result->f.attrs) = true;
1319     }
1320 
1321     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1322         result->cacheattrs.is_s2_format = true;
1323         result->cacheattrs.attrs = extract32(attrs, 0, 4);
1324     } else {
1325         /* Index into MAIR registers for cache attributes */
1326         uint8_t attrindx = extract32(attrs, 0, 3);
1327         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1328         assert(attrindx <= 7);
1329         result->cacheattrs.is_s2_format = false;
1330         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
1331     }
1332 
1333     /*
1334      * For FEAT_LPA2 and effective DS, the SH field in the attributes
1335      * was re-purposed for output address bits.  The SH attribute in
1336      * that case comes from TCR_ELx, which we extracted earlier.
1337      */
1338     if (param.ds) {
1339         result->cacheattrs.shareability = param.sh;
1340     } else {
1341         result->cacheattrs.shareability = extract32(attrs, 6, 2);
1342     }
1343 
1344     result->f.phys_addr = descaddr;
1345     result->f.lg_page_size = ctz64(page_size);
1346     return false;
1347 
1348 do_fault:
1349     fi->type = fault_type;
1350     fi->level = level;
1351     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1352     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
1353                                mmu_idx == ARMMMUIdx_Stage2_S);
1354     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1355     return true;
1356 }
1357 
1358 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1359                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1360                                  bool is_secure, GetPhysAddrResult *result,
1361                                  ARMMMUFaultInfo *fi)
1362 {
1363     int n;
1364     uint32_t mask;
1365     uint32_t base;
1366     bool is_user = regime_is_user(env, mmu_idx);
1367 
1368     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
1369         /* MPU disabled.  */
1370         result->f.phys_addr = address;
1371         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1372         return false;
1373     }
1374 
1375     result->f.phys_addr = address;
1376     for (n = 7; n >= 0; n--) {
1377         base = env->cp15.c6_region[n];
1378         if ((base & 1) == 0) {
1379             continue;
1380         }
1381         mask = 1 << ((base >> 1) & 0x1f);
        /*
         * Keep this shift separate from the above to avoid an
         * (undefined) << 32.
         */
1384         mask = (mask << 1) - 1;
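        /*
         * For example, a region size field of 11 gives mask == 0xfff here,
         * i.e. a 4KB region.
         */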
1385         if (((base ^ address) & ~mask) == 0) {
1386             break;
1387         }
1388     }
1389     if (n < 0) {
1390         fi->type = ARMFault_Background;
1391         return true;
1392     }
1393 
1394     if (access_type == MMU_INST_FETCH) {
1395         mask = env->cp15.pmsav5_insn_ap;
1396     } else {
1397         mask = env->cp15.pmsav5_data_ap;
1398     }
1399     mask = (mask >> (n * 4)) & 0xf;
1400     switch (mask) {
1401     case 0:
1402         fi->type = ARMFault_Permission;
1403         fi->level = 1;
1404         return true;
1405     case 1:
1406         if (is_user) {
1407             fi->type = ARMFault_Permission;
1408             fi->level = 1;
1409             return true;
1410         }
1411         result->f.prot = PAGE_READ | PAGE_WRITE;
1412         break;
1413     case 2:
1414         result->f.prot = PAGE_READ;
1415         if (!is_user) {
1416             result->f.prot |= PAGE_WRITE;
1417         }
1418         break;
1419     case 3:
1420         result->f.prot = PAGE_READ | PAGE_WRITE;
1421         break;
1422     case 5:
1423         if (is_user) {
1424             fi->type = ARMFault_Permission;
1425             fi->level = 1;
1426             return true;
1427         }
1428         result->f.prot = PAGE_READ;
1429         break;
1430     case 6:
1431         result->f.prot = PAGE_READ;
1432         break;
1433     default:
1434         /* Bad permission.  */
1435         fi->type = ARMFault_Permission;
1436         fi->level = 1;
1437         return true;
1438     }
1439     result->f.prot |= PAGE_EXEC;
1440     return false;
1441 }
1442 
1443 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1444                                          int32_t address, uint8_t *prot)
1445 {
1446     if (!arm_feature(env, ARM_FEATURE_M)) {
1447         *prot = PAGE_READ | PAGE_WRITE;
1448         switch (address) {
1449         case 0xF0000000 ... 0xFFFFFFFF:
1450             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
                /* High vectors enabled: executing from this region is OK */
1452                 *prot |= PAGE_EXEC;
1453             }
1454             break;
1455         case 0x00000000 ... 0x7FFFFFFF:
1456             *prot |= PAGE_EXEC;
1457             break;
1458         }
1459     } else {
        /*
         * Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         */
1464         switch (address) {
1465         case 0x00000000 ... 0x1fffffff: /* ROM */
1466         case 0x20000000 ... 0x3fffffff: /* SRAM */
1467         case 0x60000000 ... 0x7fffffff: /* RAM */
1468         case 0x80000000 ... 0x9fffffff: /* RAM */
1469             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1470             break;
1471         case 0x40000000 ... 0x5fffffff: /* Peripheral */
1472         case 0xa0000000 ... 0xbfffffff: /* Device */
1473         case 0xc0000000 ... 0xdfffffff: /* Device */
1474         case 0xe0000000 ... 0xffffffff: /* System */
1475             *prot = PAGE_READ | PAGE_WRITE;
1476             break;
1477         default:
1478             g_assert_not_reached();
1479         }
1480     }
1481 }
1482 
1483 static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1484 {
1485     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1486     return arm_feature(env, ARM_FEATURE_M) &&
1487         extract32(address, 20, 12) == 0xe00;
1488 }
1489 
1490 static bool m_is_system_region(CPUARMState *env, uint32_t address)
1491 {
1492     /*
1493      * True if address is in the M profile system region
1494      * 0xe0000000 - 0xffffffff
1495      */
1496     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1497 }
1498 
1499 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1500                                          bool is_secure, bool is_user)
1501 {
1502     /*
1503      * Return true if we should use the default memory map as a
1504      * "background" region if there are no hits against any MPU regions.
1505      */
1506     CPUARMState *env = &cpu->env;
1507 
1508     if (is_user) {
1509         return false;
1510     }
1511 
1512     if (arm_feature(env, ARM_FEATURE_M)) {
1513         return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1514     } else {
1515         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1516     }
1517 }
1518 
1519 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1520                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1521                                  bool secure, GetPhysAddrResult *result,
1522                                  ARMMMUFaultInfo *fi)
1523 {
1524     ARMCPU *cpu = env_archcpu(env);
1525     int n;
1526     bool is_user = regime_is_user(env, mmu_idx);
1527 
1528     result->f.phys_addr = address;
1529     result->f.lg_page_size = TARGET_PAGE_BITS;
1530     result->f.prot = 0;
1531 
1532     if (regime_translation_disabled(env, mmu_idx, secure) ||
1533         m_is_ppb_region(env, address)) {
1534         /*
1535          * MPU disabled or M profile PPB access: use default memory map.
1536          * The other case which uses the default memory map in the
1537          * v7M ARM ARM pseudocode is exception vector reads from the vector
1538          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1539          * which always does a direct read using address_space_ldl(), rather
1540          * than going via this function, so we don't need to check that here.
1541          */
1542         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1543     } else { /* MPU enabled */
1544         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1545             /* region search */
1546             uint32_t base = env->pmsav7.drbar[n];
1547             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1548             uint32_t rmask;
1549             bool srdis = false;
1550 
1551             if (!(env->pmsav7.drsr[n] & 0x1)) {
1552                 continue;
1553             }
1554 
1555             if (!rsize) {
1556                 qemu_log_mask(LOG_GUEST_ERROR,
1557                               "DRSR[%d]: Rsize field cannot be 0\n", n);
1558                 continue;
1559             }
1560             rsize++;
1561             rmask = (1ull << rsize) - 1;
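                 /*
                  * rsize is now log2 of the region size: for example, an
                  * RSIZE field of 9 encodes a 1 KiB region, giving rsize 10
                  * and rmask 0x3ff.
                  */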
1562 
1563             if (base & rmask) {
1564                 qemu_log_mask(LOG_GUEST_ERROR,
1565                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1566                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
1567                               n, base, rmask);
1568                 continue;
1569             }
1570 
1571             if (address < base || address > base + rmask) {
1572                 /*
1573                  * Address not in this region. We must check whether the
1574                  * region covers addresses in the same page as our address.
1575                  * In that case we must not report a size that covers the
1576                  * whole page for a subsequent hit against a different MPU
1577                  * region or the background region, because it would result in
1578                  * incorrect TLB hits for subsequent accesses to addresses that
1579                  * are in this MPU region.
1580                  */
1581                 if (ranges_overlap(base, rmask,
1582                                    address & TARGET_PAGE_MASK,
1583                                    TARGET_PAGE_SIZE)) {
1584                     result->f.lg_page_size = 0;
1585                 }
1586                 continue;
1587             }
1588 
1589             /* Region matched */
1590 
1591             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1592                 int i, snd;
1593                 uint32_t srdis_mask;
1594 
1595                 rsize -= 3; /* sub region size (power of 2) */
1596                 snd = ((address - base) >> rsize) & 0x7;
1597                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1598 
1599                 srdis_mask = srdis ? 0x3 : 0x0;
1600                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1601                     /*
1602                      * This will check in groups of 2, 4 and then 8, whether
1603                      * the subregion bits are consistent. rsize is incremented
1604                      * back up to give the region size, considering consistent
1605                      * adjacent subregions as one region. Stop testing if rsize
1606                      * is already big enough for an entire QEMU page.
1607                      */
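                         /*
                          * For example, in a 1 KiB region rsize is 7 here
                          * (128 byte subregions): if the pair, then the quad,
                          * then all eight SRD bits around the hit subregion
                          * agree, rsize steps back up to 8, 9 and finally 10.
                          */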
1608                     int snd_rounded = snd & ~(i - 1);
1609                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1610                                                      snd_rounded + 8, i);
1611                     if (srdis_mask ^ srdis_multi) {
1612                         break;
1613                     }
1614                     srdis_mask = (srdis_mask << i) | srdis_mask;
1615                     rsize++;
1616                 }
1617             }
1618             if (srdis) {
1619                 continue;
1620             }
1621             if (rsize < TARGET_PAGE_BITS) {
1622                 result->f.lg_page_size = rsize;
1623             }
1624             break;
1625         }
1626 
1627         if (n == -1) { /* no hits */
1628             if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1629                 /* background fault */
1630                 fi->type = ARMFault_Background;
1631                 return true;
1632             }
1633             get_phys_addr_pmsav7_default(env, mmu_idx, address,
1634                                          &result->f.prot);
1635         } else { /* an MPU hit! */
1636             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1637             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1638 
1639             if (m_is_system_region(env, address)) {
1640                 /* System space is always execute never */
1641                 xn = 1;
1642             }
1643 
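                 /*
                  * DRACR.AP encodes (privileged, user) access as:
                  *   0: none, none    1: RW, none    2: RW, RO    3: RW, RW
                  *   4: reserved      5: RO, none    6: RO, RO
                  *   7: RO, RO on v7-M, reserved on R profile
                  */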
1644             if (is_user) { /* User mode AP bit decoding */
1645                 switch (ap) {
1646                 case 0:
1647                 case 1:
1648                 case 5:
1649                     break; /* no access */
1650                 case 3:
1651                     result->f.prot |= PAGE_WRITE;
1652                     /* fall through */
1653                 case 2:
1654                 case 6:
1655                     result->f.prot |= PAGE_READ | PAGE_EXEC;
1656                     break;
1657                 case 7:
1658                     /* for v7M, same as 6; for R profile a reserved value */
1659                     if (arm_feature(env, ARM_FEATURE_M)) {
1660                         result->f.prot |= PAGE_READ | PAGE_EXEC;
1661                         break;
1662                     }
1663                     /* fall through */
1664                 default:
1665                     qemu_log_mask(LOG_GUEST_ERROR,
1666                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1667                                   PRIx32 "\n", n, ap);
1668                 }
1669             } else { /* Priv. mode AP bits decoding */
1670                 switch (ap) {
1671                 case 0:
1672                     break; /* no access */
1673                 case 1:
1674                 case 2:
1675                 case 3:
1676                     result->f.prot |= PAGE_WRITE;
1677                     /* fall through */
1678                 case 5:
1679                 case 6:
1680                     result->f.prot |= PAGE_READ | PAGE_EXEC;
1681                     break;
1682                 case 7:
1683                     /* for v7M, same as 6; for R profile a reserved value */
1684                     if (arm_feature(env, ARM_FEATURE_M)) {
1685                         result->f.prot |= PAGE_READ | PAGE_EXEC;
1686                         break;
1687                     }
1688                     /* fall through */
1689                 default:
1690                     qemu_log_mask(LOG_GUEST_ERROR,
1691                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1692                                   PRIx32 "\n", n, ap);
1693                 }
1694             }
1695 
1696             /* execute never */
1697             if (xn) {
1698                 result->f.prot &= ~PAGE_EXEC;
1699             }
1700         }
1701     }
1702 
1703     fi->type = ARMFault_Permission;
1704     fi->level = 1;
1705     return !(result->f.prot & (1 << access_type));
1706 }
1707 
1708 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1709                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1710                        bool secure, GetPhysAddrResult *result,
1711                        ARMMMUFaultInfo *fi, uint32_t *mregion)
1712 {
1713     /*
1714      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1715      * that a full phys-to-virt translation does).
1716      * mregion is (if not NULL) set to the region number which matched,
1717      * or -1 if no region number is returned (MPU off, address did not
1718      * hit a region, address hit in multiple regions).
1719      * If the region hit doesn't cover the entire TARGET_PAGE the address
1720      * is within, then we set the result lg_page_size to 0 to force the
1721      * memory system to use a subpage.
1722      */
1723     ARMCPU *cpu = env_archcpu(env);
1724     bool is_user = regime_is_user(env, mmu_idx);
1725     int n;
1726     int matchregion = -1;
1727     bool hit = false;
1728     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1729     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1730 
1731     result->f.lg_page_size = TARGET_PAGE_BITS;
1732     result->f.phys_addr = address;
1733     result->f.prot = 0;
1734     if (mregion) {
1735         *mregion = -1;
1736     }
1737 
1738     /*
1739      * Unlike the ARM ARM pseudocode, we don't need to check whether this
1740      * was an exception vector read from the vector table (which is always
1741      * done using the default system address map), because those accesses
1742      * are done in arm_v7m_load_vector(), which always does a direct
1743      * read using address_space_ldl(), rather than going via this function.
1744      */
1745     if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
1746         hit = true;
1747     } else if (m_is_ppb_region(env, address)) {
1748         hit = true;
1749     } else {
1750         if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1751             hit = true;
1752         }
1753 
1754         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1755             /* region search */
1756             /*
1757              * Note that the base address is bits [31:5] from the register
1758              * with bits [4:0] all zeroes, but the limit address is bits
1759              * [31:5] from the register with bits [4:0] all ones.
1760              */
1761             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1762             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
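                 /*
                  * For example, rbar 0x20000000 with rlar 0x2000ffe1
                  * describes an enabled 64 KiB region covering
                  * 0x20000000..0x2000ffff (RLAR bit 0 is the enable bit).
                  */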
1763 
1764             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1765                 /* Region disabled */
1766                 continue;
1767             }
1768 
1769             if (address < base || address > limit) {
1770                 /*
1771                  * Address not in this region. We must check whether the
1772                  * region covers addresses in the same page as our address.
1773                  * In that case we must not report a size that covers the
1774                  * whole page for a subsequent hit against a different MPU
1775                  * region or the background region, because it would result in
1776                  * incorrect TLB hits for subsequent accesses to addresses that
1777                  * are in this MPU region.
1778                  */
1779                 if (limit >= base &&
1780                     ranges_overlap(base, limit - base + 1,
1781                                    addr_page_base,
1782                                    TARGET_PAGE_SIZE)) {
1783                     result->f.lg_page_size = 0;
1784                 }
1785                 continue;
1786             }
1787 
1788             if (base > addr_page_base || limit < addr_page_limit) {
1789                 result->f.lg_page_size = 0;
1790             }
1791 
1792             if (matchregion != -1) {
1793                 /*
1794                  * Multiple regions match -- always a failure (unlike
1795                  * PMSAv7 where highest-numbered-region wins)
1796                  */
1797                 fi->type = ARMFault_Permission;
1798                 fi->level = 1;
1799                 return true;
1800             }
1801 
1802             matchregion = n;
1803             hit = true;
1804         }
1805     }
1806 
1807     if (!hit) {
1808         /* background fault */
1809         fi->type = ARMFault_Background;
1810         return true;
1811     }
1812 
1813     if (matchregion == -1) {
1814         /* hit using the background region */
1815         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1816     } else {
1817         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
1818         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
1819         bool pxn = false;
1820 
1821         if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1822             pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
1823         }
1824 
1825         if (m_is_system_region(env, address)) {
1826             /* System space is always execute never */
1827             xn = 1;
1828         }
1829 
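             /*
              * RBAR.AP uses the simple AP encoding: 0 = privileged-only
              * read/write, 1 = read/write at any privilege, 2 =
              * privileged-only read-only, 3 = read-only at any privilege.
              */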
1830         result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
1831         if (result->f.prot && !xn && !(pxn && !is_user)) {
1832             result->f.prot |= PAGE_EXEC;
1833         }
1834         /*
1835          * We don't need to look the attribute up in the MAIR0/MAIR1
1836          * registers because that only tells us about cacheability.
1837          */
1838         if (mregion) {
1839             *mregion = matchregion;
1840         }
1841     }
1842 
1843     fi->type = ARMFault_Permission;
1844     fi->level = 1;
1845     return !(result->f.prot & (1 << access_type));
1846 }
1847 
1848 static bool v8m_is_sau_exempt(CPUARMState *env,
1849                               uint32_t address, MMUAccessType access_type)
1850 {
1851     /*
1852      * The architecture specifies that certain address ranges are
1853      * exempt from v8M SAU/IDAU checks.
1854      */
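         /*
          * The fixed ranges below correspond to the ITM/DWT/FPB block,
          * the System Control Space, its Non-secure alias, the TPIU/ETM
          * block and the ROM table.
          */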
1855     return
1856         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
1857         (address >= 0xe0000000 && address <= 0xe0002fff) ||
1858         (address >= 0xe000e000 && address <= 0xe000efff) ||
1859         (address >= 0xe002e000 && address <= 0xe002efff) ||
1860         (address >= 0xe0040000 && address <= 0xe0041fff) ||
1861         (address >= 0xe00ff000 && address <= 0xe00fffff);
1862 }
1863 
1864 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1865                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1866                          bool is_secure, V8M_SAttributes *sattrs)
1867 {
1868     /*
1869      * Look up the security attributes for this address. Compare the
1870      * pseudocode SecurityCheck() function.
1871      * We assume the caller has zero-initialized *sattrs.
1872      */
1873     ARMCPU *cpu = env_archcpu(env);
1874     int r;
1875     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
1876     int idau_region = IREGION_NOTVALID;
1877     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1878     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1879 
1880     if (cpu->idau) {
1881         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
1882         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
1883 
1884         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
1885                    &idau_nsc);
1886     }
1887 
1888     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
1889         /* 0xf0000000..0xffffffff is always S for insn fetches */
1890         return;
1891     }
1892 
1893     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
1894         sattrs->ns = !is_secure;
1895         return;
1896     }
1897 
1898     if (idau_region != IREGION_NOTVALID) {
1899         sattrs->irvalid = true;
1900         sattrs->iregion = idau_region;
1901     }
1902 
1903     switch (env->sau.ctrl & 3) {
1904     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
1905         break;
1906     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
1907         sattrs->ns = true;
1908         break;
1909     default: /* SAU.ENABLE == 1 */
1910         for (r = 0; r < cpu->sau_sregion; r++) {
1911             if (env->sau.rlar[r] & 1) {
1912                 uint32_t base = env->sau.rbar[r] & ~0x1f;
1913                 uint32_t limit = env->sau.rlar[r] | 0x1f;
1914 
1915                 if (base <= address && limit >= address) {
1916                     if (base > addr_page_base || limit < addr_page_limit) {
1917                         sattrs->subpage = true;
1918                     }
1919                     if (sattrs->srvalid) {
1920                         /*
1921                          * If we hit in more than one region then we must report
1922                          * as Secure, not NS-Callable, with no valid region
1923                          * number info.
1924                          */
1925                         sattrs->ns = false;
1926                         sattrs->nsc = false;
1927                         sattrs->sregion = 0;
1928                         sattrs->srvalid = false;
1929                         break;
1930                     } else {
1931                         if (env->sau.rlar[r] & 2) {
1932                             sattrs->nsc = true;
1933                         } else {
1934                             sattrs->ns = true;
1935                         }
1936                         sattrs->srvalid = true;
1937                         sattrs->sregion = r;
1938                     }
1939                 } else {
1940                     /*
1941                      * Address not in this region. We must check whether the
1942                      * region covers addresses in the same page as our address.
1943                      * In that case we must not report a size that covers the
1944                      * whole page for a subsequent hit against a different SAU
1945                      * region or the background region, because it would result
1946                      * in incorrect TLB hits for subsequent accesses to
1947                      * addresses that are in this SAU region.
1948                      */
1949                     if (limit >= base &&
1950                         ranges_overlap(base, limit - base + 1,
1951                                        addr_page_base,
1952                                        TARGET_PAGE_SIZE)) {
1953                         sattrs->subpage = true;
1954                     }
1955                 }
1956             }
1957         }
1958         break;
1959     }
1960 
1961     /*
1962      * The IDAU will override the SAU lookup results if it specifies
1963      * higher security than the SAU does.
1964      */
1965     if (!idau_ns) {
1966         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
1967             sattrs->ns = false;
1968             sattrs->nsc = idau_nsc;
1969         }
1970     }
1971 }
1972 
1973 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
1974                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1975                                  bool secure, GetPhysAddrResult *result,
1976                                  ARMMMUFaultInfo *fi)
1977 {
1978     V8M_SAttributes sattrs = {};
1979     bool ret;
1980 
1981     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1982         v8m_security_lookup(env, address, access_type, mmu_idx,
1983                             secure, &sattrs);
1984         if (access_type == MMU_INST_FETCH) {
1985             /*
1986              * Instruction fetches always use the MMU bank and the
1987              * transaction attribute determined by the fetch address,
1988              * regardless of CPU state. This is painful for QEMU
1989              * to handle, because it would mean we need to encode
1990              * into the mmu_idx not just the (user, negpri) information
1991              * for the current security state but also that for the
1992              * other security state, which would balloon the number
1993              * of mmu_idx values needed alarmingly.
1994              * Fortunately we can avoid this because it's not actually
1995              * possible to arbitrarily execute code from memory with
1996              * the wrong security attribute: it will always generate
1997              * an exception of some kind or another, apart from the
1998              * special case of an NS CPU executing an SG instruction
1999              * in S&NSC memory. So we always just fail the translation
2000              * here and sort things out in the exception handler
2001              * (including possibly emulating an SG instruction).
2002              */
2003             if (sattrs.ns != !secure) {
2004                 if (sattrs.nsc) {
2005                     fi->type = ARMFault_QEMU_NSCExec;
2006                 } else {
2007                     fi->type = ARMFault_QEMU_SFault;
2008                 }
2009                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2010                 result->f.phys_addr = address;
2011                 result->f.prot = 0;
2012                 return true;
2013             }
2014         } else {
2015             /*
2016              * For data accesses we always use the MMU bank indicated
2017              * by the current CPU state, but the security attributes
2018              * might downgrade a secure access to nonsecure.
2019              */
2020             if (sattrs.ns) {
2021                 result->f.attrs.secure = false;
2022             } else if (!secure) {
2023                 /*
2024                  * NS access to S memory must fault.
2025                  * Architecturally we should first check whether the
2026                  * MPU information for this address indicates that we
2027                  * are doing an unaligned access to Device memory, which
2028                  * should generate a UsageFault instead. QEMU does not
2029                  * currently check for that kind of unaligned access though.
2030                  * If we added it we would need to do so as a special case
2031                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2032                  */
2033                 fi->type = ARMFault_QEMU_SFault;
2034                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
2035                 result->f.phys_addr = address;
2036                 result->f.prot = 0;
2037                 return true;
2038             }
2039         }
2040     }
2041 
2042     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2043                             result, fi, NULL);
2044     if (sattrs.subpage) {
2045         result->f.lg_page_size = 0;
2046     }
2047     return ret;
2048 }
2049 
2050 /*
2051  * Translate from the 4-bit stage 2 representation of
2052  * memory attributes (without cache-allocation hints) to
2053  * the 8-bit representation of the stage 1 MAIR registers
2054  * (which includes allocation hints).
2055  *
2056  * ref: shared/translation/attrs/S2AttrDecode()
2057  *      .../S2ConvertAttrsHints()
2058  */
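     /*
      * For example, s2attrs 0xf (Normal, Outer and Inner Write-Back)
      * converts to MAIR attr 0xff (Write-Back, Read/Write-allocate) when
      * HCR_EL2.CD is clear, or to 0x44 (Normal Non-cacheable) when it is set.
      */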
2059 static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2060 {
2061     uint8_t hiattr = extract32(s2attrs, 2, 2);
2062     uint8_t loattr = extract32(s2attrs, 0, 2);
2063     uint8_t hihint = 0, lohint = 0;
2064 
2065     if (hiattr != 0) { /* normal memory */
2066         if (hcr & HCR_CD) { /* cache disabled */
2067             hiattr = loattr = 1; /* non-cacheable */
2068         } else {
2069             if (hiattr != 1) { /* Write-through or write-back */
2070                 hihint = 3; /* RW allocate */
2071             }
2072             if (loattr != 1) { /* Write-through or write-back */
2073                 lohint = 3; /* RW allocate */
2074             }
2075         }
2076     }
2077 
2078     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2079 }
2080 
2081 /*
2082  * Combine either inner or outer cacheability attributes for normal
2083  * memory, according to table D4-42 and pseudocode procedure
2084  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2085  *
2086  * NB: only stage 1 includes allocation hints (RW bits), leading to
2087  * some asymmetry.
2088  */
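     /*
      * For example, a stage 1 nibble of 0xf (Write-Back, RW-allocate)
      * combined with a stage 2 nibble of 0xa (Write-Through, Read-allocate)
      * yields 0xb: stage 2's Write-Through wins, but the allocation hints
      * come from stage 1.
      */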
2089 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2090 {
2091     if (s1 == 4 || s2 == 4) {
2092         /* non-cacheable has precedence */
2093         return 4;
2094     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2095         /* stage 1 write-through takes precedence */
2096         return s1;
2097     } else if (extract32(s2, 2, 2) == 2) {
2098         /* stage 2 write-through takes precedence, but the allocation hint
2099          * is still taken from stage 1
2100          */
2101         return (2 << 2) | extract32(s1, 0, 2);
2102     } else { /* write-back */
2103         return s1;
2104     }
2105 }
2106 
2107 /*
2108  * Combine the memory type and cacheability attributes of
2109  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2110  * combined attributes in MAIR_EL1 format.
2111  */
2112 static uint8_t combined_attrs_nofwb(uint64_t hcr,
2113                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
2114 {
2115     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2116 
2117     s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2118 
2119     s1lo = extract32(s1.attrs, 0, 4);
2120     s2lo = extract32(s2_mair_attrs, 0, 4);
2121     s1hi = extract32(s1.attrs, 4, 4);
2122     s2hi = extract32(s2_mair_attrs, 4, 4);
2123 
2124     /* Combine memory type and cacheability attributes */
2125     if (s1hi == 0 || s2hi == 0) {
2126         /* Device has precedence over normal */
2127         if (s1lo == 0 || s2lo == 0) {
2128             /* nGnRnE has precedence over anything */
2129             ret_attrs = 0;
2130         } else if (s1lo == 4 || s2lo == 4) {
2131             /* non-Reordering has precedence over Reordering */
2132             ret_attrs = 4;  /* nGnRE */
2133         } else if (s1lo == 8 || s2lo == 8) {
2134             /* non-Gathering has precedence over Gathering */
2135             ret_attrs = 8;  /* nGRE */
2136         } else {
2137             ret_attrs = 0xc; /* GRE */
2138         }
2139     } else { /* Normal memory */
2140         /* Outer/inner cacheability combine independently */
2141         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2142                   | combine_cacheattr_nibble(s1lo, s2lo);
2143     }
2144     return ret_attrs;
2145 }
2146 
2147 static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2148 {
2149     /*
2150      * Given the 4 bits specifying the outer or inner cacheability
2151      * in MAIR format, return a value specifying Normal Write-Back,
2152      * with the allocation and transient hints taken from the input
2153      * if the input specified some kind of cacheable attribute.
2154      */
2155     if (attr == 0 || attr == 4) {
2156         /*
2157          * 0 == an UNPREDICTABLE encoding
2158          * 4 == Non-cacheable
2159          * Either way, force Write-Back RW allocate non-transient
2160          */
2161         return 0xf;
2162     }
2163     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
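         /* e.g. 0xa (WT, Read-allocate) becomes 0xe (WB, Read-allocate) */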
2164     return attr | 4;
2165 }
2166 
2167 /*
2168  * Combine the memory type and cacheability attributes of
2169  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2170  * combined attributes in MAIR_EL1 format.
2171  */
2172 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
2173 {
2174     switch (s2.attrs) {
2175     case 7:
2176         /* Use stage 1 attributes */
2177         return s1.attrs;
2178     case 6:
2179         /*
2180          * Force Normal Write-Back. Note that if S1 is Normal cacheable
2181          * then we take the allocation hints from it; otherwise it is
2182          * RW allocate, non-transient.
2183          */
2184         if ((s1.attrs & 0xf0) == 0) {
2185             /* S1 is Device */
2186             return 0xff;
2187         }
2188         /* Need to check the Inner and Outer nibbles separately */
2189         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2190             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2191     case 5:
2192         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2193         if ((s1.attrs & 0xf0) == 0) {
2194             return s1.attrs;
2195         }
2196         return 0x44;
2197     case 0 ... 3:
2198         /* Force Device, of subtype specified by S2 */
2199         return s2.attrs << 2;
2200     default:
2201         /*
2202          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2203          * arbitrarily force Device.
2204          */
2205         return 0;
2206     }
2207 }
2208 
2209 /*
2210  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2211  * and CombineS1S2Desc()
2212  *
2213  * @env:     CPUARMState
2214  * @s1:      Attributes from stage 1 walk
2215  * @s2:      Attributes from stage 2 walk
2216  */
2217 static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
2218                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
2219 {
2220     ARMCacheAttrs ret;
2221     bool tagged = false;
2222 
2223     assert(s2.is_s2_format && !s1.is_s2_format);
2224     ret.is_s2_format = false;
2225 
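         /*
          * MAIR attr 0xf0 is Tagged Normal Write-Back (FEAT_MTE); combine
          * it as plain Write-Back and restore the Tagged encoding at the
          * end if the result is still Write-Back RW-allocate.
          */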
2226     if (s1.attrs == 0xf0) {
2227         tagged = true;
2228         s1.attrs = 0xff;
2229     }
2230 
2231     /* Combine shareability attributes (table D4-43) */
2232     if (s1.shareability == 2 || s2.shareability == 2) {
2233         /* if either is outer-shareable, the result is outer-shareable */
2234         ret.shareability = 2;
2235     } else if (s1.shareability == 3 || s2.shareability == 3) {
2236         /* if either is inner-shareable, the result is inner-shareable */
2237         ret.shareability = 3;
2238     } else {
2239         /* both non-shareable */
2240         ret.shareability = 0;
2241     }
2242 
2243     /* Combine memory type and cacheability attributes */
2244     if (hcr & HCR_FWB) {
2245         ret.attrs = combined_attrs_fwb(s1, s2);
2246     } else {
2247         ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
2248     }
2249 
2250     /*
2251      * Any location for which the resultant memory type is any
2252      * type of Device memory is always treated as Outer Shareable.
2253      * Any location for which the resultant memory type is Normal
2254      * Inner Non-cacheable, Outer Non-cacheable is always treated
2255      * as Outer Shareable.
2256      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2257      */
2258     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2259         ret.shareability = 2;
2260     }
2261 
2262     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2263     if (tagged && ret.attrs == 0xff) {
2264         ret.attrs = 0xf0;
2265     }
2266 
2267     return ret;
2268 }
2269 
2270 /*
2271  * MMU disabled.  S1 addresses within aa64 translation regimes are
2272  * still checked for bounds -- see AArch64.S1DisabledOutput().
2273  */
2274 static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2275                                    MMUAccessType access_type,
2276                                    ARMMMUIdx mmu_idx, bool is_secure,
2277                                    GetPhysAddrResult *result,
2278                                    ARMMMUFaultInfo *fi)
2279 {
2280     uint8_t memattr = 0x00;    /* Device nGnRnE */
2281     uint8_t shareability = 0;  /* non-shareable */
2282 
2283     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
2284         int r_el = regime_el(env, mmu_idx);
2285 
2286         if (arm_el_is_aa64(env, r_el)) {
2287             int pamax = arm_pamax(env_archcpu(env));
2288             uint64_t tcr = env->cp15.tcr_el[r_el];
2289             int addrtop, tbi;
2290 
2291             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2292             if (access_type == MMU_INST_FETCH) {
2293                 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2294             }
2295             tbi = (tbi >> extract64(address, 55, 1)) & 1;
2296             addrtop = (tbi ? 55 : 63);
2297 
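                 /*
                  * For example, with a 40-bit PA range and TBI enabled for
                  * this address, bits [55:40] must be zero (the top byte is
                  * ignored); with TBI disabled, bits [63:40] must be zero.
                  */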
2298             if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2299                 fi->type = ARMFault_AddressSize;
2300                 fi->level = 0;
2301                 fi->stage2 = false;
2302                 return true;
2303             }
2304 
2305             /*
2306              * When TBI is disabled, we've just validated that all of the
2307              * bits above PAMax are zero, so logically we only need to
2308              * clear the top byte for TBI.  But it's clearer to follow
2309              * the pseudocode set of addrdesc.paddress.
2310              */
2311             address = extract64(address, 0, 52);
2312         }
2313 
2314         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2315         if (r_el == 1) {
2316             uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2317             if (hcr & HCR_DC) {
2318                 if (hcr & HCR_DCT) {
2319                     memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2320                 } else {
2321                     memattr = 0xff;  /* Normal, WB, RWA */
2322                 }
2323             }
2324         }
2325         if (memattr == 0 && access_type == MMU_INST_FETCH) {
2326             if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2327                 memattr = 0xee;  /* Normal, WT, RA, NT */
2328             } else {
2329                 memattr = 0x44;  /* Normal, NC, No */
2330             }
2331             shareability = 2; /* outer shareable */
2332         }
2333         result->cacheattrs.is_s2_format = false;
2334     }
2335 
2336     result->f.phys_addr = address;
2337     result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2338     result->f.lg_page_size = TARGET_PAGE_BITS;
2339     result->cacheattrs.shareability = shareability;
2340     result->cacheattrs.attrs = memattr;
2341     return false;
2342 }
2343 
2344 bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
2345                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
2346                                bool is_secure, GetPhysAddrResult *result,
2347                                ARMMMUFaultInfo *fi)
2348 {
2349     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
2350 
2351     if (mmu_idx != s1_mmu_idx) {
2352         /*
2353          * Call ourselves recursively to do the stage 1 and then stage 2
2354          * translations if mmu_idx is a two-stage regime.
2355          */
2356         if (arm_feature(env, ARM_FEATURE_EL2)) {
2357             hwaddr ipa;
2358             int s1_prot;
2359             int ret;
2360             bool ipa_secure, s2walk_secure;
2361             ARMCacheAttrs cacheattrs1;
2362             ARMMMUIdx s2_mmu_idx;
2363             bool is_el0;
2364             uint64_t hcr;
2365 
2366             ret = get_phys_addr_with_secure(env, address, access_type,
2367                                             s1_mmu_idx, is_secure, result, fi);
2368 
2369             /* If S1 fails or S2 is disabled, return early.  */
2370             if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
2371                                                    is_secure)) {
2372                 return ret;
2373             }
2374 
2375             ipa = result->f.phys_addr;
2376             ipa_secure = result->f.attrs.secure;
2377             if (is_secure) {
2378                 /* Select TCR based on the NS bit from the S1 walk. */
2379                 s2walk_secure = !(ipa_secure
2380                                   ? env->cp15.vstcr_el2 & VSTCR_SW
2381                                   : env->cp15.vtcr_el2 & VTCR_NSW);
2382             } else {
2383                 assert(!ipa_secure);
2384                 s2walk_secure = false;
2385             }
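                 /*
                  * VSTCR_EL2.SW and VTCR_EL2.NSW, when set, make the stage 2
                  * translation table walks for the Secure and Non-secure IPA
                  * spaces respectively use the Non-secure PA space.
                  */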
2386 
2387             s2_mmu_idx = (s2walk_secure
2388                           ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
2389             is_el0 = mmu_idx == ARMMMUIdx_E10_0;
2390 
2391             /*
2392              * S1 is done, now do S2 translation.
2393              * Save the stage1 results so that we may merge
2394              * prot and cacheattrs later.
2395              */
2396             s1_prot = result->f.prot;
2397             cacheattrs1 = result->cacheattrs;
2398             memset(result, 0, sizeof(*result));
2399 
2400             ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
2401                                      s2walk_secure, is_el0, result, fi);
2402             fi->s2addr = ipa;
2403 
2404             /* Combine the S1 and S2 perms.  */
2405             result->f.prot &= s1_prot;
2406 
2407             /* If S2 fails, return early.  */
2408             if (ret) {
2409                 return ret;
2410             }
2411 
2412             /* Combine the S1 and S2 cache attributes. */
2413             hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2414             if (hcr & HCR_DC) {
2415                 /*
2416                  * HCR.DC forces the first stage attributes to
2417                  *  Normal Non-Shareable,
2418                  *  Inner Write-Back Read-Allocate Write-Allocate,
2419                  *  Outer Write-Back Read-Allocate Write-Allocate.
2420                  * Do not overwrite Tagged within attrs.
2421                  */
2422                 if (cacheattrs1.attrs != 0xf0) {
2423                     cacheattrs1.attrs = 0xff;
2424                 }
2425                 cacheattrs1.shareability = 0;
2426             }
2427             result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
2428                                                     result->cacheattrs);
2429 
2430             /*
2431              * Check if IPA translates to secure or non-secure PA space.
2432              * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
2433              */
2434             result->f.attrs.secure =
2435                 (is_secure
2436                  && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
2437                  && (ipa_secure
2438                      || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
2439 
2440             return false;
2441         } else {
2442             /*
2443              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
2444              */
2445             mmu_idx = stage_1_mmu_idx(mmu_idx);
2446         }
2447     }
2448 
2449     /*
2450      * The page table entries may downgrade secure to non-secure, but
2451      * cannot upgrade a non-secure translation regime's attributes
2452      * to secure.
2453      */
2454     result->f.attrs.secure = is_secure;
2455     result->f.attrs.user = regime_is_user(env, mmu_idx);
2456 
2457     /*
2458      * Fast Context Switch Extension. This doesn't exist at all in v8.
2459      * In v7 and earlier it affects all stage 1 translations.
2460      */
2461     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2462         && !arm_feature(env, ARM_FEATURE_V8)) {
2463         if (regime_el(env, mmu_idx) == 3) {
2464             address += env->cp15.fcseidr_s;
2465         } else {
2466             address += env->cp15.fcseidr_ns;
2467         }
2468     }
2469 
2470     if (arm_feature(env, ARM_FEATURE_PMSA)) {
2471         bool ret;
2472         result->f.lg_page_size = TARGET_PAGE_BITS;
2473 
2474         if (arm_feature(env, ARM_FEATURE_V8)) {
2475             /* PMSAv8 */
2476             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2477                                        is_secure, result, fi);
2478         } else if (arm_feature(env, ARM_FEATURE_V7)) {
2479             /* PMSAv7 */
2480             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2481                                        is_secure, result, fi);
2482         } else {
2483             /* Pre-v7 MPU */
2484             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2485                                        is_secure, result, fi);
2486         }
2487         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2488                       " mmu_idx %u -> %s (prot %c%c%c)\n",
2489                       access_type == MMU_DATA_LOAD ? "reading" :
2490                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
2491                       (uint32_t)address, mmu_idx,
2492                       ret ? "Miss" : "Hit",
2493                       result->f.prot & PAGE_READ ? 'r' : '-',
2494                       result->f.prot & PAGE_WRITE ? 'w' : '-',
2495                       result->f.prot & PAGE_EXEC ? 'x' : '-');
2496 
2497         return ret;
2498     }
2499 
2500     /* Definitely a real MMU, not an MPU */
2501 
2502     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
2503         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2504                                       is_secure, result, fi);
2505     }
2506     if (regime_using_lpae_format(env, mmu_idx)) {
2507         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
2508                                   is_secure, false, result, fi);
2509     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2510         return get_phys_addr_v6(env, address, access_type, mmu_idx,
2511                                 is_secure, result, fi);
2512     } else {
2513         return get_phys_addr_v5(env, address, access_type, mmu_idx,
2514                                 is_secure, result, fi);
2515     }
2516 }
2517 
2518 bool get_phys_addr(CPUARMState *env, target_ulong address,
2519                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
2520                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2521 {
2522     bool is_secure;
2523 
2524     switch (mmu_idx) {
2525     case ARMMMUIdx_E10_0:
2526     case ARMMMUIdx_E10_1:
2527     case ARMMMUIdx_E10_1_PAN:
2528     case ARMMMUIdx_E20_0:
2529     case ARMMMUIdx_E20_2:
2530     case ARMMMUIdx_E20_2_PAN:
2531     case ARMMMUIdx_Stage1_E0:
2532     case ARMMMUIdx_Stage1_E1:
2533     case ARMMMUIdx_Stage1_E1_PAN:
2534     case ARMMMUIdx_E2:
2535         is_secure = arm_is_secure_below_el3(env);
2536         break;
2537     case ARMMMUIdx_Stage2:
2538     case ARMMMUIdx_MPrivNegPri:
2539     case ARMMMUIdx_MUserNegPri:
2540     case ARMMMUIdx_MPriv:
2541     case ARMMMUIdx_MUser:
2542         is_secure = false;
2543         break;
2544     case ARMMMUIdx_E3:
2545     case ARMMMUIdx_Stage2_S:
2546     case ARMMMUIdx_MSPrivNegPri:
2547     case ARMMMUIdx_MSUserNegPri:
2548     case ARMMMUIdx_MSPriv:
2549     case ARMMMUIdx_MSUser:
2550         is_secure = true;
2551         break;
2552     default:
2553         g_assert_not_reached();
2554     }
2555     return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
2556                                      is_secure, result, fi);
2557 }
2558 
2559 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2560                                          MemTxAttrs *attrs)
2561 {
2562     ARMCPU *cpu = ARM_CPU(cs);
2563     CPUARMState *env = &cpu->env;
2564     GetPhysAddrResult res = {};
2565     ARMMMUFaultInfo fi = {};
2566     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
2567     bool ret;
2568 
2569     ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
2570     *attrs = res.f.attrs;
2571 
2572     if (ret) {
2573         return -1;
2574     }
2575     return res.f.phys_addr;
2576 }
2577