xref: /openbmc/qemu/target/arm/ptw.c (revision 8fae3910)
1 /*
2  * ARM page table walking.
3  *
4  * This code is licensed under the GNU GPL v2 or later.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qemu/range.h"
12 #include "cpu.h"
13 #include "internals.h"
14 #include "idau.h"
15 
16 
17 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
18                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
19                                bool s1_is_el0, GetPhysAddrResult *result,
20                                ARMMMUFaultInfo *fi)
21     __attribute__((nonnull));
22 
23 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
24 static const uint8_t pamax_map[] = {
25     [0] = 32,
26     [1] = 36,
27     [2] = 40,
28     [3] = 42,
29     [4] = 44,
30     [5] = 48,
31     [6] = 52,
32 };
33 
34 /* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
35 unsigned int arm_pamax(ARMCPU *cpu)
36 {
37     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
38         unsigned int parange =
39             FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
40 
41         /*
42          * id_aa64mmfr0 is a read-only register so values outside of the
43          * supported mappings can be considered an implementation error.
44          */
45         assert(parange < ARRAY_SIZE(pamax_map));
46         return pamax_map[parange];
47     }
48 
49     /*
50      * In machvirt_init, we call arm_pamax on a cpu that is not fully
51      * initialized, so we can't rely on the propagation done in realize.
52      */
53     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
54         arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
55         /* v7 with LPAE */
56         return 40;
57     }
58     /* Anything else */
59     return 32;
60 }
61 
62 /*
63  * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
64  */
65 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
66 {
67     switch (mmu_idx) {
68     case ARMMMUIdx_SE10_0:
69         return ARMMMUIdx_Stage1_SE0;
70     case ARMMMUIdx_SE10_1:
71         return ARMMMUIdx_Stage1_SE1;
72     case ARMMMUIdx_SE10_1_PAN:
73         return ARMMMUIdx_Stage1_SE1_PAN;
74     case ARMMMUIdx_E10_0:
75         return ARMMMUIdx_Stage1_E0;
76     case ARMMMUIdx_E10_1:
77         return ARMMMUIdx_Stage1_E1;
78     case ARMMMUIdx_E10_1_PAN:
79         return ARMMMUIdx_Stage1_E1_PAN;
80     default:
81         return mmu_idx;
82     }
83 }
84 
85 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
86 {
87     return stage_1_mmu_idx(arm_mmu_idx(env));
88 }
89 
90 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
91 {
92     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
93 }
94 
95 static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
96 {
97     switch (mmu_idx) {
98     case ARMMMUIdx_SE10_0:
99     case ARMMMUIdx_E20_0:
100     case ARMMMUIdx_SE20_0:
101     case ARMMMUIdx_Stage1_E0:
102     case ARMMMUIdx_Stage1_SE0:
103     case ARMMMUIdx_MUser:
104     case ARMMMUIdx_MSUser:
105     case ARMMMUIdx_MUserNegPri:
106     case ARMMMUIdx_MSUserNegPri:
107         return true;
108     default:
109         return false;
110     case ARMMMUIdx_E10_0:
111     case ARMMMUIdx_E10_1:
112     case ARMMMUIdx_E10_1_PAN:
113         g_assert_not_reached();
114     }
115 }
116 
117 /* Return the TTBR associated with this translation regime */
118 static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
119 {
120     if (mmu_idx == ARMMMUIdx_Stage2) {
121         return env->cp15.vttbr_el2;
122     }
123     if (mmu_idx == ARMMMUIdx_Stage2_S) {
124         return env->cp15.vsttbr_el2;
125     }
126     if (ttbrn == 0) {
127         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
128     } else {
129         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
130     }
131 }
132 
133 /* Return true if the specified stage of address translation is disabled */
134 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
135 {
136     uint64_t hcr_el2;
137 
138     if (arm_feature(env, ARM_FEATURE_M)) {
139         switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
140                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
141         case R_V7M_MPU_CTRL_ENABLE_MASK:
142             /* Enabled, but not for HardFault and NMI */
143             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
144         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
145             /* Enabled for all cases */
146             return false;
147         case 0:
148         default:
149             /*
150              * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
151              * we warned about that in armv7m_nvic.c when the guest set it.
152              */
153             return true;
154         }
155     }
156 
157     hcr_el2 = arm_hcr_el2_eff(env);
158 
159     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
160         /* HCR.DC means HCR.VM behaves as 1 */
161         return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
162     }
163 
164     if (hcr_el2 & HCR_TGE) {
165         /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
166         if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
167             return true;
168         }
169     }
170 
171     if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
172         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
173         return true;
174     }
175 
176     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
177 }
178 
179 static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
180 {
181     /*
182      * For an S1 page table walk, the stage 1 attributes are always
183      * some form of "this is Normal memory". The combined S1+S2
184      * attributes are therefore only Device if stage 2 specifies Device.
185      * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
186      * ie when cacheattrs.attrs bits [3:2] are 0b00.
187      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
188      * when cacheattrs.attrs bit [2] is 0.
189      */
190     assert(cacheattrs.is_s2_format);
191     if (arm_hcr_el2_eff(env) & HCR_FWB) {
192         return (cacheattrs.attrs & 0x4) == 0;
193     } else {
194         return (cacheattrs.attrs & 0xc) == 0;
195     }
196 }
197 
198 /* Translate an S1 pagetable walk through S2 if needed.  */
199 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
200                                hwaddr addr, bool *is_secure,
201                                ARMMMUFaultInfo *fi)
202 {
203     if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
204         !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
205         ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
206                                           : ARMMMUIdx_Stage2;
207         GetPhysAddrResult s2 = {};
208         int ret;
209 
210         ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
211                                  &s2, fi);
212         if (ret) {
213             assert(fi->type != ARMFault_None);
214             fi->s2addr = addr;
215             fi->stage2 = true;
216             fi->s1ptw = true;
217             fi->s1ns = !*is_secure;
218             return ~0;
219         }
220         if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
221             ptw_attrs_are_device(env, s2.cacheattrs)) {
222             /*
223              * PTW set and S1 walk touched S2 Device memory:
224              * generate Permission fault.
225              */
226             fi->type = ARMFault_Permission;
227             fi->s2addr = addr;
228             fi->stage2 = true;
229             fi->s1ptw = true;
230             fi->s1ns = !*is_secure;
231             return ~0;
232         }
233 
234         if (arm_is_secure_below_el3(env)) {
235             /* Check if page table walk is to secure or non-secure PA space. */
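                /*
                 * VSTCR_EL2.SW and VTCR_EL2.NSW, when set, send the walks for
                 * the Secure and Non-secure IPA spaces respectively to the
                 * Non-secure PA space.
                 */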
236             if (*is_secure) {
237                 *is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
238             } else {
239                 *is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
240             }
241         } else {
242             assert(!*is_secure);
243         }
244 
245         addr = s2.phys;
246     }
247     return addr;
248 }
249 
250 /* All loads done in the course of a page table walk go through here. */
251 static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
252                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
253 {
254     CPUState *cs = env_cpu(env);
255     MemTxAttrs attrs = {};
256     MemTxResult result = MEMTX_OK;
257     AddressSpace *as;
258     uint32_t data;
259 
260     addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
261     attrs.secure = is_secure;
262     as = arm_addressspace(cs, attrs);
263     if (fi->s1ptw) {
264         return 0;
265     }
266     if (regime_translation_big_endian(env, mmu_idx)) {
267         data = address_space_ldl_be(as, addr, attrs, &result);
268     } else {
269         data = address_space_ldl_le(as, addr, attrs, &result);
270     }
271     if (result == MEMTX_OK) {
272         return data;
273     }
274     fi->type = ARMFault_SyncExternalOnWalk;
275     fi->ea = arm_extabort_type(result);
276     return 0;
277 }
278 
279 static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
280                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
281 {
282     CPUState *cs = env_cpu(env);
283     MemTxAttrs attrs = {};
284     MemTxResult result = MEMTX_OK;
285     AddressSpace *as;
286     uint64_t data;
287 
288     addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
289     attrs.secure = is_secure;
290     as = arm_addressspace(cs, attrs);
291     if (fi->s1ptw) {
292         return 0;
293     }
294     if (regime_translation_big_endian(env, mmu_idx)) {
295         data = address_space_ldq_be(as, addr, attrs, &result);
296     } else {
297         data = address_space_ldq_le(as, addr, attrs, &result);
298     }
299     if (result == MEMTX_OK) {
300         return data;
301     }
302     fi->type = ARMFault_SyncExternalOnWalk;
303     fi->ea = arm_extabort_type(result);
304     return 0;
305 }
306 
307 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
308                                      uint32_t *table, uint32_t address)
309 {
310     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
311     uint64_t tcr = regime_tcr(env, mmu_idx);
312     int maskshift = extract32(tcr, 0, 3);
313     uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
314     uint32_t base_mask;
315 
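        /*
         * For example, with TTBCR.N == 2: mask == ~(0xffffffff >> 2) ==
         * 0xc0000000, so a VA with either of its top two bits set walks via
         * TTBR1, while base_mask becomes ~(0x3fff >> 2) == 0xfffff000, i.e.
         * the TTBR0 table must be 4KB aligned.
         */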
316     if (address & mask) {
317         if (tcr & TTBCR_PD1) {
318             /* Translation table walk disabled for TTBR1 */
319             return false;
320         }
321         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
322     } else {
323         if (tcr & TTBCR_PD0) {
324             /* Translation table walk disabled for TTBR0 */
325             return false;
326         }
327         base_mask = ~((uint32_t)0x3fffu >> maskshift);
328         *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
329     }
330     *table |= (address >> 18) & 0x3ffc;
331     return true;
332 }
333 
334 /*
335  * Translate section/page access permissions to page R/W protection flags
336  * @env:         CPUARMState
337  * @mmu_idx:     MMU index indicating required translation regime
338  * @ap:          The 3-bit access permissions (AP[2:0])
339  * @domain_prot: The 2-bit domain access permissions
340  */
341 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
342                          int ap, int domain_prot)
343 {
344     bool is_user = regime_is_user(env, mmu_idx);
345 
346     if (domain_prot == 3) {
347         return PAGE_READ | PAGE_WRITE;
348     }
349 
350     switch (ap) {
351     case 0:
352         if (arm_feature(env, ARM_FEATURE_V7)) {
353             return 0;
354         }
355         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
356         case SCTLR_S:
357             return is_user ? 0 : PAGE_READ;
358         case SCTLR_R:
359             return PAGE_READ;
360         default:
361             return 0;
362         }
363     case 1:
364         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
365     case 2:
366         if (is_user) {
367             return PAGE_READ;
368         } else {
369             return PAGE_READ | PAGE_WRITE;
370         }
371     case 3:
372         return PAGE_READ | PAGE_WRITE;
373     case 4: /* Reserved.  */
374         return 0;
375     case 5:
376         return is_user ? 0 : PAGE_READ;
377     case 6:
378         return PAGE_READ;
379     case 7:
380         if (!arm_feature(env, ARM_FEATURE_V6K)) {
381             return 0;
382         }
383         return PAGE_READ;
384     default:
385         g_assert_not_reached();
386     }
387 }
388 
389 /*
390  * Translate section/page access permissions to page R/W protection flags.
391  * @ap:      The 2-bit simple AP (AP[2:1])
392  * @is_user: TRUE if accessing from PL0
393  */
394 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
395 {
396     switch (ap) {
397     case 0:
398         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
399     case 1:
400         return PAGE_READ | PAGE_WRITE;
401     case 2:
402         return is_user ? 0 : PAGE_READ;
403     case 3:
404         return PAGE_READ;
405     default:
406         g_assert_not_reached();
407     }
408 }
409 
410 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
411 {
412     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
413 }
414 
415 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
416                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
417                              bool is_secure, GetPhysAddrResult *result,
418                              ARMMMUFaultInfo *fi)
419 {
420     int level = 1;
421     uint32_t table;
422     uint32_t desc;
423     int type;
424     int ap;
425     int domain = 0;
426     int domain_prot;
427     hwaddr phys_addr;
428     uint32_t dacr;
429 
430     /* Pagetable walk.  */
431     /* Lookup l1 descriptor.  */
432     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
433         /* Section translation fault if page walk is disabled by PD0 or PD1 */
434         fi->type = ARMFault_Translation;
435         goto do_fault;
436     }
437     desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
438     if (fi->type != ARMFault_None) {
439         goto do_fault;
440     }
441     type = (desc & 3);
442     domain = (desc >> 5) & 0x0f;
443     if (regime_el(env, mmu_idx) == 1) {
444         dacr = env->cp15.dacr_ns;
445     } else {
446         dacr = env->cp15.dacr_s;
447     }
448     domain_prot = (dacr >> (domain * 2)) & 3;
449     if (type == 0) {
450         /* Section translation fault.  */
451         fi->type = ARMFault_Translation;
452         goto do_fault;
453     }
454     if (type != 2) {
455         level = 2;
456     }
457     if (domain_prot == 0 || domain_prot == 2) {
458         fi->type = ARMFault_Domain;
459         goto do_fault;
460     }
461     if (type == 2) {
462         /* 1Mb section.  */
463         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
464         ap = (desc >> 10) & 3;
465         result->page_size = 1024 * 1024;
466     } else {
467         /* Lookup l2 entry.  */
468         if (type == 1) {
469             /* Coarse pagetable.  */
470             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
471         } else {
472             /* Fine pagetable.  */
473             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
474         }
475         desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
476         if (fi->type != ARMFault_None) {
477             goto do_fault;
478         }
479         switch (desc & 3) {
480         case 0: /* Page translation fault.  */
481             fi->type = ARMFault_Translation;
482             goto do_fault;
483         case 1: /* 64k page.  */
484             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
485             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
486             result->page_size = 0x10000;
487             break;
488         case 2: /* 4k page.  */
489             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
490             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
491             result->page_size = 0x1000;
492             break;
493         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
494             if (type == 1) {
495                 /* ARMv6/XScale extended small page format */
496                 if (arm_feature(env, ARM_FEATURE_XSCALE)
497                     || arm_feature(env, ARM_FEATURE_V6)) {
498                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
499                     result->page_size = 0x1000;
500                 } else {
501                     /*
502                      * UNPREDICTABLE in ARMv5; we choose to take a
503                      * page translation fault.
504                      */
505                     fi->type = ARMFault_Translation;
506                     goto do_fault;
507                 }
508             } else {
509                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
510                 result->page_size = 0x400;
511             }
512             ap = (desc >> 4) & 3;
513             break;
514         default:
515             /* Never happens, but compiler isn't smart enough to tell.  */
516             g_assert_not_reached();
517         }
518     }
519     result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
520     result->prot |= result->prot ? PAGE_EXEC : 0;
521     if (!(result->prot & (1 << access_type))) {
522         /* Access permission fault.  */
523         fi->type = ARMFault_Permission;
524         goto do_fault;
525     }
526     result->phys = phys_addr;
527     return false;
528 do_fault:
529     fi->domain = domain;
530     fi->level = level;
531     return true;
532 }
533 
534 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
535                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
536                              bool is_secure, GetPhysAddrResult *result,
537                              ARMMMUFaultInfo *fi)
538 {
539     ARMCPU *cpu = env_archcpu(env);
540     int level = 1;
541     uint32_t table;
542     uint32_t desc;
543     uint32_t xn;
544     uint32_t pxn = 0;
545     int type;
546     int ap;
547     int domain = 0;
548     int domain_prot;
549     hwaddr phys_addr;
550     uint32_t dacr;
551     bool ns;
552 
553     /* Pagetable walk.  */
554     /* Lookup l1 descriptor.  */
555     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
556         /* Section translation fault if page walk is disabled by PD0 or PD1 */
557         fi->type = ARMFault_Translation;
558         goto do_fault;
559     }
560     desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
561     if (fi->type != ARMFault_None) {
562         goto do_fault;
563     }
564     type = (desc & 3);
565     if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
566         /*
567          * Section translation fault, or attempt to use the encoding
568          * which is Reserved on implementations without PXN.
             */
569         fi->type = ARMFault_Translation;
570         goto do_fault;
571     }
572     if ((type == 1) || !(desc & (1 << 18))) {
573         /* Page or Section.  */
574         domain = (desc >> 5) & 0x0f;
575     }
576     if (regime_el(env, mmu_idx) == 1) {
577         dacr = env->cp15.dacr_ns;
578     } else {
579         dacr = env->cp15.dacr_s;
580     }
581     if (type == 1) {
582         level = 2;
583     }
584     domain_prot = (dacr >> (domain * 2)) & 3;
585     if (domain_prot == 0 || domain_prot == 2) {
586         /* Section or Page domain fault */
587         fi->type = ARMFault_Domain;
588         goto do_fault;
589     }
590     if (type != 1) {
591         if (desc & (1 << 18)) {
592             /* Supersection.  */
593             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
594             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
595             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
596             result->page_size = 0x1000000;
597         } else {
598             /* Section.  */
599             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
600             result->page_size = 0x100000;
601         }
602         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
603         xn = desc & (1 << 4);
604         pxn = desc & 1;
605         ns = extract32(desc, 19, 1);
606     } else {
607         if (cpu_isar_feature(aa32_pxn, cpu)) {
608             pxn = (desc >> 2) & 1;
609         }
610         ns = extract32(desc, 3, 1);
611         /* Lookup l2 entry.  */
612         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
613         desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
614         if (fi->type != ARMFault_None) {
615             goto do_fault;
616         }
617         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
618         switch (desc & 3) {
619         case 0: /* Page translation fault.  */
620             fi->type = ARMFault_Translation;
621             goto do_fault;
622         case 1: /* 64k page.  */
623             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
624             xn = desc & (1 << 15);
625             result->page_size = 0x10000;
626             break;
627         case 2: case 3: /* 4k page.  */
628             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
629             xn = desc & 1;
630             result->page_size = 0x1000;
631             break;
632         default:
633             /* Never happens, but compiler isn't smart enough to tell.  */
634             g_assert_not_reached();
635         }
636     }
637     if (domain_prot == 3) {
638         result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
639     } else {
640         if (pxn && !regime_is_user(env, mmu_idx)) {
641             xn = 1;
642         }
643         if (xn && access_type == MMU_INST_FETCH) {
644             fi->type = ARMFault_Permission;
645             goto do_fault;
646         }
647 
648         if (arm_feature(env, ARM_FEATURE_V6K) &&
649                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
650             /* The simplified model uses AP[0] as an access control bit.  */
651             if ((ap & 1) == 0) {
652                 /* Access flag fault.  */
653                 fi->type = ARMFault_AccessFlag;
654                 goto do_fault;
655             }
656             result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
657         } else {
658             result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
659         }
660         if (result->prot && !xn) {
661             result->prot |= PAGE_EXEC;
662         }
663         if (!(result->prot & (1 << access_type))) {
664             /* Access permission fault.  */
665             fi->type = ARMFault_Permission;
666             goto do_fault;
667         }
668     }
669     if (ns) {
670         /*
671          * The NS bit will (as required by the architecture) have no effect if
672          * the CPU doesn't support TZ or this is a non-secure translation
673          * regime, because the attribute will already be non-secure.
             */
674         result->attrs.secure = false;
675     }
676     result->phys = phys_addr;
677     return false;
678 do_fault:
679     fi->domain = domain;
680     fi->level = level;
681     return true;
682 }
683 
684 /*
685  * Translate S2 section/page access permissions to protection flags
686  * @env:     CPUARMState
687  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
688  * @xn:      XN (execute-never) bits
689  * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
690  */
691 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
692 {
693     int prot = 0;
694 
695     if (s2ap & 1) {
696         prot |= PAGE_READ;
697     }
698     if (s2ap & 2) {
699         prot |= PAGE_WRITE;
700     }
701 
702     if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
703         switch (xn) {
704         case 0:
705             prot |= PAGE_EXEC;
706             break;
707         case 1:
708             if (s1_is_el0) {
709                 prot |= PAGE_EXEC;
710             }
711             break;
712         case 2:
713             break;
714         case 3:
715             if (!s1_is_el0) {
716                 prot |= PAGE_EXEC;
717             }
718             break;
719         default:
720             g_assert_not_reached();
721         }
722     } else {
723         if (!extract32(xn, 1, 1)) {
724             if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
725                 prot |= PAGE_EXEC;
726             }
727         }
728     }
729     return prot;
730 }
731 
732 /*
733  * Translate section/page access permissions to protection flags
734  * @env:     CPUARMState
735  * @mmu_idx: MMU index indicating required translation regime
736  * @is_aa64: TRUE if AArch64
737  * @ap:      The 2-bit simple AP (AP[2:1])
738  * @ns:      NS (non-secure) bit
739  * @xn:      XN (execute-never) bit
740  * @pxn:     PXN (privileged execute-never) bit
741  */
742 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
743                       int ap, int ns, int xn, int pxn)
744 {
745     bool is_user = regime_is_user(env, mmu_idx);
746     int prot_rw, user_rw;
747     bool have_wxn;
748     int wxn = 0;
749 
750     assert(mmu_idx != ARMMMUIdx_Stage2);
751     assert(mmu_idx != ARMMMUIdx_Stage2_S);
752 
753     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
754     if (is_user) {
755         prot_rw = user_rw;
756     } else {
757         if (user_rw && regime_is_pan(env, mmu_idx)) {
758             /* PAN forbids data accesses but doesn't affect insn fetch */
759             prot_rw = 0;
760         } else {
761             prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
762         }
763     }
764 
765     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
766         return prot_rw;
767     }
768 
769     /* TODO have_wxn should be replaced with
770      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
771      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
772      * compatible processors have EL2, which is required for [U]WXN.
773      */
774     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
775 
776     if (have_wxn) {
777         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
778     }
779 
780     if (is_aa64) {
781         if (regime_has_2_ranges(mmu_idx) && !is_user) {
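                /*
                 * Privileged execution is forbidden if PXN is set or if the
                 * page is writable from EL0.
                 */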
782             xn = pxn || (user_rw & PAGE_WRITE);
783         }
784     } else if (arm_feature(env, ARM_FEATURE_V7)) {
785         switch (regime_el(env, mmu_idx)) {
786         case 1:
787         case 3:
788             if (is_user) {
789                 xn = xn || !(user_rw & PAGE_READ);
790             } else {
791                 int uwxn = 0;
792                 if (have_wxn) {
793                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
794                 }
795                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
796                      (uwxn && (user_rw & PAGE_WRITE));
797             }
798             break;
799         case 2:
800             break;
801         }
802     } else {
803         xn = wxn = 0;
804     }
805 
806     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
807         return prot_rw;
808     }
809     return prot_rw | PAGE_EXEC;
810 }
811 
812 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
813                                           ARMMMUIdx mmu_idx)
814 {
815     uint64_t tcr = regime_tcr(env, mmu_idx);
816     uint32_t el = regime_el(env, mmu_idx);
817     int select, tsz;
818     bool epd, hpd;
819 
820     assert(mmu_idx != ARMMMUIdx_Stage2_S);
821 
822     if (mmu_idx == ARMMMUIdx_Stage2) {
823         /* VTCR */
824         bool sext = extract32(tcr, 4, 1);
825         bool sign = extract32(tcr, 3, 1);
826 
827         /*
828          * If the sign-extend bit is not the same as t0sz[3], the result
829          * is unpredictable. Flag this as a guest error.
830          */
831         if (sign != sext) {
832             qemu_log_mask(LOG_GUEST_ERROR,
833                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
834         }
835         tsz = sextract32(tcr, 0, 4) + 8;
836         select = 0;
837         hpd = false;
838         epd = false;
839     } else if (el == 2) {
840         /* HTCR */
841         tsz = extract32(tcr, 0, 3);
842         select = 0;
843         hpd = extract64(tcr, 24, 1);
844         epd = false;
845     } else {
846         int t0sz = extract32(tcr, 0, 3);
847         int t1sz = extract32(tcr, 16, 3);
848 
849         if (t1sz == 0) {
850             select = va > (0xffffffffu >> t0sz);
851         } else {
852             /* Note that we will detect errors later.  */
853             select = va >= ~(0xffffffffu >> t1sz);
854         }
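            /*
             * For example, with T0SZ == 2 and T1SZ == 0 the TTBR0 region
             * covers VAs up to 0x3fffffff and anything above selects TTBR1;
             * with T1SZ == 1, VAs at or above 0x80000000 select TTBR1.
             */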
855         if (!select) {
856             tsz = t0sz;
857             epd = extract32(tcr, 7, 1);
858             hpd = extract64(tcr, 41, 1);
859         } else {
860             tsz = t1sz;
861             epd = extract32(tcr, 23, 1);
862             hpd = extract64(tcr, 42, 1);
863         }
864         /* For aarch32, hpd0 is not enabled without t2e as well.  */
865         hpd &= extract32(tcr, 6, 1);
866     }
867 
868     return (ARMVAParameters) {
869         .tsz = tsz,
870         .select = select,
871         .epd = epd,
872         .hpd = hpd,
873     };
874 }
875 
876 /*
877  * check_s2_mmu_setup
878  * @cpu:        ARMCPU
879  * @is_aa64:    True if the translation regime is in AArch64 state
880  * @level:      Suggested starting level
881  * @inputsize:  Bitsize of IPAs
882  * @stride:     Page-table stride (See the ARM ARM)
883  * @outputsize: Bitsize of PAs
     *
884  * Returns true if the suggested S2 translation parameters are OK and
885  * false otherwise.
886  */
887 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
888                                int inputsize, int stride, int outputsize)
889 {
890     const int grainsize = stride + 3;
891     int startsizecheck;
892 
893     /*
894      * Negative levels are usually not allowed...
895      * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
896      * begins with level -1.  Note that previous feature tests will have
897      * eliminated this combination if it is not enabled.
898      */
899     if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
900         return false;
901     }
902 
903     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
904     if (startsizecheck < 1 || startsizecheck > stride + 4) {
905         return false;
906     }
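        /*
         * For example, with 4KB pages (stride 9, grainsize 12), a 40-bit
         * inputsize and a suggested starting level of 1, the check above
         * gives 40 - (2 * 9 + 12) == 10, which lies within [1, 13].
         */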
907 
908     if (is_aa64) {
909         switch (stride) {
910         case 13: /* 64KB Pages.  */
911             if (level == 0 || (level == 1 && outputsize <= 42)) {
912                 return false;
913             }
914             break;
915         case 11: /* 16KB Pages.  */
916             if (level == 0 || (level == 1 && outputsize <= 40)) {
917                 return false;
918             }
919             break;
920         case 9: /* 4KB Pages.  */
921             if (level == 0 && outputsize <= 42) {
922                 return false;
923             }
924             break;
925         default:
926             g_assert_not_reached();
927         }
928 
929         /* Inputsize checks.  */
930         if (inputsize > outputsize &&
931             (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
932             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
933             return false;
934         }
935     } else {
936         /* AArch32 only supports 4KB pages. Assert on that.  */
937         assert(stride == 9);
938 
939         if (level == 0) {
940             return false;
941         }
942     }
943     return true;
944 }
945 
946 /**
947  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
948  *
949  * Returns false if the translation was successful. Otherwise, the fields
950  * of @result may not be filled in, and the populated @fi provides
951  * information on why the translation aborted, in the format of a
952  * long-format DFSR/IFSR fault register, with the following caveat:
953  * the WnR bit is never set (the caller must do this).
954  *
955  * @env: CPUARMState
956  * @address: virtual address to get physical address for
957  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
958  * @mmu_idx: MMU index indicating required translation regime
959  * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
960  *             table walk), must be true if this is stage 2 of a stage 1+2
961  *             walk for an EL0 access. If @mmu_idx is anything else,
962  *             @s1_is_el0 is ignored.
963  * @result: set on translation success,
964  * @fi: set to fault info if the translation fails
965  */
966 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
967                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
968                                bool s1_is_el0, GetPhysAddrResult *result,
969                                ARMMMUFaultInfo *fi)
970 {
971     ARMCPU *cpu = env_archcpu(env);
972     /* Read an LPAE long-descriptor translation table. */
973     ARMFaultType fault_type = ARMFault_Translation;
974     uint32_t level;
975     ARMVAParameters param;
976     uint64_t ttbr;
977     hwaddr descaddr, indexmask, indexmask_grainsize;
978     uint32_t tableattrs;
979     target_ulong page_size;
980     uint32_t attrs;
981     int32_t stride;
982     int addrsize, inputsize, outputsize;
983     uint64_t tcr = regime_tcr(env, mmu_idx);
984     int ap, ns, xn, pxn;
985     uint32_t el = regime_el(env, mmu_idx);
986     uint64_t descaddrmask;
987     bool aarch64 = arm_el_is_aa64(env, el);
988     bool guarded = false;
989 
990     /* TODO: This code does not support shareability levels. */
991     if (aarch64) {
992         int ps;
993 
994         param = aa64_va_parameters(env, address, mmu_idx,
995                                    access_type != MMU_INST_FETCH);
996         level = 0;
997 
998         /*
999          * If TxSZ is programmed to a value larger than the maximum,
1000          * or smaller than the effective minimum, it is IMPLEMENTATION
1001          * DEFINED whether we behave as if the field were programmed
1002          * within bounds, or if a level 0 Translation fault is generated.
1003          *
1004          * With FEAT_LVA, fault on less than minimum becomes required,
1005          * so our choice is to always raise the fault.
1006          */
1007         if (param.tsz_oob) {
1008             fault_type = ARMFault_Translation;
1009             goto do_fault;
1010         }
1011 
1012         addrsize = 64 - 8 * param.tbi;
1013         inputsize = 64 - param.tsz;
1014 
1015         /*
1016          * Bound PS by PARANGE to find the effective output address size.
1017          * ID_AA64MMFR0 is a read-only register so values outside of the
1018          * supported mappings can be considered an implementation error.
1019          */
1020         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1021         ps = MIN(ps, param.ps);
1022         assert(ps < ARRAY_SIZE(pamax_map));
1023         outputsize = pamax_map[ps];
1024     } else {
1025         param = aa32_va_parameters(env, address, mmu_idx);
1026         level = 1;
1027         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1028         inputsize = addrsize - param.tsz;
1029         outputsize = 40;
1030     }
1031 
1032     /*
1033      * We determined the region when collecting the parameters, but we
1034      * have not yet validated that the address is valid for the region.
1035      * Extract the top bits and verify that they all match select.
1036      *
1037      * For aa32, if inputsize == addrsize, then we have selected the
1038      * region by exclusion in aa32_va_parameters and there is no more
1039      * validation to do here.
1040      */
1041     if (inputsize < addrsize) {
1042         target_ulong top_bits = sextract64(address, inputsize,
1043                                            addrsize - inputsize);
1044         if (-top_bits != param.select) {
1045             /* The gap between the two regions is a Translation fault */
1046             fault_type = ARMFault_Translation;
1047             goto do_fault;
1048         }
1049     }
1050 
1051     if (param.using64k) {
1052         stride = 13;
1053     } else if (param.using16k) {
1054         stride = 11;
1055     } else {
1056         stride = 9;
1057     }
1058 
1059     /*
1060      * Note that QEMU ignores shareability and cacheability attributes,
1061      * so we don't need to do anything with the SH, ORGN, IRGN fields
1062      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1063      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1064      * implement any ASID-like capability so we can ignore it (instead
1065      * we will always flush the TLB any time the ASID is changed).
1066      */
1067     ttbr = regime_ttbr(env, mmu_idx, param.select);
1068 
1069     /*
1070      * Here we should have set up all the parameters for the translation:
1071      * inputsize, ttbr, epd, stride, tbi
1072      */
1073 
1074     if (param.epd) {
1075         /*
1076          * Translation table walk disabled => Translation fault on TLB miss
1077          * Note: This is always 0 on 64-bit EL2 and EL3.
1078          */
1079         goto do_fault;
1080     }
1081 
1082     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
1083         /*
1084          * The starting level depends on the virtual address size (which can
1085          * be up to 48 bits) and the translation granule size. It indicates
1086          * the number of strides (stride bits at a time) needed to
1087          * consume the bits of the input address. In the pseudocode this is:
1088          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1089          * where their 'inputsize' is our 'inputsize', 'grainsize' is
1090          * our 'stride + 3' and 'stride' is our 'stride'.
1091          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1092          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1093          * = 4 - (inputsize - 4) / stride;
1094          */
1095         level = 4 - (inputsize - 4) / stride;
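             /*
              * For example, a 48-bit inputsize with a 4KB granule (stride 9)
              * gives level = 4 - (48 - 4) / 9 = 0, a four-level walk starting
              * at level 0, while a 30-bit inputsize gives
              * level = 4 - (30 - 4) / 9 = 2, a two-level walk.
              */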
1096     } else {
1097         /*
1098          * For stage 2 translations the starting level is specified by the
1099          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
1100          */
1101         uint32_t sl0 = extract32(tcr, 6, 2);
1102         uint32_t sl2 = extract64(tcr, 33, 1);
1103         uint32_t startlevel;
1104         bool ok;
1105 
1106         /* SL2 is RES0 unless DS=1 & 4kb granule. */
1107         if (param.ds && stride == 9 && sl2) {
1108             if (sl0 != 0) {
1109                 level = 0;
1110                 fault_type = ARMFault_Translation;
1111                 goto do_fault;
1112             }
1113             startlevel = -1;
1114         } else if (!aarch64 || stride == 9) {
1115             /* AArch32 or 4KB pages */
1116             startlevel = 2 - sl0;
1117 
1118             if (cpu_isar_feature(aa64_st, cpu)) {
1119                 startlevel &= 3;
1120             }
1121         } else {
1122             /* 16KB or 64KB pages */
1123             startlevel = 3 - sl0;
1124         }
1125 
1126         /* Check that the starting level is valid. */
1127         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1128                                 inputsize, stride, outputsize);
1129         if (!ok) {
1130             fault_type = ARMFault_Translation;
1131             goto do_fault;
1132         }
1133         level = startlevel;
1134     }
1135 
1136     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1137     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
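         /*
          * For example, a 36-bit inputsize with a 4KB granule starts at
          * level 1 with indexmask == 0x1ff: a 64-entry (512-byte) top-level
          * table indexed by IA[35:30], while later levels use the full 4KB
          * table described by indexmask_grainsize == 0xfff.
          */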
1138 
1139     /* Now we can extract the actual base address from the TTBR */
1140     descaddr = extract64(ttbr, 0, 48);
1141 
1142     /*
1143      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1144      *
1145      * Otherwise, if the base address is out of range, raise AddressSizeFault.
1146      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1147      * but we've just cleared the bits above 47, so simplify the test.
1148      */
1149     if (outputsize > 48) {
1150         descaddr |= extract64(ttbr, 2, 4) << 48;
1151     } else if (descaddr >> outputsize) {
1152         level = 0;
1153         fault_type = ARMFault_AddressSize;
1154         goto do_fault;
1155     }
1156 
1157     /*
1158      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1159      * and also to mask out CnP (bit 0) which could validly be non-zero.
1160      */
1161     descaddr &= ~indexmask;
1162 
1163     /*
1164      * For AArch32, the address field in the descriptor goes up to bit 39
1165      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1166      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1167      * bits as part of the address, which will be checked via outputsize.
1168      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1169      * the highest bits of a 52-bit output are placed elsewhere.
1170      */
1171     if (param.ds) {
1172         descaddrmask = MAKE_64BIT_MASK(0, 50);
1173     } else if (arm_feature(env, ARM_FEATURE_V8)) {
1174         descaddrmask = MAKE_64BIT_MASK(0, 48);
1175     } else {
1176         descaddrmask = MAKE_64BIT_MASK(0, 40);
1177     }
1178     descaddrmask &= ~indexmask_grainsize;
1179 
1180     /*
1181      * Secure accesses start with the page table in secure memory and
1182      * can be downgraded to non-secure at any step. Non-secure accesses
1183      * remain non-secure. We implement this by just ORing in the NSTable/NS
1184      * bits at each step.
1185      */
1186     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
1187     for (;;) {
1188         uint64_t descriptor;
1189         bool nstable;
1190 
1191         descaddr |= (address >> (stride * (4 - level))) & indexmask;
1192         descaddr &= ~7ULL;
1193         nstable = extract32(tableattrs, 4, 1);
1194         descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
1195         if (fi->type != ARMFault_None) {
1196             goto do_fault;
1197         }
1198 
1199         if (!(descriptor & 1) ||
1200             (!(descriptor & 2) && (level == 3))) {
1201             /* Invalid, or the Reserved level 3 encoding */
1202             goto do_fault;
1203         }
1204 
1205         descaddr = descriptor & descaddrmask;
1206 
1207         /*
1208          * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1209          * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1210          * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1211          * raise AddressSizeFault.
1212          */
1213         if (outputsize > 48) {
1214             if (param.ds) {
1215                 descaddr |= extract64(descriptor, 8, 2) << 50;
1216             } else {
1217                 descaddr |= extract64(descriptor, 12, 4) << 48;
1218             }
1219         } else if (descaddr >> outputsize) {
1220             fault_type = ARMFault_AddressSize;
1221             goto do_fault;
1222         }
1223 
1224         if ((descriptor & 2) && (level < 3)) {
1225             /*
1226              * Table entry. The top five bits are attributes which may
1227              * propagate down through lower levels of the table (and
1228              * which are all arranged so that 0 means "no effect", so
1229              * we can gather them up by ORing in the bits at each level).
1230              */
1231             tableattrs |= extract64(descriptor, 59, 5);
1232             level++;
1233             indexmask = indexmask_grainsize;
1234             continue;
1235         }
1236         /*
1237          * Block entry at level 1 or 2, or page entry at level 3.
1238          * These are basically the same thing, although the number
1239          * of bits we pull in from the vaddr varies. Note that although
1240          * descaddrmask masks enough of the low bits of the descriptor
1241          * to give a correct page or table address, the address field
1242          * in a block descriptor is smaller; so we need to explicitly
1243          * clear the lower bits here before ORing in the low vaddr bits.
1244          */
1245         page_size = (1ULL << ((stride * (4 - level)) + 3));
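             /*
              * With a 4KB granule this yields 4KB pages at level 3, 2MB
              * blocks at level 2 and 1GB blocks at level 1.
              */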
1246         descaddr &= ~(hwaddr)(page_size - 1);
1247         descaddr |= (address & (page_size - 1));
1248         /* Extract attributes from the descriptor */
1249         attrs = extract64(descriptor, 2, 10)
1250             | (extract64(descriptor, 52, 12) << 10);
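             /*
              * For a stage 1 descriptor this places AF at attrs[8]
              * (descriptor bit 10), AP[2:1] at attrs[5:4] (bits 7:6), NS at
              * attrs[3] (bit 5) and PXN/UXN at attrs[11] and attrs[12]
              * (bits 53 and 54).
              */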
1251 
1252         if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1253             /* Stage 2 table descriptors do not include any attribute fields */
1254             break;
1255         }
1256         /* Merge in attributes from table descriptors */
1257         attrs |= nstable << 3; /* NS */
1258         guarded = extract64(descriptor, 50, 1);  /* GP */
1259         if (param.hpd) {
1260             /* HPD disables all the table attributes except NSTable.  */
1261             break;
1262         }
1263         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
1264         /*
1265          * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1266          * means "force PL1 access only", which means forcing AP[1] to 0.
1267          */
1268         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
1269         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
1270         break;
1271     }
1272     /*
1273      * Here descaddr is the final physical address, and attributes
1274      * are all in attrs.
1275      */
1276     fault_type = ARMFault_AccessFlag;
1277     if ((attrs & (1 << 8)) == 0) {
1278         /* Access flag */
1279         goto do_fault;
1280     }
1281 
1282     ap = extract32(attrs, 4, 2);
1283 
1284     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1285         ns = mmu_idx == ARMMMUIdx_Stage2;
1286         xn = extract32(attrs, 11, 2);
1287         result->prot = get_S2prot(env, ap, xn, s1_is_el0);
1288     } else {
1289         ns = extract32(attrs, 3, 1);
1290         xn = extract32(attrs, 12, 1);
1291         pxn = extract32(attrs, 11, 1);
1292         result->prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1293     }
1294 
1295     fault_type = ARMFault_Permission;
1296     if (!(result->prot & (1 << access_type))) {
1297         goto do_fault;
1298     }
1299 
1300     if (ns) {
1301         /*
1302          * The NS bit will (as required by the architecture) have no effect if
1303          * the CPU doesn't support TZ or this is a non-secure translation
1304          * regime, because the attribute will already be non-secure.
1305          */
1306         result->attrs.secure = false;
1307     }
1308     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
1309     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
1310         arm_tlb_bti_gp(&result->attrs) = true;
1311     }
1312 
1313     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1314         result->cacheattrs.is_s2_format = true;
1315         result->cacheattrs.attrs = extract32(attrs, 0, 4);
1316     } else {
1317         /* Index into MAIR registers for cache attributes */
1318         uint8_t attrindx = extract32(attrs, 0, 3);
1319         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1320         assert(attrindx <= 7);
1321         result->cacheattrs.is_s2_format = false;
1322         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
1323     }
1324 
1325     /*
1326      * For FEAT_LPA2 and effective DS, the SH field in the attributes
1327      * was re-purposed for output address bits.  The SH attribute in
1328      * that case comes from TCR_ELx, which we extracted earlier.
1329      */
1330     if (param.ds) {
1331         result->cacheattrs.shareability = param.sh;
1332     } else {
1333         result->cacheattrs.shareability = extract32(attrs, 6, 2);
1334     }
1335 
1336     result->phys = descaddr;
1337     result->page_size = page_size;
1338     return false;
1339 
1340 do_fault:
1341     fi->type = fault_type;
1342     fi->level = level;
1343     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1344     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
1345                                mmu_idx == ARMMMUIdx_Stage2_S);
1346     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1347     return true;
1348 }
1349 
1350 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1351                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1352                                  bool is_secure, GetPhysAddrResult *result,
1353                                  ARMMMUFaultInfo *fi)
1354 {
1355     int n;
1356     uint32_t mask;
1357     uint32_t base;
1358     bool is_user = regime_is_user(env, mmu_idx);
1359 
1360     if (regime_translation_disabled(env, mmu_idx)) {
1361         /* MPU disabled.  */
1362         result->phys = address;
1363         result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1364         return false;
1365     }
1366 
1367     result->phys = address;
1368     for (n = 7; n >= 0; n--) {
1369         base = env->cp15.c6_region[n];
1370         if ((base & 1) == 0) {
1371             continue;
1372         }
1373         mask = 1 << ((base >> 1) & 0x1f);
1374         /*
1375          * Keep this shift separate from the above to avoid an
              * (undefined) << 32.
              */
1376         mask = (mask << 1) - 1;
1377         if (((base ^ address) & ~mask) == 0) {
1378             break;
1379         }
1380     }
1381     if (n < 0) {
1382         fi->type = ARMFault_Background;
1383         return true;
1384     }
1385 
1386     if (access_type == MMU_INST_FETCH) {
1387         mask = env->cp15.pmsav5_insn_ap;
1388     } else {
1389         mask = env->cp15.pmsav5_data_ap;
1390     }
1391     mask = (mask >> (n * 4)) & 0xf;
1392     switch (mask) {
1393     case 0:
1394         fi->type = ARMFault_Permission;
1395         fi->level = 1;
1396         return true;
1397     case 1:
1398         if (is_user) {
1399             fi->type = ARMFault_Permission;
1400             fi->level = 1;
1401             return true;
1402         }
1403         result->prot = PAGE_READ | PAGE_WRITE;
1404         break;
1405     case 2:
1406         result->prot = PAGE_READ;
1407         if (!is_user) {
1408             result->prot |= PAGE_WRITE;
1409         }
1410         break;
1411     case 3:
1412         result->prot = PAGE_READ | PAGE_WRITE;
1413         break;
1414     case 5:
1415         if (is_user) {
1416             fi->type = ARMFault_Permission;
1417             fi->level = 1;
1418             return true;
1419         }
1420         result->prot = PAGE_READ;
1421         break;
1422     case 6:
1423         result->prot = PAGE_READ;
1424         break;
1425     default:
1426         /* Bad permission.  */
1427         fi->type = ARMFault_Permission;
1428         fi->level = 1;
1429         return true;
1430     }
1431     result->prot |= PAGE_EXEC;
1432     return false;
1433 }
1434 
1435 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1436                                          int32_t address, int *prot)
1437 {
1438     if (!arm_feature(env, ARM_FEATURE_M)) {
1439         *prot = PAGE_READ | PAGE_WRITE;
1440         switch (address) {
1441         case 0xF0000000 ... 0xFFFFFFFF:
1442             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1443                 /* hivecs execing is ok */
1444                 *prot |= PAGE_EXEC;
1445             }
1446             break;
1447         case 0x00000000 ... 0x7FFFFFFF:
1448             *prot |= PAGE_EXEC;
1449             break;
1450         }
1451     } else {
1452         /*
1453          * Default system address map for M profile cores.
1454          * The architecture specifies which regions are execute-never;
1455          * at the MPU level no other checks are defined.
              */
1456         switch (address) {
1457         case 0x00000000 ... 0x1fffffff: /* ROM */
1458         case 0x20000000 ... 0x3fffffff: /* SRAM */
1459         case 0x60000000 ... 0x7fffffff: /* RAM */
1460         case 0x80000000 ... 0x9fffffff: /* RAM */
1461             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1462             break;
1463         case 0x40000000 ... 0x5fffffff: /* Peripheral */
1464         case 0xa0000000 ... 0xbfffffff: /* Device */
1465         case 0xc0000000 ... 0xdfffffff: /* Device */
1466         case 0xe0000000 ... 0xffffffff: /* System */
1467             *prot = PAGE_READ | PAGE_WRITE;
1468             break;
1469         default:
1470             g_assert_not_reached();
1471         }
1472     }
1473 }
1474 
1475 static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1476 {
1477     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1478     return arm_feature(env, ARM_FEATURE_M) &&
1479         extract32(address, 20, 12) == 0xe00;
1480 }
1481 
1482 static bool m_is_system_region(CPUARMState *env, uint32_t address)
1483 {
1484     /*
1485      * True if address is in the M profile system region
1486      * 0xe0000000 - 0xffffffff
1487      */
1488     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1489 }
1490 
1491 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1492                                          bool is_secure, bool is_user)
1493 {
1494     /*
1495      * Return true if we should use the default memory map as a
1496      * "background" region if there are no hits against any MPU regions.
1497      */
1498     CPUARMState *env = &cpu->env;
1499 
1500     if (is_user) {
1501         return false;
1502     }
1503 
1504     if (arm_feature(env, ARM_FEATURE_M)) {
1505         return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1506     } else {
1507         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1508     }
1509 }
1510 
1511 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1512                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1513                                  bool secure, GetPhysAddrResult *result,
1514                                  ARMMMUFaultInfo *fi)
1515 {
1516     ARMCPU *cpu = env_archcpu(env);
1517     int n;
1518     bool is_user = regime_is_user(env, mmu_idx);
1519 
1520     result->phys = address;
1521     result->page_size = TARGET_PAGE_SIZE;
1522     result->prot = 0;
1523 
1524     if (regime_translation_disabled(env, mmu_idx) ||
1525         m_is_ppb_region(env, address)) {
1526         /*
1527          * MPU disabled or M profile PPB access: use default memory map.
1528          * The other case which uses the default memory map in the
1529          * v7M ARM ARM pseudocode is exception vector reads from the vector
1530          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1531          * which always does a direct read using address_space_ldl(), rather
1532          * than going via this function, so we don't need to check that here.
1533          */
1534         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
1535     } else { /* MPU enabled */
1536         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1537             /* region search */
1538             uint32_t base = env->pmsav7.drbar[n];
1539             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1540             uint32_t rmask;
1541             bool srdis = false;
1542 
1543             if (!(env->pmsav7.drsr[n] & 0x1)) {
1544                 continue;
1545             }
1546 
1547             if (!rsize) {
1548                 qemu_log_mask(LOG_GUEST_ERROR,
1549                               "DRSR[%d]: Rsize field cannot be 0\n", n);
1550                 continue;
1551             }
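            /*
             * DRSR.RSIZE encodes a region of 2^(RSIZE + 1) bytes, so after
             * the increment below rsize is log2 of the region size in bytes
             * and rmask covers the offset bits within the region.
             */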
1552             rsize++;
1553             rmask = (1ull << rsize) - 1;
1554 
1555             if (base & rmask) {
1556                 qemu_log_mask(LOG_GUEST_ERROR,
1557                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1558                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
1559                               n, base, rmask);
1560                 continue;
1561             }
1562 
1563             if (address < base || address > base + rmask) {
1564                 /*
1565                  * Address not in this region. We must check whether the
1566                  * region covers addresses in the same page as our address.
1567                  * In that case we must not report a size that covers the
1568                  * whole page for a subsequent hit against a different MPU
1569                  * region or the background region, because it would result in
1570                  * incorrect TLB hits for subsequent accesses to addresses that
1571                  * are in this MPU region.
1572                  */
1573                 if (ranges_overlap(base, rmask,
1574                                    address & TARGET_PAGE_MASK,
1575                                    TARGET_PAGE_SIZE)) {
1576                     result->page_size = 1;
1577                 }
1578                 continue;
1579             }
1580 
1581             /* Region matched */
1582 
1583             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1584                 int i, snd;
1585                 uint32_t srdis_mask;
1586 
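                /*
                 * Regions of 256 bytes or more are split into 8 equal
                 * subregions; DRSR bits [15:8] (SRD) disable individual
                 * subregions, so snd selects the subregion containing the
                 * address and srdis its disable bit.
                 */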
1587                 rsize -= 3; /* sub region size (power of 2) */
1588                 snd = ((address - base) >> rsize) & 0x7;
1589                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1590 
1591                 srdis_mask = srdis ? 0x3 : 0x0;
1592                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1593                     /*
1594                      * This will check in groups of 2, 4 and then 8, whether
1595                      * the subregion bits are consistent. rsize is incremented
1596                      * back up to give the region size, considering consistent
1597                      * adjacent subregions as one region. Stop testing if rsize
1598                      * is already big enough for an entire QEMU page.
1599                      */
1600                     int snd_rounded = snd & ~(i - 1);
1601                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1602                                                      snd_rounded + 8, i);
1603                     if (srdis_mask ^ srdis_multi) {
1604                         break;
1605                     }
1606                     srdis_mask = (srdis_mask << i) | srdis_mask;
1607                     rsize++;
1608                 }
1609             }
1610             if (srdis) {
1611                 continue;
1612             }
1613             if (rsize < TARGET_PAGE_BITS) {
1614                 result->page_size = 1 << rsize;
1615             }
1616             break;
1617         }
1618 
1619         if (n == -1) { /* no hits */
1620             if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1621                 /* background fault */
1622                 fi->type = ARMFault_Background;
1623                 return true;
1624             }
1625             get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
1626         } else { /* an MPU hit! */
1627             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1628             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1629 
1630             if (m_is_system_region(env, address)) {
1631                 /* System space is always execute never */
1632                 xn = 1;
1633             }
1634 
1635             if (is_user) { /* User mode AP bit decoding */
1636                 switch (ap) {
1637                 case 0:
1638                 case 1:
1639                 case 5:
1640                     break; /* no access */
1641                 case 3:
1642                     result->prot |= PAGE_WRITE;
1643                     /* fall through */
1644                 case 2:
1645                 case 6:
1646                     result->prot |= PAGE_READ | PAGE_EXEC;
1647                     break;
1648                 case 7:
1649                     /* for v7M, same as 6; for R profile a reserved value */
1650                     if (arm_feature(env, ARM_FEATURE_M)) {
1651                         result->prot |= PAGE_READ | PAGE_EXEC;
1652                         break;
1653                     }
1654                     /* fall through */
1655                 default:
1656                     qemu_log_mask(LOG_GUEST_ERROR,
1657                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1658                                   PRIx32 "\n", n, ap);
1659                 }
1660             } else { /* Priv. mode AP bits decoding */
1661                 switch (ap) {
1662                 case 0:
1663                     break; /* no access */
1664                 case 1:
1665                 case 2:
1666                 case 3:
1667                     result->prot |= PAGE_WRITE;
1668                     /* fall through */
1669                 case 5:
1670                 case 6:
1671                     result->prot |= PAGE_READ | PAGE_EXEC;
1672                     break;
1673                 case 7:
1674                     /* for v7M, same as 6; for R profile a reserved value */
1675                     if (arm_feature(env, ARM_FEATURE_M)) {
1676                         result->prot |= PAGE_READ | PAGE_EXEC;
1677                         break;
1678                     }
1679                     /* fall through */
1680                 default:
1681                     qemu_log_mask(LOG_GUEST_ERROR,
1682                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1683                                   PRIx32 "\n", n, ap);
1684                 }
1685             }
1686 
1687             /* execute never */
1688             if (xn) {
1689                 result->prot &= ~PAGE_EXEC;
1690             }
1691         }
1692     }
1693 
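    /*
     * MMU_DATA_LOAD/STORE/INST_FETCH are 0/1/2 and PAGE_READ/WRITE/EXEC are
     * bits 0/1/2, so (1 << access_type) is the permission bit to test; the
     * fault info is only consulted by the caller if we return true.
     */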
1694     fi->type = ARMFault_Permission;
1695     fi->level = 1;
1696     return !(result->prot & (1 << access_type));
1697 }
1698 
1699 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1700                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1701                        bool secure, GetPhysAddrResult *result,
1702                        ARMMMUFaultInfo *fi, uint32_t *mregion)
1703 {
1704     /*
1705      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1706      * that a full virt-to-phys translation does).
1707      * mregion is (if not NULL) set to the region number which matched,
1708      * or -1 if no region number is returned (MPU off, address did not
1709      * hit a region, address hit in multiple regions).
1710      * If the region hit doesn't cover the entire TARGET_PAGE the address
1711      * is within, then we set the result page_size to 1 to force the
1712      * memory system to use a subpage.
1713      */
1714     ARMCPU *cpu = env_archcpu(env);
1715     bool is_user = regime_is_user(env, mmu_idx);
1716     int n;
1717     int matchregion = -1;
1718     bool hit = false;
1719     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1720     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1721 
1722     result->page_size = TARGET_PAGE_SIZE;
1723     result->phys = address;
1724     result->prot = 0;
1725     if (mregion) {
1726         *mregion = -1;
1727     }
1728 
1729     /*
1730      * Unlike the ARM ARM pseudocode, we don't need to check whether this
1731      * was an exception vector read from the vector table (which is always
1732      * done using the default system address map), because those accesses
1733      * are done in arm_v7m_load_vector(), which always does a direct
1734      * read using address_space_ldl(), rather than going via this function.
1735      */
1736     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
1737         hit = true;
1738     } else if (m_is_ppb_region(env, address)) {
1739         hit = true;
1740     } else {
1741         if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1742             hit = true;
1743         }
1744 
1745         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1746             /* region search */
1747             /*
1748              * Note that the base address is bits [31:5] from the register
1749              * with bits [4:0] all zeroes, but the limit address is bits
1750              * [31:5] from the register with bits [4:0] all ones.
1751              */
1752             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1753             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
1754 
1755             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1756                 /* Region disabled */
1757                 continue;
1758             }
1759 
1760             if (address < base || address > limit) {
1761                 /*
1762                  * Address not in this region. We must check whether the
1763                  * region covers addresses in the same page as our address.
1764                  * In that case we must not report a size that covers the
1765                  * whole page for a subsequent hit against a different MPU
1766                  * region or the background region, because it would result in
1767                  * incorrect TLB hits for subsequent accesses to addresses that
1768                  * are in this MPU region.
1769                  */
1770                 if (limit >= base &&
1771                     ranges_overlap(base, limit - base + 1,
1772                                    addr_page_base,
1773                                    TARGET_PAGE_SIZE)) {
1774                     result->page_size = 1;
1775                 }
1776                 continue;
1777             }
1778 
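            /* Region hit, but it covers only part of this QEMU page. */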
1779             if (base > addr_page_base || limit < addr_page_limit) {
1780                 result->page_size = 1;
1781             }
1782 
1783             if (matchregion != -1) {
1784                 /*
1785                  * Multiple regions match -- always a failure (unlike
1786                  * PMSAv7 where highest-numbered-region wins)
1787                  */
1788                 fi->type = ARMFault_Permission;
1789                 fi->level = 1;
1790                 return true;
1791             }
1792 
1793             matchregion = n;
1794             hit = true;
1795         }
1796     }
1797 
1798     if (!hit) {
1799         /* background fault */
1800         fi->type = ARMFault_Background;
1801         return true;
1802     }
1803 
1804     if (matchregion == -1) {
1805         /* hit using the background region */
1806         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
1807     } else {
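        /*
         * MPU_RBAR bit [0] is XN and bits [2:1] are the AP field (simple
         * AP encoding); for v8.1-M, MPU_RLAR bit [4] is PXN.
         */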
1808         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
1809         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
1810         bool pxn = false;
1811 
1812         if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1813             pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
1814         }
1815 
1816         if (m_is_system_region(env, address)) {
1817             /* System space is always execute never */
1818             xn = 1;
1819         }
1820 
1821         result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
1822         if (result->prot && !xn && !(pxn && !is_user)) {
1823             result->prot |= PAGE_EXEC;
1824         }
1825         /*
1826          * We don't need to look the attribute up in the MAIR0/MAIR1
1827          * registers because that only tells us about cacheability.
1828          */
1829         if (mregion) {
1830             *mregion = matchregion;
1831         }
1832     }
1833 
1834     fi->type = ARMFault_Permission;
1835     fi->level = 1;
1836     return !(result->prot & (1 << access_type));
1837 }
1838 
1839 static bool v8m_is_sau_exempt(CPUARMState *env,
1840                               uint32_t address, MMUAccessType access_type)
1841 {
1842     /*
1843      * The architecture specifies that certain address ranges are
1844      * exempt from v8M SAU/IDAU checks.
1845      */
1846     return
1847         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
1848         (address >= 0xe0000000 && address <= 0xe0002fff) ||
1849         (address >= 0xe000e000 && address <= 0xe000efff) ||
1850         (address >= 0xe002e000 && address <= 0xe002efff) ||
1851         (address >= 0xe0040000 && address <= 0xe0041fff) ||
1852         (address >= 0xe00ff000 && address <= 0xe00fffff);
1853 }
1854 
1855 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1856                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1857                          bool is_secure, V8M_SAttributes *sattrs)
1858 {
1859     /*
1860      * Look up the security attributes for this address. Compare the
1861      * pseudocode SecurityCheck() function.
1862      * We assume the caller has zero-initialized *sattrs.
1863      */
1864     ARMCPU *cpu = env_archcpu(env);
1865     int r;
1866     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
1867     int idau_region = IREGION_NOTVALID;
1868     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1869     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1870 
1871     if (cpu->idau) {
1872         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
1873         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
1874 
1875         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
1876                    &idau_nsc);
1877     }
1878 
1879     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
1880         /* 0xf0000000..0xffffffff is always S for insn fetches */
1881         return;
1882     }
1883 
1884     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
1885         sattrs->ns = !is_secure;
1886         return;
1887     }
1888 
1889     if (idau_region != IREGION_NOTVALID) {
1890         sattrs->irvalid = true;
1891         sattrs->iregion = idau_region;
1892     }
1893 
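    /* SAU_CTRL bit [0] is ENABLE and bit [1] is ALLNS. */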
1894     switch (env->sau.ctrl & 3) {
1895     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
1896         break;
1897     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
1898         sattrs->ns = true;
1899         break;
1900     default: /* SAU.ENABLE == 1 */
1901         for (r = 0; r < cpu->sau_sregion; r++) {
1902             if (env->sau.rlar[r] & 1) {
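                /*
                 * SAU_RLAR bit [0] is ENABLE and bit [1] is NSC; the limit
                 * address is bits [31:5] with bits [4:0] treated as ones.
                 */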
1903                 uint32_t base = env->sau.rbar[r] & ~0x1f;
1904                 uint32_t limit = env->sau.rlar[r] | 0x1f;
1905 
1906                 if (base <= address && limit >= address) {
1907                     if (base > addr_page_base || limit < addr_page_limit) {
1908                         sattrs->subpage = true;
1909                     }
1910                     if (sattrs->srvalid) {
1911                         /*
1912                          * If we hit in more than one region then we must report
1913                          * as Secure, not NS-Callable, with no valid region
1914                          * number info.
1915                          */
1916                         sattrs->ns = false;
1917                         sattrs->nsc = false;
1918                         sattrs->sregion = 0;
1919                         sattrs->srvalid = false;
1920                         break;
1921                     } else {
1922                         if (env->sau.rlar[r] & 2) {
1923                             sattrs->nsc = true;
1924                         } else {
1925                             sattrs->ns = true;
1926                         }
1927                         sattrs->srvalid = true;
1928                         sattrs->sregion = r;
1929                     }
1930                 } else {
1931                     /*
1932                      * Address not in this region. We must check whether the
1933                      * region covers addresses in the same page as our address.
1934                      * In that case we must not report a size that covers the
1935                      * whole page for a subsequent hit against a different MPU
1936                      * region or the background region, because it would result
1937                      * in incorrect TLB hits for subsequent accesses to
1938                      * addresses that are in this MPU region.
1939                      */
1940                     if (limit >= base &&
1941                         ranges_overlap(base, limit - base + 1,
1942                                        addr_page_base,
1943                                        TARGET_PAGE_SIZE)) {
1944                         sattrs->subpage = true;
1945                     }
1946                 }
1947             }
1948         }
1949         break;
1950     }
1951 
1952     /*
1953      * The IDAU will override the SAU lookup results if it specifies
1954      * higher security than the SAU does.
1955      */
1956     if (!idau_ns) {
1957         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
1958             sattrs->ns = false;
1959             sattrs->nsc = idau_nsc;
1960         }
1961     }
1962 }
1963 
1964 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
1965                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1966                                  bool secure, GetPhysAddrResult *result,
1967                                  ARMMMUFaultInfo *fi)
1968 {
1969     V8M_SAttributes sattrs = {};
1970     bool ret;
1971 
1972     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1973         v8m_security_lookup(env, address, access_type, mmu_idx,
1974                             secure, &sattrs);
1975         if (access_type == MMU_INST_FETCH) {
1976             /*
1977              * Instruction fetches always use the MMU bank and the
1978              * transaction attribute determined by the fetch address,
1979              * regardless of CPU state. This is painful for QEMU
1980              * to handle, because it would mean we need to encode
1981              * into the mmu_idx not just the (user, negpri) information
1982              * for the current security state but also that for the
1983              * other security state, which would balloon the number
1984              * of mmu_idx values needed alarmingly.
1985              * Fortunately we can avoid this because it's not actually
1986              * possible to arbitrarily execute code from memory with
1987              * the wrong security attribute: it will always generate
1988              * an exception of some kind or another, apart from the
1989              * special case of an NS CPU executing an SG instruction
1990              * in S&NSC memory. So we always just fail the translation
1991              * here and sort things out in the exception handler
1992              * (including possibly emulating an SG instruction).
1993              */
1994             if (sattrs.ns != !secure) {
1995                 if (sattrs.nsc) {
1996                     fi->type = ARMFault_QEMU_NSCExec;
1997                 } else {
1998                     fi->type = ARMFault_QEMU_SFault;
1999                 }
2000                 result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
2001                 result->phys = address;
2002                 result->prot = 0;
2003                 return true;
2004             }
2005         } else {
2006             /*
2007              * For data accesses we always use the MMU bank indicated
2008              * by the current CPU state, but the security attributes
2009              * might downgrade a secure access to nonsecure.
2010              */
2011             if (sattrs.ns) {
2012                 result->attrs.secure = false;
2013             } else if (!secure) {
2014                 /*
2015                  * NS access to S memory must fault.
2016                  * Architecturally we should first check whether the
2017                  * MPU information for this address indicates that we
2018                  * are doing an unaligned access to Device memory, which
2019                  * should generate a UsageFault instead. QEMU does not
2020                  * currently check for that kind of unaligned access though.
2021                  * If we added it we would need to do so as a special case
2022                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2023                  */
2024                 fi->type = ARMFault_QEMU_SFault;
2025                 result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
2026                 result->phys = address;
2027                 result->prot = 0;
2028                 return true;
2029             }
2030         }
2031     }
2032 
2033     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2034                             result, fi, NULL);
2035     if (sattrs.subpage) {
2036         result->page_size = 1;
2037     }
2038     return ret;
2039 }
2040 
2041 /*
2042  * Translate from the 4-bit stage 2 representation of
2043  * memory attributes (without cache-allocation hints) to
2044  * the 8-bit representation of the stage 1 MAIR registers
2045  * (which includes allocation hints).
2046  *
2047  * ref: shared/translation/attrs/S2AttrDecode()
2048  *      .../S2ConvertAttrsHints()
2049  */
2050 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
2051 {
2052     uint8_t hiattr = extract32(s2attrs, 2, 2);
2053     uint8_t loattr = extract32(s2attrs, 0, 2);
2054     uint8_t hihint = 0, lohint = 0;
2055 
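    /*
     * The 2-bit stage 2 encodings are: 0 = Device, 1 = Non-cacheable,
     * 2 = Write-Through, 3 = Write-Back. Cacheable types gain an RW-allocate
     * hint below, e.g. s2attrs 0xf (WB/WB) becomes 0xff in MAIR format.
     */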
2056     if (hiattr != 0) { /* normal memory */
2057         if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
2058             hiattr = loattr = 1; /* non-cacheable */
2059         } else {
2060             if (hiattr != 1) { /* Write-through or write-back */
2061                 hihint = 3; /* RW allocate */
2062             }
2063             if (loattr != 1) { /* Write-through or write-back */
2064                 lohint = 3; /* RW allocate */
2065             }
2066         }
2067     }
2068 
2069     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2070 }
2071 
2072 /*
2073  * Combine either inner or outer cacheability attributes for normal
2074  * memory, according to table D4-42 and pseudocode procedure
2075  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2076  *
2077  * NB: only stage 1 includes allocation hints (RW bits), leading to
2078  * some asymmetry.
2079  */
2080 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2081 {
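    /*
     * In a MAIR nibble for Normal memory, 4 is Non-cacheable, values with
     * bits [3:2] of 0 or 2 are Write-Through, the rest are Write-Back;
     * bits [1:0] are the read/write allocation hints.
     */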
2082     if (s1 == 4 || s2 == 4) {
2083         /* non-cacheable has precedence */
2084         return 4;
2085     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2086         /* stage 1 write-through takes precedence */
2087         return s1;
2088     } else if (extract32(s2, 2, 2) == 2) {
2089         /* stage 2 write-through takes precedence, but the allocation hint
2090          * is still taken from stage 1
2091          */
2092         return (2 << 2) | extract32(s1, 0, 2);
2093     } else { /* write-back */
2094         return s1;
2095     }
2096 }
2097 
2098 /*
2099  * Combine the memory type and cacheability attributes of
2100  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2101  * combined attributes in MAIR_EL1 format.
2102  */
2103 static uint8_t combined_attrs_nofwb(CPUARMState *env,
2104                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
2105 {
2106     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2107 
2108     s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);
2109 
2110     s1lo = extract32(s1.attrs, 0, 4);
2111     s2lo = extract32(s2_mair_attrs, 0, 4);
2112     s1hi = extract32(s1.attrs, 4, 4);
2113     s2hi = extract32(s2_mair_attrs, 4, 4);
2114 
2115     /* Combine memory type and cacheability attributes */
2116     if (s1hi == 0 || s2hi == 0) {
2117         /* Device has precedence over normal */
2118         if (s1lo == 0 || s2lo == 0) {
2119             /* nGnRnE has precedence over anything */
2120             ret_attrs = 0;
2121         } else if (s1lo == 4 || s2lo == 4) {
2122             /* non-Reordering has precedence over Reordering */
2123             ret_attrs = 4;  /* nGnRE */
2124         } else if (s1lo == 8 || s2lo == 8) {
2125             /* non-Gathering has precedence over Gathering */
2126             ret_attrs = 8;  /* nGRE */
2127         } else {
2128             ret_attrs = 0xc; /* GRE */
2129         }
2130     } else { /* Normal memory */
2131         /* Outer/inner cacheability combine independently */
2132         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2133                   | combine_cacheattr_nibble(s1lo, s2lo);
2134     }
2135     return ret_attrs;
2136 }
2137 
2138 static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2139 {
2140     /*
2141      * Given the 4 bits specifying the outer or inner cacheability
2142      * in MAIR format, return a value specifying Normal Write-Back,
2143      * with the allocation and transient hints taken from the input
2144      * if the input specified some kind of cacheable attribute.
2145      */
2146     if (attr == 0 || attr == 4) {
2147         /*
2148          * 0 == an UNPREDICTABLE encoding
2149          * 4 == Non-cacheable
2150          * Either way, force Write-Back RW allocate non-transient
2151          */
2152         return 0xf;
2153     }
2154     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2155     return attr | 4;
2156 }
2157 
2158 /*
2159  * Combine the memory type and cacheability attributes of
2160  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2161  * combined attributes in MAIR_EL1 format.
2162  */
2163 static uint8_t combined_attrs_fwb(CPUARMState *env,
2164                                   ARMCacheAttrs s1, ARMCacheAttrs s2)
2165 {
2166     switch (s2.attrs) {
2167     case 7:
2168         /* Use stage 1 attributes */
2169         return s1.attrs;
2170     case 6:
2171         /*
2172          * Force Normal Write-Back. Note that if S1 is Normal cacheable
2173          * then we take the allocation hints from it; otherwise it is
2174          * RW allocate, non-transient.
2175          */
2176         if ((s1.attrs & 0xf0) == 0) {
2177             /* S1 is Device */
2178             return 0xff;
2179         }
2180         /* Need to check the Inner and Outer nibbles separately */
2181         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2182             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2183     case 5:
2184         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2185         if ((s1.attrs & 0xf0) == 0) {
2186             return s1.attrs;
2187         }
2188         return 0x44;
2189     case 0 ... 3:
2190         /* Force Device, of subtype specified by S2 */
2191         return s2.attrs << 2;
2192     default:
2193         /*
2194          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2195          * arbitrarily force Device.
2196          */
2197         return 0;
2198     }
2199 }
2200 
2201 /*
2202  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2203  * and CombineS1S2Desc()
2204  *
2205  * @env:     CPUARMState
2206  * @s1:      Attributes from stage 1 walk
2207  * @s2:      Attributes from stage 2 walk
2208  */
2209 static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
2210                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
2211 {
2212     ARMCacheAttrs ret;
2213     bool tagged = false;
2214 
2215     assert(s2.is_s2_format && !s1.is_s2_format);
2216     ret.is_s2_format = false;
2217 
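    /*
     * MAIR attr 0xf0 is Tagged Normal WB RWA (FEAT_MTE); combine it as if
     * it were plain 0xff and restore the Tagged encoding at the end.
     */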
2218     if (s1.attrs == 0xf0) {
2219         tagged = true;
2220         s1.attrs = 0xff;
2221     }
2222 
2223     /* Combine shareability attributes (table D4-43) */
2224     if (s1.shareability == 2 || s2.shareability == 2) {
2225         /* if either is outer-shareable, the result is outer-shareable */
2226         ret.shareability = 2;
2227     } else if (s1.shareability == 3 || s2.shareability == 3) {
2228         /* if either is inner-shareable, the result is inner-shareable */
2229         ret.shareability = 3;
2230     } else {
2231         /* both non-shareable */
2232         ret.shareability = 0;
2233     }
2234 
2235     /* Combine memory type and cacheability attributes */
2236     if (arm_hcr_el2_eff(env) & HCR_FWB) {
2237         ret.attrs = combined_attrs_fwb(env, s1, s2);
2238     } else {
2239         ret.attrs = combined_attrs_nofwb(env, s1, s2);
2240     }
2241 
2242     /*
2243      * Any location for which the resultant memory type is any
2244      * type of Device memory is always treated as Outer Shareable.
2245      * Any location for which the resultant memory type is Normal
2246      * Inner Non-cacheable, Outer Non-cacheable is always treated
2247      * as Outer Shareable.
2248      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2249      */
2250     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2251         ret.shareability = 2;
2252     }
2253 
2254     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2255     if (tagged && ret.attrs == 0xff) {
2256         ret.attrs = 0xf0;
2257     }
2258 
2259     return ret;
2260 }
2261 
2262 /**
2263  * get_phys_addr - get the physical address for this virtual address
2264  *
2265  * Find the physical address corresponding to the given virtual address,
2266  * by doing a translation table walk on MMU based systems or using the
2267  * MPU state on MPU based systems.
2268  *
2269  * Returns false if the translation was successful. Otherwise, the fields of
2270  * @result may not be filled in, and @fi describes why the translation
2271  * aborted, in the format of a DFSR/IFSR fault register, with the
2272  * following caveats:
2273  *  * we honour the short vs long DFSR format differences.
2274  *  * the WnR bit is never set (the caller must do this).
2275  *  * for PMSAv5 based systems we don't bother to return a full FSR format
2276  *    value.
2277  *
2278  * @env: CPUARMState
2279  * @address: virtual address to get physical address for
2280  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
2281  * @mmu_idx: MMU index indicating required translation regime
2282  * @result: set on translation success.
2283  * @fi: set to fault info if the translation fails
2284  */
2285 bool get_phys_addr(CPUARMState *env, target_ulong address,
2286                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
2287                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2288 {
2289     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
2290     bool is_secure = regime_is_secure(env, mmu_idx);
2291 
2292     if (mmu_idx != s1_mmu_idx) {
2293         /*
2294          * Call ourselves recursively to do the stage 1 and then stage 2
2295          * translations if mmu_idx is a two-stage regime.
2296          */
2297         if (arm_feature(env, ARM_FEATURE_EL2)) {
2298             hwaddr ipa;
2299             int s1_prot;
2300             int ret;
2301             bool ipa_secure;
2302             ARMCacheAttrs cacheattrs1;
2303             ARMMMUIdx s2_mmu_idx;
2304             bool is_el0;
2305 
2306             ret = get_phys_addr(env, address, access_type, s1_mmu_idx,
2307                                 result, fi);
2308 
2309             /* If S1 fails or S2 is disabled, return early.  */
2310             if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
2311                 return ret;
2312             }
2313 
2314             ipa = result->phys;
2315             ipa_secure = result->attrs.secure;
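            /*
             * Select the PA space in which the stage 2 table walk itself is
             * made: when set, VSTCR_EL2.SW (for a Secure IPA) and
             * VTCR_EL2.NSW (for a Non-secure IPA) move the walk to the
             * Non-secure PA space.
             */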
2316             if (arm_is_secure_below_el3(env)) {
2317                 if (ipa_secure) {
2318                     result->attrs.secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
2319                 } else {
2320                     result->attrs.secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
2321                 }
2322             } else {
2323                 assert(!ipa_secure);
2324             }
2325 
2326             s2_mmu_idx = (result->attrs.secure
2327                           ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
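            /*
             * Whether stage 1 ran as EL0 matters to the stage 2 walk,
             * because stage 2 execute permissions distinguish EL0 from
             * EL1 accesses.
             */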
2328             is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
2329 
2330             /*
2331              * S1 is done, now do S2 translation.
2332              * Save the stage1 results so that we may merge
2333              * prot and cacheattrs later.
2334              */
2335             s1_prot = result->prot;
2336             cacheattrs1 = result->cacheattrs;
2337             memset(result, 0, sizeof(*result));
2338 
2339             ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
2340                                      is_el0, result, fi);
2341             fi->s2addr = ipa;
2342 
2343             /* Combine the S1 and S2 perms.  */
2344             result->prot &= s1_prot;
2345 
2346             /* If S2 fails, return early.  */
2347             if (ret) {
2348                 return ret;
2349             }
2350 
2351             /* Combine the S1 and S2 cache attributes. */
2352             if (arm_hcr_el2_eff(env) & HCR_DC) {
2353                 /*
2354                  * HCR.DC forces the first stage attributes to
2355                  *  Normal Non-Shareable,
2356                  *  Inner Write-Back Read-Allocate Write-Allocate,
2357                  *  Outer Write-Back Read-Allocate Write-Allocate.
2358                  * Do not overwrite Tagged within attrs.
2359                  */
2360                 if (cacheattrs1.attrs != 0xf0) {
2361                     cacheattrs1.attrs = 0xff;
2362                 }
2363                 cacheattrs1.shareability = 0;
2364             }
2365             result->cacheattrs = combine_cacheattrs(env, cacheattrs1,
2366                                                     result->cacheattrs);
2367 
2368             /* Check if IPA translates to secure or non-secure PA space. */
2369             if (arm_is_secure_below_el3(env)) {
2370                 if (ipa_secure) {
2371                     result->attrs.secure =
2372                         !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
2373                 } else {
2374                     result->attrs.secure =
2375                         !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
2376                         || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
2377                 }
2378             }
2379             return false;
2380         } else {
2381             /*
2382              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
2383              */
2384             mmu_idx = stage_1_mmu_idx(mmu_idx);
2385         }
2386     }
2387 
2388     /*
2389      * The page table entries may downgrade secure to non-secure, but
2390  * cannot upgrade a non-secure translation regime's attributes
2391      * to secure.
2392      */
2393     result->attrs.secure = is_secure;
2394     result->attrs.user = regime_is_user(env, mmu_idx);
2395 
2396     /*
2397      * Fast Context Switch Extension. This doesn't exist at all in v8.
2398      * In v7 and earlier it affects all stage 1 translations.
2399      */
2400     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2401         && !arm_feature(env, ARM_FEATURE_V8)) {
2402         if (regime_el(env, mmu_idx) == 3) {
2403             address += env->cp15.fcseidr_s;
2404         } else {
2405             address += env->cp15.fcseidr_ns;
2406         }
2407     }
2408 
2409     if (arm_feature(env, ARM_FEATURE_PMSA)) {
2410         bool ret;
2411         result->page_size = TARGET_PAGE_SIZE;
2412 
2413         if (arm_feature(env, ARM_FEATURE_V8)) {
2414             /* PMSAv8 */
2415             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2416                                        is_secure, result, fi);
2417         } else if (arm_feature(env, ARM_FEATURE_V7)) {
2418             /* PMSAv7 */
2419             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2420                                        is_secure, result, fi);
2421         } else {
2422             /* Pre-v7 MPU */
2423             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2424                                        is_secure, result, fi);
2425         }
2426         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2427                       " mmu_idx %u -> %s (prot %c%c%c)\n",
2428                       access_type == MMU_DATA_LOAD ? "reading" :
2429                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
2430                       (uint32_t)address, mmu_idx,
2431                       ret ? "Miss" : "Hit",
2432                       result->prot & PAGE_READ ? 'r' : '-',
2433                       result->prot & PAGE_WRITE ? 'w' : '-',
2434                       result->prot & PAGE_EXEC ? 'x' : '-');
2435 
2436         return ret;
2437     }
2438 
2439     /* Definitely a real MMU, not an MPU */
2440 
2441     if (regime_translation_disabled(env, mmu_idx)) {
2442         uint64_t hcr;
2443         uint8_t memattr;
2444 
2445         /*
2446          * MMU disabled.  S1 addresses within aa64 translation regimes are
2447          * still checked for bounds -- see AArch64.TranslateAddressS1Off.
2448          */
2449         if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
2450             int r_el = regime_el(env, mmu_idx);
2451             if (arm_el_is_aa64(env, r_el)) {
2452                 int pamax = arm_pamax(env_archcpu(env));
2453                 uint64_t tcr = env->cp15.tcr_el[r_el];
2454                 int addrtop, tbi;
2455 
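                /*
                 * aa64_va_parameter_tbi() returns a pair of TBI bits (one
                 * per half of the VA space); VA bit 55 selects which one
                 * applies here.
                 */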
2456                 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2457                 if (access_type == MMU_INST_FETCH) {
2458                     tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2459                 }
2460                 tbi = (tbi >> extract64(address, 55, 1)) & 1;
2461                 addrtop = (tbi ? 55 : 63);
2462 
2463                 if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2464                     fi->type = ARMFault_AddressSize;
2465                     fi->level = 0;
2466                     fi->stage2 = false;
2467                     return true;
2468                 }
2469 
2470                 /*
2471                  * When TBI is disabled, we've just validated that all of the
2472                  * bits above PAMax are zero, so logically we only need to
2473                  * clear the top byte for TBI.  But it's clearer to follow
2474                  * what the pseudocode does when it sets addrdesc.paddress.
2475                  */
2476                 address = extract64(address, 0, 52);
2477             }
2478         }
2479         result->phys = address;
2480         result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2481         result->page_size = TARGET_PAGE_SIZE;
2482 
2483         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2484         hcr = arm_hcr_el2_eff(env);
2485         result->cacheattrs.shareability = 0;
2486         result->cacheattrs.is_s2_format = false;
2487         if (hcr & HCR_DC) {
2488             if (hcr & HCR_DCT) {
2489                 memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2490             } else {
2491                 memattr = 0xff;  /* Normal, WB, RWA */
2492             }
2493         } else if (access_type == MMU_INST_FETCH) {
2494             if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2495                 memattr = 0xee;  /* Normal, WT, RA, NT */
2496             } else {
2497                 memattr = 0x44;  /* Normal, Non-cacheable */
2498             }
2499             result->cacheattrs.shareability = 2; /* outer shareable */
2500         } else {
2501             memattr = 0x00;      /* Device, nGnRnE */
2502         }
2503         result->cacheattrs.attrs = memattr;
2504         return false;
2505     }
2506 
2507     if (regime_using_lpae_format(env, mmu_idx)) {
2508         return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
2509                                   result, fi);
2510     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2511         return get_phys_addr_v6(env, address, access_type, mmu_idx,
2512                                 is_secure, result, fi);
2513     } else {
2514         return get_phys_addr_v5(env, address, access_type, mmu_idx,
2515                                 is_secure, result, fi);
2516     }
2517 }
2518 
2519 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2520                                          MemTxAttrs *attrs)
2521 {
2522     ARMCPU *cpu = ARM_CPU(cs);
2523     CPUARMState *env = &cpu->env;
2524     GetPhysAddrResult res = {};
2525     ARMMMUFaultInfo fi = {};
2526     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
2527     bool ret;
2528 
2529     ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
2530     *attrs = res.attrs;
2531 
2532     if (ret) {
2533         return -1;
2534     }
2535     return res.phys;
2536 }
2537