xref: /openbmc/qemu/target/arm/ptw.c (revision a6caeee8)
1 /*
2  * ARM page table walking.
3  *
4  * This code is licensed under the GNU GPL v2 or later.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "qemu/range.h"
12 #include "cpu.h"
13 #include "internals.h"
14 #include "idau.h"
15 
16 
17 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
18                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
19                                bool s1_is_el0, hwaddr *phys_ptr,
20                                MemTxAttrs *txattrs, int *prot,
21                                target_ulong *page_size_ptr,
22                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
23     __attribute__((nonnull));
24 
25 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
26 static const uint8_t pamax_map[] = {
27     [0] = 32,
28     [1] = 36,
29     [2] = 40,
30     [3] = 42,
31     [4] = 44,
32     [5] = 48,
33     [6] = 52,
34 };
35 
36 /* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
37 unsigned int arm_pamax(ARMCPU *cpu)
38 {
39     unsigned int parange =
40         FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
41 
42     /*
43      * id_aa64mmfr0 is a read-only register so values outside of the
44      * supported mappings can be considered an implementation error.
45      */
46     assert(parange < ARRAY_SIZE(pamax_map));
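    /* For example, a CPU reporting PARANGE == 5 maps to pamax_map[5] == 48,
     * i.e. a 48-bit physical address space. */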
47     return pamax_map[parange];
48 }
49 
50 /*
51  * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
52  */
53 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
54 {
55     switch (mmu_idx) {
56     case ARMMMUIdx_SE10_0:
57         return ARMMMUIdx_Stage1_SE0;
58     case ARMMMUIdx_SE10_1:
59         return ARMMMUIdx_Stage1_SE1;
60     case ARMMMUIdx_SE10_1_PAN:
61         return ARMMMUIdx_Stage1_SE1_PAN;
62     case ARMMMUIdx_E10_0:
63         return ARMMMUIdx_Stage1_E0;
64     case ARMMMUIdx_E10_1:
65         return ARMMMUIdx_Stage1_E1;
66     case ARMMMUIdx_E10_1_PAN:
67         return ARMMMUIdx_Stage1_E1_PAN;
68     default:
69         return mmu_idx;
70     }
71 }
72 
73 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
74 {
75     return stage_1_mmu_idx(arm_mmu_idx(env));
76 }
77 
78 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
79 {
80     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
81 }
82 
83 static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
84 {
85     switch (mmu_idx) {
86     case ARMMMUIdx_SE10_0:
87     case ARMMMUIdx_E20_0:
88     case ARMMMUIdx_SE20_0:
89     case ARMMMUIdx_Stage1_E0:
90     case ARMMMUIdx_Stage1_SE0:
91     case ARMMMUIdx_MUser:
92     case ARMMMUIdx_MSUser:
93     case ARMMMUIdx_MUserNegPri:
94     case ARMMMUIdx_MSUserNegPri:
95         return true;
96     default:
97         return false;
98     case ARMMMUIdx_E10_0:
99     case ARMMMUIdx_E10_1:
100     case ARMMMUIdx_E10_1_PAN:
101         g_assert_not_reached();
102     }
103 }
104 
105 /* Return the TTBR associated with this translation regime */
106 static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
107 {
108     if (mmu_idx == ARMMMUIdx_Stage2) {
109         return env->cp15.vttbr_el2;
110     }
111     if (mmu_idx == ARMMMUIdx_Stage2_S) {
112         return env->cp15.vsttbr_el2;
113     }
114     if (ttbrn == 0) {
115         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
116     } else {
117         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
118     }
119 }
120 
121 /* Return true if the specified stage of address translation is disabled */
122 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx)
123 {
124     uint64_t hcr_el2;
125 
126     if (arm_feature(env, ARM_FEATURE_M)) {
127         switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
128                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
129         case R_V7M_MPU_CTRL_ENABLE_MASK:
130             /* Enabled, but not for HardFault and NMI */
131             return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
132         case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
133             /* Enabled for all cases */
134             return false;
135         case 0:
136         default:
137             /*
138              * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
139              * we warned about that in armv7m_nvic.c when the guest set it.
140              */
141             return true;
142         }
143     }
144 
145     hcr_el2 = arm_hcr_el2_eff(env);
146 
147     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
148         /* HCR.DC means HCR.VM behaves as 1 */
149         return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
150     }
151 
152     if (hcr_el2 & HCR_TGE) {
153         /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
154         if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
155             return true;
156         }
157     }
158 
159     if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
160         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
161         return true;
162     }
163 
164     return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
165 }
166 
167 static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
168 {
169     /*
170      * For an S1 page table walk, the stage 1 attributes are always
171      * some form of "this is Normal memory". The combined S1+S2
172      * attributes are therefore only Device if stage 2 specifies Device.
173      * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
174      * ie when cacheattrs.attrs bits [3:2] are 0b00.
175      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
176      * when cacheattrs.attrs bit [2] is 0.
177      */
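    /*
     * For example, with FWB == 0 a stage 2 MemAttr of 0b0000 (Device-nGnRnE)
     * yields true here, while 0b1111 (Normal, Inner/Outer Write-Back
     * cacheable) yields false.
     */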
178     assert(cacheattrs.is_s2_format);
179     if (arm_hcr_el2_eff(env) & HCR_FWB) {
180         return (cacheattrs.attrs & 0x4) == 0;
181     } else {
182         return (cacheattrs.attrs & 0xc) == 0;
183     }
184 }
185 
186 /* Translate a S1 pagetable walk through S2 if needed.  */
187 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
188                                hwaddr addr, bool *is_secure,
189                                ARMMMUFaultInfo *fi)
190 {
191     if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
192         !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
193         target_ulong s2size;
194         hwaddr s2pa;
195         int s2prot;
196         int ret;
197         ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
198                                           : ARMMMUIdx_Stage2;
199         ARMCacheAttrs cacheattrs = {};
200         MemTxAttrs txattrs = {};
201 
202         ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
203                                  &s2pa, &txattrs, &s2prot, &s2size, fi,
204                                  &cacheattrs);
205         if (ret) {
206             assert(fi->type != ARMFault_None);
207             fi->s2addr = addr;
208             fi->stage2 = true;
209             fi->s1ptw = true;
210             fi->s1ns = !*is_secure;
211             return ~0;
212         }
213         if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
214             ptw_attrs_are_device(env, cacheattrs)) {
215             /*
216              * PTW set and S1 walk touched S2 Device memory:
217              * generate Permission fault.
218              */
219             fi->type = ARMFault_Permission;
220             fi->s2addr = addr;
221             fi->stage2 = true;
222             fi->s1ptw = true;
223             fi->s1ns = !*is_secure;
224             return ~0;
225         }
226 
227         if (arm_is_secure_below_el3(env)) {
228             /* Check if page table walk is to secure or non-secure PA space. */
229             if (*is_secure) {
230                 *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
231             } else {
232                 *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
233             }
234         } else {
235             assert(!*is_secure);
236         }
237 
238         addr = s2pa;
239     }
240     return addr;
241 }
242 
243 /* All loads done in the course of a page table walk go through here. */
244 static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
245                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
246 {
247     CPUState *cs = env_cpu(env);
248     MemTxAttrs attrs = {};
249     MemTxResult result = MEMTX_OK;
250     AddressSpace *as;
251     uint32_t data;
252 
253     addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
254     attrs.secure = is_secure;
255     as = arm_addressspace(cs, attrs);
256     if (fi->s1ptw) {
257         return 0;
258     }
259     if (regime_translation_big_endian(env, mmu_idx)) {
260         data = address_space_ldl_be(as, addr, attrs, &result);
261     } else {
262         data = address_space_ldl_le(as, addr, attrs, &result);
263     }
264     if (result == MEMTX_OK) {
265         return data;
266     }
267     fi->type = ARMFault_SyncExternalOnWalk;
268     fi->ea = arm_extabort_type(result);
269     return 0;
270 }
271 
272 static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
273                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
274 {
275     CPUState *cs = env_cpu(env);
276     MemTxAttrs attrs = {};
277     MemTxResult result = MEMTX_OK;
278     AddressSpace *as;
279     uint64_t data;
280 
281     addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
282     attrs.secure = is_secure;
283     as = arm_addressspace(cs, attrs);
284     if (fi->s1ptw) {
285         return 0;
286     }
287     if (regime_translation_big_endian(env, mmu_idx)) {
288         data = address_space_ldq_be(as, addr, attrs, &result);
289     } else {
290         data = address_space_ldq_le(as, addr, attrs, &result);
291     }
292     if (result == MEMTX_OK) {
293         return data;
294     }
295     fi->type = ARMFault_SyncExternalOnWalk;
296     fi->ea = arm_extabort_type(result);
297     return 0;
298 }
299 
300 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
301                                      uint32_t *table, uint32_t address)
302 {
303     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
304     TCR *tcr = regime_tcr(env, mmu_idx);
305 
306     if (address & tcr->mask) {
307         if (tcr->raw_tcr & TTBCR_PD1) {
308             /* Translation table walk disabled for TTBR1 */
309             return false;
310         }
311         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
312     } else {
313         if (tcr->raw_tcr & TTBCR_PD0) {
314             /* Translation table walk disabled for TTBR0 */
315             return false;
316         }
317         *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
318     }
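    /*
     * The level 1 table has 4096 4-byte entries indexed by VA[31:20], so
     * the byte offset into it is VA[31:20] << 2, which is what
     * (address >> 18) & 0x3ffc computes.
     */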
319     *table |= (address >> 18) & 0x3ffc;
320     return true;
321 }
322 
323 /*
324  * Translate section/page access permissions to page R/W protection flags
325  * @env:         CPUARMState
326  * @mmu_idx:     MMU index indicating required translation regime
327  * @ap:          The 3-bit access permissions (AP[2:0])
328  * @domain_prot: The 2-bit domain access permissions
329  */
330 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
331                          int ap, int domain_prot)
332 {
333     bool is_user = regime_is_user(env, mmu_idx);
334 
335     if (domain_prot == 3) {
336         return PAGE_READ | PAGE_WRITE;
337     }
338 
339     switch (ap) {
340     case 0:
341         if (arm_feature(env, ARM_FEATURE_V7)) {
342             return 0;
343         }
344         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
345         case SCTLR_S:
346             return is_user ? 0 : PAGE_READ;
347         case SCTLR_R:
348             return PAGE_READ;
349         default:
350             return 0;
351         }
352     case 1:
353         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
354     case 2:
355         if (is_user) {
356             return PAGE_READ;
357         } else {
358             return PAGE_READ | PAGE_WRITE;
359         }
360     case 3:
361         return PAGE_READ | PAGE_WRITE;
362     case 4: /* Reserved.  */
363         return 0;
364     case 5:
365         return is_user ? 0 : PAGE_READ;
366     case 6:
367         return PAGE_READ;
368     case 7:
369         if (!arm_feature(env, ARM_FEATURE_V6K)) {
370             return 0;
371         }
372         return PAGE_READ;
373     default:
374         g_assert_not_reached();
375     }
376 }
377 
378 /*
379  * Translate section/page access permissions to page R/W protection flags.
380  * @ap:      The 2-bit simple AP (AP[2:1])
381  * @is_user: TRUE if accessing from PL0
382  */
383 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
384 {
385     switch (ap) {
386     case 0:
387         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
388     case 1:
389         return PAGE_READ | PAGE_WRITE;
390     case 2:
391         return is_user ? 0 : PAGE_READ;
392     case 3:
393         return PAGE_READ;
394     default:
395         g_assert_not_reached();
396     }
397 }
398 
399 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
400 {
401     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
402 }
403 
404 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
405                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
406                              hwaddr *phys_ptr, int *prot,
407                              target_ulong *page_size,
408                              ARMMMUFaultInfo *fi)
409 {
410     int level = 1;
411     uint32_t table;
412     uint32_t desc;
413     int type;
414     int ap;
415     int domain = 0;
416     int domain_prot;
417     hwaddr phys_addr;
418     uint32_t dacr;
419 
420     /* Pagetable walk.  */
421     /* Lookup l1 descriptor.  */
422     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
423         /* Section translation fault if page walk is disabled by PD0 or PD1 */
424         fi->type = ARMFault_Translation;
425         goto do_fault;
426     }
427     desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
428                        mmu_idx, fi);
429     if (fi->type != ARMFault_None) {
430         goto do_fault;
431     }
432     type = (desc & 3);
433     domain = (desc >> 5) & 0x0f;
434     if (regime_el(env, mmu_idx) == 1) {
435         dacr = env->cp15.dacr_ns;
436     } else {
437         dacr = env->cp15.dacr_s;
438     }
439     domain_prot = (dacr >> (domain * 2)) & 3;
440     if (type == 0) {
441         /* Section translation fault.  */
442         fi->type = ARMFault_Translation;
443         goto do_fault;
444     }
445     if (type != 2) {
446         level = 2;
447     }
448     if (domain_prot == 0 || domain_prot == 2) {
449         fi->type = ARMFault_Domain;
450         goto do_fault;
451     }
452     if (type == 2) {
453         /* 1MB section.  */
454         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
455         ap = (desc >> 10) & 3;
456         *page_size = 1024 * 1024;
457     } else {
458         /* Lookup l2 entry.  */
459         if (type == 1) {
460             /* Coarse pagetable.  */
461             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
462         } else {
463             /* Fine pagetable.  */
464             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
465         }
466         desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
467                            mmu_idx, fi);
468         if (fi->type != ARMFault_None) {
469             goto do_fault;
470         }
471         switch (desc & 3) {
472         case 0: /* Page translation fault.  */
473             fi->type = ARMFault_Translation;
474             goto do_fault;
475         case 1: /* 64k page.  */
476             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
477             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
478             *page_size = 0x10000;
479             break;
480         case 2: /* 4k page.  */
481             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
482             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
483             *page_size = 0x1000;
484             break;
485         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
486             if (type == 1) {
487                 /* ARMv6/XScale extended small page format */
488                 if (arm_feature(env, ARM_FEATURE_XSCALE)
489                     || arm_feature(env, ARM_FEATURE_V6)) {
490                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
491                     *page_size = 0x1000;
492                 } else {
493                     /*
494                      * UNPREDICTABLE in ARMv5; we choose to take a
495                      * page translation fault.
496                      */
497                     fi->type = ARMFault_Translation;
498                     goto do_fault;
499                 }
500             } else {
501                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
502                 *page_size = 0x400;
503             }
504             ap = (desc >> 4) & 3;
505             break;
506         default:
507             /* Never happens, but compiler isn't smart enough to tell.  */
508             g_assert_not_reached();
509         }
510     }
511     *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
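    /*
     * The ARMv5 descriptor format has no XN bit, so any readable or
     * writable page is also executable.
     */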
512     *prot |= *prot ? PAGE_EXEC : 0;
513     if (!(*prot & (1 << access_type))) {
514         /* Access permission fault.  */
515         fi->type = ARMFault_Permission;
516         goto do_fault;
517     }
518     *phys_ptr = phys_addr;
519     return false;
520 do_fault:
521     fi->domain = domain;
522     fi->level = level;
523     return true;
524 }
525 
526 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
527                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
528                              hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
529                              target_ulong *page_size, ARMMMUFaultInfo *fi)
530 {
531     ARMCPU *cpu = env_archcpu(env);
532     int level = 1;
533     uint32_t table;
534     uint32_t desc;
535     uint32_t xn;
536     uint32_t pxn = 0;
537     int type;
538     int ap;
539     int domain = 0;
540     int domain_prot;
541     hwaddr phys_addr;
542     uint32_t dacr;
543     bool ns;
544 
545     /* Pagetable walk.  */
546     /* Lookup l1 descriptor.  */
547     if (!get_level1_table_address(env, mmu_idx, &table, address)) {
548         /* Section translation fault if page walk is disabled by PD0 or PD1 */
549         fi->type = ARMFault_Translation;
550         goto do_fault;
551     }
552     desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
553                        mmu_idx, fi);
554     if (fi->type != ARMFault_None) {
555         goto do_fault;
556     }
557     type = (desc & 3);
558     if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
559         /* Section translation fault, or attempt to use the encoding
560          * which is Reserved on implementations without PXN.
561          */
562         fi->type = ARMFault_Translation;
563         goto do_fault;
564     }
565     if ((type == 1) || !(desc & (1 << 18))) {
566         /* Page or Section.  */
567         domain = (desc >> 5) & 0x0f;
568     }
569     if (regime_el(env, mmu_idx) == 1) {
570         dacr = env->cp15.dacr_ns;
571     } else {
572         dacr = env->cp15.dacr_s;
573     }
574     if (type == 1) {
575         level = 2;
576     }
577     domain_prot = (dacr >> (domain * 2)) & 3;
578     if (domain_prot == 0 || domain_prot == 2) {
579         /* Section or Page domain fault */
580         fi->type = ARMFault_Domain;
581         goto do_fault;
582     }
583     if (type != 1) {
584         if (desc & (1 << 18)) {
585             /* Supersection.  */
586             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
587             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
588             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
589             *page_size = 0x1000000;
590         } else {
591             /* Section.  */
592             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
593             *page_size = 0x100000;
594         }
595         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
596         xn = desc & (1 << 4);
597         pxn = desc & 1;
598         ns = extract32(desc, 19, 1);
599     } else {
600         if (cpu_isar_feature(aa32_pxn, cpu)) {
601             pxn = (desc >> 2) & 1;
602         }
603         ns = extract32(desc, 3, 1);
604         /* Lookup l2 entry.  */
605         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
606         desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
607                            mmu_idx, fi);
608         if (fi->type != ARMFault_None) {
609             goto do_fault;
610         }
611         ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
612         switch (desc & 3) {
613         case 0: /* Page translation fault.  */
614             fi->type = ARMFault_Translation;
615             goto do_fault;
616         case 1: /* 64k page.  */
617             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
618             xn = desc & (1 << 15);
619             *page_size = 0x10000;
620             break;
621         case 2: case 3: /* 4k page.  */
622             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
623             xn = desc & 1;
624             *page_size = 0x1000;
625             break;
626         default:
627             /* Never happens, but compiler isn't smart enough to tell.  */
628             g_assert_not_reached();
629         }
630     }
631     if (domain_prot == 3) {
632         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
633     } else {
634         if (pxn && !regime_is_user(env, mmu_idx)) {
635             xn = 1;
636         }
637         if (xn && access_type == MMU_INST_FETCH) {
638             fi->type = ARMFault_Permission;
639             goto do_fault;
640         }
641 
642         if (arm_feature(env, ARM_FEATURE_V6K) &&
643                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
644             /* The simplified model uses AP[0] as an access control bit.  */
645             if ((ap & 1) == 0) {
646                 /* Access flag fault.  */
647                 fi->type = ARMFault_AccessFlag;
648                 goto do_fault;
649             }
650             *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
651         } else {
652             *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
653         }
654         if (*prot && !xn) {
655             *prot |= PAGE_EXEC;
656         }
657         if (!(*prot & (1 << access_type))) {
658             /* Access permission fault.  */
659             fi->type = ARMFault_Permission;
660             goto do_fault;
661         }
662     }
663     if (ns) {
664         /* The NS bit will (as required by the architecture) have no effect if
665          * the CPU doesn't support TZ or this is a non-secure translation
666          * regime, because the attribute will already be non-secure.
667          */
668         attrs->secure = false;
669     }
670     *phys_ptr = phys_addr;
671     return false;
672 do_fault:
673     fi->domain = domain;
674     fi->level = level;
675     return true;
676 }
677 
678 /*
679  * Translate S2 section/page access permissions to protection flags
680  * @env:     CPUARMState
681  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
682  * @xn:      XN (execute-never) bits
683  * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
684  */
685 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
686 {
687     int prot = 0;
688 
689     if (s2ap & 1) {
690         prot |= PAGE_READ;
691     }
692     if (s2ap & 2) {
693         prot |= PAGE_WRITE;
694     }
695 
696     if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
697         switch (xn) {
698         case 0:
699             prot |= PAGE_EXEC;
700             break;
701         case 1:
702             if (s1_is_el0) {
703                 prot |= PAGE_EXEC;
704             }
705             break;
706         case 2:
707             break;
708         case 3:
709             if (!s1_is_el0) {
710                 prot |= PAGE_EXEC;
711             }
712             break;
713         default:
714             g_assert_not_reached();
715         }
716     } else {
717         if (!extract32(xn, 1, 1)) {
718             if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
719                 prot |= PAGE_EXEC;
720             }
721         }
722     }
723     return prot;
724 }
725 
726 /*
727  * Translate section/page access permissions to protection flags
728  * @env:     CPUARMState
729  * @mmu_idx: MMU index indicating required translation regime
730  * @is_aa64: TRUE if AArch64
731  * @ap:      The 2-bit simple AP (AP[2:1])
732  * @ns:      NS (non-secure) bit
733  * @xn:      XN (execute-never) bit
734  * @pxn:     PXN (privileged execute-never) bit
735  */
736 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
737                       int ap, int ns, int xn, int pxn)
738 {
739     bool is_user = regime_is_user(env, mmu_idx);
740     int prot_rw, user_rw;
741     bool have_wxn;
742     int wxn = 0;
743 
744     assert(mmu_idx != ARMMMUIdx_Stage2);
745     assert(mmu_idx != ARMMMUIdx_Stage2_S);
746 
747     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
748     if (is_user) {
749         prot_rw = user_rw;
750     } else {
751         if (user_rw && regime_is_pan(env, mmu_idx)) {
752             /* PAN forbids data accesses but doesn't affect insn fetch */
753             prot_rw = 0;
754         } else {
755             prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
756         }
757     }
758 
759     if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
760         return prot_rw;
761     }
762 
763     /* TODO have_wxn should be replaced with
764      *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
765      * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
766      * compatible processors have EL2, which is required for [U]WXN.
767      */
768     have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
769 
770     if (have_wxn) {
771         wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
772     }
773 
774     if (is_aa64) {
775         if (regime_has_2_ranges(mmu_idx) && !is_user) {
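            /*
             * A stage 1 page that is writable at EL0 is never executable at
             * the privileged level, so fold that into xn alongside PXN.
             */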
776             xn = pxn || (user_rw & PAGE_WRITE);
777         }
778     } else if (arm_feature(env, ARM_FEATURE_V7)) {
779         switch (regime_el(env, mmu_idx)) {
780         case 1:
781         case 3:
782             if (is_user) {
783                 xn = xn || !(user_rw & PAGE_READ);
784             } else {
785                 int uwxn = 0;
786                 if (have_wxn) {
787                     uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
788                 }
789                 xn = xn || !(prot_rw & PAGE_READ) || pxn ||
790                      (uwxn && (user_rw & PAGE_WRITE));
791             }
792             break;
793         case 2:
794             break;
795         }
796     } else {
797         xn = wxn = 0;
798     }
799 
800     if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
801         return prot_rw;
802     }
803     return prot_rw | PAGE_EXEC;
804 }
805 
806 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
807                                           ARMMMUIdx mmu_idx)
808 {
809     uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
810     uint32_t el = regime_el(env, mmu_idx);
811     int select, tsz;
812     bool epd, hpd;
813 
814     assert(mmu_idx != ARMMMUIdx_Stage2_S);
815 
816     if (mmu_idx == ARMMMUIdx_Stage2) {
817         /* VTCR */
818         bool sext = extract32(tcr, 4, 1);
819         bool sign = extract32(tcr, 3, 1);
820 
821         /*
822          * If the sign-extend bit is not the same as t0sz[3], the result
823          * is unpredictable. Flag this as a guest error.
824          */
825         if (sign != sext) {
826             qemu_log_mask(LOG_GUEST_ERROR,
827                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
828         }
829         tsz = sextract32(tcr, 0, 4) + 8;
830         select = 0;
831         hpd = false;
832         epd = false;
833     } else if (el == 2) {
834         /* HTCR */
835         tsz = extract32(tcr, 0, 3);
836         select = 0;
837         hpd = extract64(tcr, 24, 1);
838         epd = false;
839     } else {
840         int t0sz = extract32(tcr, 0, 3);
841         int t1sz = extract32(tcr, 16, 3);
842 
843         if (t1sz == 0) {
844             select = va > (0xffffffffu >> t0sz);
845         } else {
846             /* Note that we will detect errors later.  */
847             select = va >= ~(0xffffffffu >> t1sz);
848         }
849         if (!select) {
850             tsz = t0sz;
851             epd = extract32(tcr, 7, 1);
852             hpd = extract64(tcr, 41, 1);
853         } else {
854             tsz = t1sz;
855             epd = extract32(tcr, 23, 1);
856             hpd = extract64(tcr, 42, 1);
857         }
858         /* For aarch32, hpd0 is not enabled unless TTBCR.T2E is also set.  */
859         hpd &= extract32(tcr, 6, 1);
860     }
861 
862     return (ARMVAParameters) {
863         .tsz = tsz,
864         .select = select,
865         .epd = epd,
866         .hpd = hpd,
867     };
868 }
869 
870 /*
871  * check_s2_mmu_setup
872  * @cpu:        ARMCPU
873  * @is_aa64:    True if the translation regime is in AArch64 state
874  * @startlevel: Suggested starting level
875  * @inputsize:  Bitsize of IPAs
876  * @stride:     Page-table stride (See the ARM ARM)
877  *
878  * Returns true if the suggested S2 translation parameters are OK and
879  * false otherwise.
880  */
881 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
882                                int inputsize, int stride, int outputsize)
883 {
884     const int grainsize = stride + 3;
885     int startsizecheck;
886 
887     /*
888      * Negative levels are usually not allowed...
889      * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
890      * begins with level -1.  Note that previous feature tests will have
891      * eliminated this combination if it is not enabled.
892      */
893     if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
894         return false;
895     }
896 
897     startsizecheck = inputsize - ((3 - level) * stride + grainsize);
898     if (startsizecheck < 1 || startsizecheck > stride + 4) {
899         return false;
900     }
901 
902     if (is_aa64) {
903         switch (stride) {
904         case 13: /* 64KB Pages.  */
905             if (level == 0 || (level == 1 && outputsize <= 42)) {
906                 return false;
907             }
908             break;
909         case 11: /* 16KB Pages.  */
910             if (level == 0 || (level == 1 && outputsize <= 40)) {
911                 return false;
912             }
913             break;
914         case 9: /* 4KB Pages.  */
915             if (level == 0 && outputsize <= 42) {
916                 return false;
917             }
918             break;
919         default:
920             g_assert_not_reached();
921         }
922 
923         /* Inputsize checks.  */
924         if (inputsize > outputsize &&
925             (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
926             /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
927             return false;
928         }
929     } else {
930         /* AArch32 only supports 4KB pages. Assert on that.  */
931         assert(stride == 9);
932 
933         if (level == 0) {
934             return false;
935         }
936     }
937     return true;
938 }
939 
940 /**
941  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
942  *
943  * Returns false if the translation was successful. Otherwise, phys_ptr,
944  * attrs, prot and page_size may not be filled in, and the populated fsr
945  * value provides information on why the translation aborted, in the format
946  * of a long-format DFSR/IFSR fault register, with the following caveat:
947  * the WnR bit is never set (the caller must do this).
948  *
949  * @env: CPUARMState
950  * @address: virtual address to get physical address for
951  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
952  * @mmu_idx: MMU index indicating required translation regime
953  * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
954  *             table walk), must be true if this is stage 2 of a stage 1+2
955  *             walk for an EL0 access. If @mmu_idx is anything else,
956  *             @s1_is_el0 is ignored.
957  * @phys_ptr: set to the physical address corresponding to the virtual address
958  * @attrs: set to the memory transaction attributes to use
959  * @prot: set to the permissions for the page containing phys_ptr
960  * @page_size_ptr: set to the size of the page containing phys_ptr
961  * @fi: set to fault info if the translation fails
962  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
963  */
964 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
965                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
966                                bool s1_is_el0, hwaddr *phys_ptr,
967                                MemTxAttrs *txattrs, int *prot,
968                                target_ulong *page_size_ptr,
969                                ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
970 {
971     ARMCPU *cpu = env_archcpu(env);
972     /* Read an LPAE long-descriptor translation table. */
973     ARMFaultType fault_type = ARMFault_Translation;
974     uint32_t level;
975     ARMVAParameters param;
976     uint64_t ttbr;
977     hwaddr descaddr, indexmask, indexmask_grainsize;
978     uint32_t tableattrs;
979     target_ulong page_size;
980     uint32_t attrs;
981     int32_t stride;
982     int addrsize, inputsize, outputsize;
983     TCR *tcr = regime_tcr(env, mmu_idx);
984     int ap, ns, xn, pxn;
985     uint32_t el = regime_el(env, mmu_idx);
986     uint64_t descaddrmask;
987     bool aarch64 = arm_el_is_aa64(env, el);
988     bool guarded = false;
989 
990     /* TODO: This code does not support shareability levels. */
991     if (aarch64) {
992         int ps;
993 
994         param = aa64_va_parameters(env, address, mmu_idx,
995                                    access_type != MMU_INST_FETCH);
996         level = 0;
997 
998         /*
999          * If TxSZ is programmed to a value larger than the maximum,
1000          * or smaller than the effective minimum, it is IMPLEMENTATION
1001          * DEFINED whether we behave as if the field were programmed
1002          * within bounds, or if a level 0 Translation fault is generated.
1003          *
1004          * With FEAT_LVA, fault on less than minimum becomes required,
1005          * so our choice is to always raise the fault.
1006          */
1007         if (param.tsz_oob) {
1008             fault_type = ARMFault_Translation;
1009             goto do_fault;
1010         }
1011 
1012         addrsize = 64 - 8 * param.tbi;
1013         inputsize = 64 - param.tsz;
1014 
1015         /*
1016          * Bound PS by PARANGE to find the effective output address size.
1017          * ID_AA64MMFR0 is a read-only register so values outside of the
1018          * supported mappings can be considered an implementation error.
1019          */
1020         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1021         ps = MIN(ps, param.ps);
1022         assert(ps < ARRAY_SIZE(pamax_map));
1023         outputsize = pamax_map[ps];
1024     } else {
1025         param = aa32_va_parameters(env, address, mmu_idx);
1026         level = 1;
1027         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
1028         inputsize = addrsize - param.tsz;
1029         outputsize = 40;
1030     }
1031 
1032     /*
1033      * We determined the region when collecting the parameters, but we
1034      * have not yet validated that the address is valid for the region.
1035      * Extract the top bits and verify that they all match select.
1036      *
1037      * For aa32, if inputsize == addrsize, then we have selected the
1038      * region by exclusion in aa32_va_parameters and there is no more
1039      * validation to do here.
1040      */
1041     if (inputsize < addrsize) {
1042         target_ulong top_bits = sextract64(address, inputsize,
1043                                            addrsize - inputsize);
1044         if (-top_bits != param.select) {
1045             /* The gap between the two regions is a Translation fault */
1046             fault_type = ARMFault_Translation;
1047             goto do_fault;
1048         }
1049     }
1050 
1051     if (param.using64k) {
1052         stride = 13;
1053     } else if (param.using16k) {
1054         stride = 11;
1055     } else {
1056         stride = 9;
1057     }
1058 
1059     /*
1060      * Note that QEMU ignores shareability and cacheability attributes,
1061      * so we don't need to do anything with the SH, ORGN, IRGN fields
1062      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
1063      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
1064      * implement any ASID-like capability so we can ignore it (instead
1065      * we will always flush the TLB any time the ASID is changed).
1066      */
1067     ttbr = regime_ttbr(env, mmu_idx, param.select);
1068 
1069     /*
1070      * Here we should have set up all the parameters for the translation:
1071      * inputsize, ttbr, epd, stride, tbi
1072      */
1073 
1074     if (param.epd) {
1075         /*
1076          * Translation table walk disabled => Translation fault on TLB miss
1077          * Note: This is always 0 on 64-bit EL2 and EL3.
1078          */
1079         goto do_fault;
1080     }
1081 
1082     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
1083         /*
1084          * The starting level depends on the virtual address size (which can
1085          * be up to 48 bits) and the translation granule size. It indicates
1086          * the number of strides (stride bits at a time) needed to
1087          * consume the bits of the input address. In the pseudocode this is:
1088          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
1089          * where their 'inputsize' is our 'inputsize', 'grainsize' is
1090          * our 'stride + 3' and 'stride' is our 'stride'.
1091          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
1092          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
1093          * = 4 - (inputsize - 4) / stride;
1094          */
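        /*
         * For example, a 48-bit inputsize with 4KB granules (stride 9) gives
         * 4 - (48 - 4) / 9 = 0, a level 0 start, while a 39-bit inputsize
         * starts at level 1.
         */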
1095         level = 4 - (inputsize - 4) / stride;
1096     } else {
1097         /*
1098          * For stage 2 translations the starting level is specified by the
1099          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
1100          */
1101         uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
1102         uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1);
1103         uint32_t startlevel;
1104         bool ok;
1105 
1106         /* SL2 is RES0 unless DS == 1 and the granule is 4KB. */
1107         if (param.ds && stride == 9 && sl2) {
1108             if (sl0 != 0) {
1109                 level = 0;
1110                 fault_type = ARMFault_Translation;
1111                 goto do_fault;
1112             }
1113             startlevel = -1;
1114         } else if (!aarch64 || stride == 9) {
1115             /* AArch32 or 4KB pages */
1116             startlevel = 2 - sl0;
1117 
1118             if (cpu_isar_feature(aa64_st, cpu)) {
1119                 startlevel &= 3;
1120             }
1121         } else {
1122             /* 16KB or 64KB pages */
1123             startlevel = 3 - sl0;
1124         }
1125 
1126         /* Check that the starting level is valid. */
1127         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
1128                                 inputsize, stride, outputsize);
1129         if (!ok) {
1130             fault_type = ARMFault_Translation;
1131             goto do_fault;
1132         }
1133         level = startlevel;
1134     }
1135 
1136     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
1137     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
1138 
1139     /* Now we can extract the actual base address from the TTBR */
1140     descaddr = extract64(ttbr, 0, 48);
1141 
1142     /*
1143      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
1144      *
1145      * Otherwise, if the base address is out of range, raise AddressSizeFault.
1146      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
1147      * but we've just cleared the bits above 47, so simplify the test.
1148      */
1149     if (outputsize > 48) {
1150         descaddr |= extract64(ttbr, 2, 4) << 48;
1151     } else if (descaddr >> outputsize) {
1152         level = 0;
1153         fault_type = ARMFault_AddressSize;
1154         goto do_fault;
1155     }
1156 
1157     /*
1158      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
1159      * and also to mask out CnP (bit 0) which could validly be non-zero.
1160      */
1161     descaddr &= ~indexmask;
1162 
1163     /*
1164      * For AArch32, the address field in the descriptor goes up to bit 39
1165      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
1166      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
1167      * bits as part of the address, which will be checked via outputsize.
1168      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
1169      * the highest bits of a 52-bit output are placed elsewhere.
1170      */
1171     if (param.ds) {
1172         descaddrmask = MAKE_64BIT_MASK(0, 50);
1173     } else if (arm_feature(env, ARM_FEATURE_V8)) {
1174         descaddrmask = MAKE_64BIT_MASK(0, 48);
1175     } else {
1176         descaddrmask = MAKE_64BIT_MASK(0, 40);
1177     }
1178     descaddrmask &= ~indexmask_grainsize;
1179 
1180     /*
1181      * Secure accesses start with the page table in secure memory and
1182      * can be downgraded to non-secure at any step. Non-secure accesses
1183      * remain non-secure. We implement this by just ORing in the NSTable/NS
1184      * bits at each step.
1185      */
1186     tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
1187     for (;;) {
1188         uint64_t descriptor;
1189         bool nstable;
1190 
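        /*
         * The shift below is 3 less than a pure index extraction, so once the
         * low 3 bits are cleared the index is already scaled by the 8-byte
         * descriptor size.
         */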
1191         descaddr |= (address >> (stride * (4 - level))) & indexmask;
1192         descaddr &= ~7ULL;
1193         nstable = extract32(tableattrs, 4, 1);
1194         descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
1195         if (fi->type != ARMFault_None) {
1196             goto do_fault;
1197         }
1198 
1199         if (!(descriptor & 1) ||
1200             (!(descriptor & 2) && (level == 3))) {
1201             /* Invalid, or the Reserved level 3 encoding */
1202             goto do_fault;
1203         }
1204 
1205         descaddr = descriptor & descaddrmask;
1206 
1207         /*
1208          * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
1209          * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
1210          * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
1211          * raise AddressSizeFault.
1212          */
1213         if (outputsize > 48) {
1214             if (param.ds) {
1215                 descaddr |= extract64(descriptor, 8, 2) << 50;
1216             } else {
1217                 descaddr |= extract64(descriptor, 12, 4) << 48;
1218             }
1219         } else if (descaddr >> outputsize) {
1220             fault_type = ARMFault_AddressSize;
1221             goto do_fault;
1222         }
1223 
1224         if ((descriptor & 2) && (level < 3)) {
1225             /*
1226              * Table entry. The top five bits are attributes which may
1227              * propagate down through lower levels of the table (and
1228              * which are all arranged so that 0 means "no effect", so
1229              * we can gather them up by ORing in the bits at each level).
1230              */
1231             tableattrs |= extract64(descriptor, 59, 5);
1232             level++;
1233             indexmask = indexmask_grainsize;
1234             continue;
1235         }
1236         /*
1237          * Block entry at level 1 or 2, or page entry at level 3.
1238          * These are basically the same thing, although the number
1239          * of bits we pull in from the vaddr varies. Note that although
1240          * descaddrmask masks enough of the low bits of the descriptor
1241          * to give a correct page or table address, the address field
1242          * in a block descriptor is smaller; so we need to explicitly
1243          * clear the lower bits here before ORing in the low vaddr bits.
1244          */
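        /*
         * For example, with 4KB granules (stride 9) a level 3 entry maps a
         * 4KB page, level 2 a 2MB block and level 1 a 1GB block.
         */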
1245         page_size = (1ULL << ((stride * (4 - level)) + 3));
1246         descaddr &= ~(page_size - 1);
1247         descaddr |= (address & (page_size - 1));
1248         /* Extract attributes from the descriptor */
1249         attrs = extract64(descriptor, 2, 10)
1250             | (extract64(descriptor, 52, 12) << 10);
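        /*
         * In this packed layout attrs[9:0] are descriptor bits [11:2] and
         * attrs[21:10] are descriptor bits [63:52]; e.g. the AF (descriptor
         * bit 10) lands in attrs bit 8 and AP[2:1] (bits [7:6]) in attrs [5:4].
         */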
1251 
1252         if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1253             /* Stage 2 table descriptors do not include any attribute fields */
1254             break;
1255         }
1256         /* Merge in attributes from table descriptors */
1257         attrs |= nstable << 3; /* NS */
1258         guarded = extract64(descriptor, 50, 1);  /* GP */
1259         if (param.hpd) {
1260             /* HPD disables all the table attributes except NSTable.  */
1261             break;
1262         }
1263         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
1264         /*
1265          * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
1266          * means "force PL1 access only", which means forcing AP[1] to 0.
1267          */
1268         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
1269         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
1270         break;
1271     }
1272     /*
1273      * Here descaddr is the final physical address, and attributes
1274      * are all in attrs.
1275      */
1276     fault_type = ARMFault_AccessFlag;
1277     if ((attrs & (1 << 8)) == 0) {
1278         /* Access flag */
1279         goto do_fault;
1280     }
1281 
1282     ap = extract32(attrs, 4, 2);
1283 
1284     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1285         ns = mmu_idx == ARMMMUIdx_Stage2;
1286         xn = extract32(attrs, 11, 2);
1287         *prot = get_S2prot(env, ap, xn, s1_is_el0);
1288     } else {
1289         ns = extract32(attrs, 3, 1);
1290         xn = extract32(attrs, 12, 1);
1291         pxn = extract32(attrs, 11, 1);
1292         *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
1293     }
1294 
1295     fault_type = ARMFault_Permission;
1296     if (!(*prot & (1 << access_type))) {
1297         goto do_fault;
1298     }
1299 
1300     if (ns) {
1301         /*
1302          * The NS bit will (as required by the architecture) have no effect if
1303          * the CPU doesn't support TZ or this is a non-secure translation
1304          * regime, because the attribute will already be non-secure.
1305          */
1306         txattrs->secure = false;
1307     }
1308     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
1309     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
1310         arm_tlb_bti_gp(txattrs) = true;
1311     }
1312 
1313     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
1314         cacheattrs->is_s2_format = true;
1315         cacheattrs->attrs = extract32(attrs, 0, 4);
1316     } else {
1317         /* Index into MAIR registers for cache attributes */
1318         uint8_t attrindx = extract32(attrs, 0, 3);
1319         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
1320         assert(attrindx <= 7);
1321         cacheattrs->is_s2_format = false;
1322         cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
1323     }
1324 
1325     /*
1326      * For FEAT_LPA2 and effective DS, the SH field in the attributes
1327      * was re-purposed for output address bits.  The SH attribute in
1328      * that case comes from TCR_ELx, which we extracted earlier.
1329      */
1330     if (param.ds) {
1331         cacheattrs->shareability = param.sh;
1332     } else {
1333         cacheattrs->shareability = extract32(attrs, 6, 2);
1334     }
1335 
1336     *phys_ptr = descaddr;
1337     *page_size_ptr = page_size;
1338     return false;
1339 
1340 do_fault:
1341     fi->type = fault_type;
1342     fi->level = level;
1343     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
1344     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
1345                                mmu_idx == ARMMMUIdx_Stage2_S);
1346     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
1347     return true;
1348 }
1349 
1350 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
1351                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1352                                  hwaddr *phys_ptr, int *prot,
1353                                  ARMMMUFaultInfo *fi)
1354 {
1355     int n;
1356     uint32_t mask;
1357     uint32_t base;
1358     bool is_user = regime_is_user(env, mmu_idx);
1359 
1360     if (regime_translation_disabled(env, mmu_idx)) {
1361         /* MPU disabled.  */
1362         *phys_ptr = address;
1363         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1364         return false;
1365     }
1366 
1367     *phys_ptr = address;
1368     for (n = 7; n >= 0; n--) {
1369         base = env->cp15.c6_region[n];
1370         if ((base & 1) == 0) {
1371             continue;
1372         }
1373         mask = 1 << ((base >> 1) & 0x1f);
1374         /* Keep this shift separate from the above to avoid an
1375          * (undefined) << 32.  */
1376         mask = (mask << 1) - 1;
1377         if (((base ^ address) & ~mask) == 0) {
1378             break;
1379         }
1380     }
1381     if (n < 0) {
1382         fi->type = ARMFault_Background;
1383         return true;
1384     }
1385 
1386     if (access_type == MMU_INST_FETCH) {
1387         mask = env->cp15.pmsav5_insn_ap;
1388     } else {
1389         mask = env->cp15.pmsav5_data_ap;
1390     }
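    /* Each of the 8 regions has a 4-bit access permission field. */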
1391     mask = (mask >> (n * 4)) & 0xf;
1392     switch (mask) {
1393     case 0:
1394         fi->type = ARMFault_Permission;
1395         fi->level = 1;
1396         return true;
1397     case 1:
1398         if (is_user) {
1399             fi->type = ARMFault_Permission;
1400             fi->level = 1;
1401             return true;
1402         }
1403         *prot = PAGE_READ | PAGE_WRITE;
1404         break;
1405     case 2:
1406         *prot = PAGE_READ;
1407         if (!is_user) {
1408             *prot |= PAGE_WRITE;
1409         }
1410         break;
1411     case 3:
1412         *prot = PAGE_READ | PAGE_WRITE;
1413         break;
1414     case 5:
1415         if (is_user) {
1416             fi->type = ARMFault_Permission;
1417             fi->level = 1;
1418             return true;
1419         }
1420         *prot = PAGE_READ;
1421         break;
1422     case 6:
1423         *prot = PAGE_READ;
1424         break;
1425     default:
1426         /* Bad permission.  */
1427         fi->type = ARMFault_Permission;
1428         fi->level = 1;
1429         return true;
1430     }
1431     *prot |= PAGE_EXEC;
1432     return false;
1433 }
1434 
1435 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
1436                                          int32_t address, int *prot)
1437 {
1438     if (!arm_feature(env, ARM_FEATURE_M)) {
1439         *prot = PAGE_READ | PAGE_WRITE;
1440         switch (address) {
1441         case 0xF0000000 ... 0xFFFFFFFF:
1442             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
1443                 /* Hivecs is enabled: execution from the high vectors region is OK */
1444                 *prot |= PAGE_EXEC;
1445             }
1446             break;
1447         case 0x00000000 ... 0x7FFFFFFF:
1448             *prot |= PAGE_EXEC;
1449             break;
1450         }
1451     } else {
1452         /* Default system address map for M profile cores.
1453          * The architecture specifies which regions are execute-never;
1454          * at the MPU level no other checks are defined.
1455          */
1456         switch (address) {
1457         case 0x00000000 ... 0x1fffffff: /* ROM */
1458         case 0x20000000 ... 0x3fffffff: /* SRAM */
1459         case 0x60000000 ... 0x7fffffff: /* RAM */
1460         case 0x80000000 ... 0x9fffffff: /* RAM */
1461             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1462             break;
1463         case 0x40000000 ... 0x5fffffff: /* Peripheral */
1464         case 0xa0000000 ... 0xbfffffff: /* Device */
1465         case 0xc0000000 ... 0xdfffffff: /* Device */
1466         case 0xe0000000 ... 0xffffffff: /* System */
1467             *prot = PAGE_READ | PAGE_WRITE;
1468             break;
1469         default:
1470             g_assert_not_reached();
1471         }
1472     }
1473 }
1474 
1475 static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
1476 {
1477     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
1478     return arm_feature(env, ARM_FEATURE_M) &&
1479         extract32(address, 20, 12) == 0xe00;
1480 }
1481 
1482 static bool m_is_system_region(CPUARMState *env, uint32_t address)
1483 {
1484     /*
1485      * True if address is in the M profile system region
1486      * 0xe0000000 - 0xffffffff
1487      */
1488     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
1489 }
1490 
1491 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
1492                                          bool is_user)
1493 {
1494     /*
1495      * Return true if we should use the default memory map as a
1496      * "background" region if there are no hits against any MPU regions.
1497      */
1498     CPUARMState *env = &cpu->env;
1499 
1500     if (is_user) {
1501         return false;
1502     }
1503 
1504     if (arm_feature(env, ARM_FEATURE_M)) {
1505         return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
1506             & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1507     } else {
1508         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1509     }
1510 }
1511 
1512 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
1513                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1514                                  hwaddr *phys_ptr, int *prot,
1515                                  target_ulong *page_size,
1516                                  ARMMMUFaultInfo *fi)
1517 {
1518     ARMCPU *cpu = env_archcpu(env);
1519     int n;
1520     bool is_user = regime_is_user(env, mmu_idx);
1521 
1522     *phys_ptr = address;
1523     *page_size = TARGET_PAGE_SIZE;
1524     *prot = 0;
1525 
1526     if (regime_translation_disabled(env, mmu_idx) ||
1527         m_is_ppb_region(env, address)) {
1528         /*
1529          * MPU disabled or M profile PPB access: use default memory map.
1530          * The other case which uses the default memory map in the
1531          * v7M ARM ARM pseudocode is exception vector reads from the vector
1532          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
1533          * which always does a direct read using address_space_ldl(), rather
1534          * than going via this function, so we don't need to check that here.
1535          */
1536         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
1537     } else { /* MPU enabled */
1538         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1539             /* region search */
1540             uint32_t base = env->pmsav7.drbar[n];
1541             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
1542             uint32_t rmask;
1543             bool srdis = false;
1544 
1545             if (!(env->pmsav7.drsr[n] & 0x1)) {
1546                 continue;
1547             }
1548 
1549             if (!rsize) {
1550                 qemu_log_mask(LOG_GUEST_ERROR,
1551                               "DRSR[%d]: Rsize field cannot be 0\n", n);
1552                 continue;
1553             }
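                 /*
                  * DRSR.Rsize N encodes a region of 2^(N+1) bytes; after the
                  * increment below, rmask spans the whole region.
                  */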
1554             rsize++;
1555             rmask = (1ull << rsize) - 1;
1556 
1557             if (base & rmask) {
1558                 qemu_log_mask(LOG_GUEST_ERROR,
1559                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
1560                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
1561                               n, base, rmask);
1562                 continue;
1563             }
1564 
1565             if (address < base || address > base + rmask) {
1566                 /*
1567                  * Address not in this region. We must check whether the
1568                  * region covers addresses in the same page as our address.
1569                  * In that case we must not report a size that covers the
1570                  * whole page for a subsequent hit against a different MPU
1571                  * region or the background region, because it would result in
1572                  * incorrect TLB hits for subsequent accesses to addresses that
1573                  * are in this MPU region.
1574                  */
1575                 if (ranges_overlap(base, rmask,
1576                                    address & TARGET_PAGE_MASK,
1577                                    TARGET_PAGE_SIZE)) {
1578                     *page_size = 1;
1579                 }
1580                 continue;
1581             }
1582 
1583             /* Region matched */
1584 
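                 /*
                  * DRSR bits [15:8] are the subregion disable (SRD) bits;
                  * each of the 8 subregions covers one eighth of the region.
                  */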
1585             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
1586                 int i, snd;
1587                 uint32_t srdis_mask;
1588 
1589                 rsize -= 3; /* sub region size (power of 2) */
1590                 snd = ((address - base) >> rsize) & 0x7;
1591                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
1592 
1593                 srdis_mask = srdis ? 0x3 : 0x0;
1594                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
1595                     /*
1596                      * This will check in groups of 2, 4 and then 8, whether
1597                      * the subregion bits are consistent. rsize is incremented
1598                      * back up to give the region size, considering consistent
1599                      * adjacent subregions as one region. Stop testing if rsize
1600                      * is already big enough for an entire QEMU page.
1601                      */
1602                     int snd_rounded = snd & ~(i - 1);
1603                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
1604                                                      snd_rounded + 8, i);
1605                     if (srdis_mask ^ srdis_multi) {
1606                         break;
1607                     }
1608                     srdis_mask = (srdis_mask << i) | srdis_mask;
1609                     rsize++;
1610                 }
1611             }
1612             if (srdis) {
1613                 continue;
1614             }
1615             if (rsize < TARGET_PAGE_BITS) {
1616                 *page_size = 1 << rsize;
1617             }
1618             break;
1619         }
1620 
1621         if (n == -1) { /* no hits */
1622             if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
1623                 /* background fault */
1624                 fi->type = ARMFault_Background;
1625                 return true;
1626             }
1627             get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
1628         } else { /* an MPU hit! */
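             /* DRACR bit [12] is XN; bits [10:8] are the AP field. */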
1629             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
1630             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
1631 
1632             if (m_is_system_region(env, address)) {
1633                 /* System space is always execute never */
1634                 xn = 1;
1635             }
1636 
1637             if (is_user) { /* User mode AP bit decoding */
1638                 switch (ap) {
1639                 case 0:
1640                 case 1:
1641                 case 5:
1642                     break; /* no access */
1643                 case 3:
1644                     *prot |= PAGE_WRITE;
1645                     /* fall through */
1646                 case 2:
1647                 case 6:
1648                     *prot |= PAGE_READ | PAGE_EXEC;
1649                     break;
1650                 case 7:
1651                     /* for v7M, same as 6; for R profile a reserved value */
1652                     if (arm_feature(env, ARM_FEATURE_M)) {
1653                         *prot |= PAGE_READ | PAGE_EXEC;
1654                         break;
1655                     }
1656                     /* fall through */
1657                 default:
1658                     qemu_log_mask(LOG_GUEST_ERROR,
1659                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1660                                   PRIx32 "\n", n, ap);
1661                 }
1662             } else { /* Priv. mode AP bits decoding */
1663                 switch (ap) {
1664                 case 0:
1665                     break; /* no access */
1666                 case 1:
1667                 case 2:
1668                 case 3:
1669                     *prot |= PAGE_WRITE;
1670                     /* fall through */
1671                 case 5:
1672                 case 6:
1673                     *prot |= PAGE_READ | PAGE_EXEC;
1674                     break;
1675                 case 7:
1676                     /* for v7M, same as 6; for R profile a reserved value */
1677                     if (arm_feature(env, ARM_FEATURE_M)) {
1678                         *prot |= PAGE_READ | PAGE_EXEC;
1679                         break;
1680                     }
1681                     /* fall through */
1682                 default:
1683                     qemu_log_mask(LOG_GUEST_ERROR,
1684                                   "DRACR[%d]: Bad value for AP bits: 0x%"
1685                                   PRIx32 "\n", n, ap);
1686                 }
1687             }
1688 
1689             /* execute never */
1690             if (xn) {
1691                 *prot &= ~PAGE_EXEC;
1692             }
1693         }
1694     }
1695 
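     /*
      * MMU_DATA_LOAD, MMU_DATA_STORE and MMU_INST_FETCH are 0, 1 and 2, so
      * (1 << access_type) lines up with PAGE_READ, PAGE_WRITE and PAGE_EXEC;
      * report a permission fault if the needed right is missing.
      */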
1696     fi->type = ARMFault_Permission;
1697     fi->level = 1;
1698     return !(*prot & (1 << access_type));
1699 }
1700 
1701 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1702                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1703                        hwaddr *phys_ptr, MemTxAttrs *txattrs,
1704                        int *prot, bool *is_subpage,
1705                        ARMMMUFaultInfo *fi, uint32_t *mregion)
1706 {
1707     /*
1708      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1709      * that a full phys-to-virt translation does).
1710      * mregion is (if not NULL) set to the region number which matched,
1711      * or -1 if no region number is returned (MPU off, address did not
1712      * hit a region, address hit in multiple regions).
1713      * We set is_subpage to true if the region hit doesn't cover the
1714      * entire TARGET_PAGE the address is within.
1715      */
1716     ARMCPU *cpu = env_archcpu(env);
1717     bool is_user = regime_is_user(env, mmu_idx);
1718     uint32_t secure = regime_is_secure(env, mmu_idx);
1719     int n;
1720     int matchregion = -1;
1721     bool hit = false;
1722     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1723     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1724 
1725     *is_subpage = false;
1726     *phys_ptr = address;
1727     *prot = 0;
1728     if (mregion) {
1729         *mregion = -1;
1730     }
1731 
1732     /*
1733      * Unlike the ARM ARM pseudocode, we don't need to check whether this
1734      * was an exception vector read from the vector table (which is always
1735      * done using the default system address map), because those accesses
1736      * are done in arm_v7m_load_vector(), which always does a direct
1737      * read using address_space_ldl(), rather than going via this function.
1738      */
1739     if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
1740         hit = true;
1741     } else if (m_is_ppb_region(env, address)) {
1742         hit = true;
1743     } else {
1744         if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
1745             hit = true;
1746         }
1747 
1748         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1749             /* region search */
1750             /*
1751              * Note that the base address is bits [31:5] from the register
1752              * with bits [4:0] all zeroes, but the limit address is bits
1753              * [31:5] from the register with bits [4:0] all ones.
1754              */
1755             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1756             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
1757 
1758             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1759                 /* Region disabled */
1760                 continue;
1761             }
1762 
1763             if (address < base || address > limit) {
1764                 /*
1765                  * Address not in this region. We must check whether the
1766                  * region covers addresses in the same page as our address.
1767                  * In that case we must not report a size that covers the
1768                  * whole page for a subsequent hit against a different MPU
1769                  * region or the background region, because it would result in
1770                  * incorrect TLB hits for subsequent accesses to addresses that
1771                  * are in this MPU region.
1772                  */
1773                 if (limit >= base &&
1774                     ranges_overlap(base, limit - base + 1,
1775                                    addr_page_base,
1776                                    TARGET_PAGE_SIZE)) {
1777                     *is_subpage = true;
1778                 }
1779                 continue;
1780             }
1781 
1782             if (base > addr_page_base || limit < addr_page_limit) {
1783                 *is_subpage = true;
1784             }
1785 
1786             if (matchregion != -1) {
1787                 /*
1788                  * Multiple regions match -- always a failure (unlike
1789                  * PMSAv7 where highest-numbered-region wins)
1790                  */
1791                 fi->type = ARMFault_Permission;
1792                 fi->level = 1;
1793                 return true;
1794             }
1795 
1796             matchregion = n;
1797             hit = true;
1798         }
1799     }
1800 
1801     if (!hit) {
1802         /* background fault */
1803         fi->type = ARMFault_Background;
1804         return true;
1805     }
1806 
1807     if (matchregion == -1) {
1808         /* hit using the background region */
1809         get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
1810     } else {
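         /*
          * In MPU_RBAR, bit [0] is XN and bits [2:1] are the AP field;
          * v8.1-M adds PXN as MPU_RLAR bit [4].
          */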
1811         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
1812         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
1813         bool pxn = false;
1814 
1815         if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1816             pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
1817         }
1818 
1819         if (m_is_system_region(env, address)) {
1820             /* System space is always execute never */
1821             xn = 1;
1822         }
1823 
1824         *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
1825         if (*prot && !xn && !(pxn && !is_user)) {
1826             *prot |= PAGE_EXEC;
1827         }
1828         /*
1829          * We don't need to look the attribute up in the MAIR0/MAIR1
1830          * registers because that only tells us about cacheability.
1831          */
1832         if (mregion) {
1833             *mregion = matchregion;
1834         }
1835     }
1836 
1837     fi->type = ARMFault_Permission;
1838     fi->level = 1;
1839     return !(*prot & (1 << access_type));
1840 }
1841 
1842 static bool v8m_is_sau_exempt(CPUARMState *env,
1843                               uint32_t address, MMUAccessType access_type)
1844 {
1845     /*
1846      * The architecture specifies that certain address ranges are
1847      * exempt from v8M SAU/IDAU checks.
1848      */
1849     return
1850         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
1851         (address >= 0xe0000000 && address <= 0xe0002fff) ||
1852         (address >= 0xe000e000 && address <= 0xe000efff) ||
1853         (address >= 0xe002e000 && address <= 0xe002efff) ||
1854         (address >= 0xe0040000 && address <= 0xe0041fff) ||
1855         (address >= 0xe00ff000 && address <= 0xe00fffff);
1856 }
1857 
1858 void v8m_security_lookup(CPUARMState *env, uint32_t address,
1859                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
1860                                 V8M_SAttributes *sattrs)
1861 {
1862     /*
1863      * Look up the security attributes for this address. Compare the
1864      * pseudocode SecurityCheck() function.
1865      * We assume the caller has zero-initialized *sattrs.
1866      */
1867     ARMCPU *cpu = env_archcpu(env);
1868     int r;
1869     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
1870     int idau_region = IREGION_NOTVALID;
1871     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1872     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1873 
1874     if (cpu->idau) {
1875         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
1876         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
1877 
1878         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
1879                    &idau_nsc);
1880     }
1881 
1882     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
1883         /* 0xf0000000..0xffffffff is always S for insn fetches */
1884         return;
1885     }
1886 
1887     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
1888         sattrs->ns = !regime_is_secure(env, mmu_idx);
1889         return;
1890     }
1891 
1892     if (idau_region != IREGION_NOTVALID) {
1893         sattrs->irvalid = true;
1894         sattrs->iregion = idau_region;
1895     }
1896 
1897     switch (env->sau.ctrl & 3) {
1898     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
1899         break;
1900     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
1901         sattrs->ns = true;
1902         break;
1903     default: /* SAU.ENABLE == 1 */
1904         for (r = 0; r < cpu->sau_sregion; r++) {
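             /*
              * SAU_RLAR bit [0] enables the region and bit [1] marks it
              * Non-secure callable; base and limit are 32-byte granular.
              */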
1905             if (env->sau.rlar[r] & 1) {
1906                 uint32_t base = env->sau.rbar[r] & ~0x1f;
1907                 uint32_t limit = env->sau.rlar[r] | 0x1f;
1908 
1909                 if (base <= address && limit >= address) {
1910                     if (base > addr_page_base || limit < addr_page_limit) {
1911                         sattrs->subpage = true;
1912                     }
1913                     if (sattrs->srvalid) {
1914                         /*
1915                          * If we hit in more than one region then we must report
1916                          * as Secure, not NS-Callable, with no valid region
1917                          * number info.
1918                          */
1919                         sattrs->ns = false;
1920                         sattrs->nsc = false;
1921                         sattrs->sregion = 0;
1922                         sattrs->srvalid = false;
1923                         break;
1924                     } else {
1925                         if (env->sau.rlar[r] & 2) {
1926                             sattrs->nsc = true;
1927                         } else {
1928                             sattrs->ns = true;
1929                         }
1930                         sattrs->srvalid = true;
1931                         sattrs->sregion = r;
1932                     }
1933                 } else {
1934                     /*
1935                      * Address not in this region. We must check whether the
1936                      * region covers addresses in the same page as our address.
1937                      * In that case we must not report a size that covers the
1938                      * whole page for a subsequent hit against a different MPU
1939                      * region or the background region, because it would result
1940                      * in incorrect TLB hits for subsequent accesses to
1941                      * addresses that are in this MPU region.
1942                      */
1943                     if (limit >= base &&
1944                         ranges_overlap(base, limit - base + 1,
1945                                        addr_page_base,
1946                                        TARGET_PAGE_SIZE)) {
1947                         sattrs->subpage = true;
1948                     }
1949                 }
1950             }
1951         }
1952         break;
1953     }
1954 
1955     /*
1956      * The IDAU will override the SAU lookup results if it specifies
1957      * higher security than the SAU does.
1958      */
1959     if (!idau_ns) {
1960         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
1961             sattrs->ns = false;
1962             sattrs->nsc = idau_nsc;
1963         }
1964     }
1965 }
1966 
1967 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
1968                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1969                                  hwaddr *phys_ptr, MemTxAttrs *txattrs,
1970                                  int *prot, target_ulong *page_size,
1971                                  ARMMMUFaultInfo *fi)
1972 {
1973     uint32_t secure = regime_is_secure(env, mmu_idx);
1974     V8M_SAttributes sattrs = {};
1975     bool ret;
1976     bool mpu_is_subpage;
1977 
1978     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1979         v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
1980         if (access_type == MMU_INST_FETCH) {
1981             /*
1982              * Instruction fetches always use the MMU bank and the
1983              * transaction attribute determined by the fetch address,
1984              * regardless of CPU state. This is painful for QEMU
1985              * to handle, because it would mean we need to encode
1986              * into the mmu_idx not just the (user, negpri) information
1987              * for the current security state but also that for the
1988              * other security state, which would balloon the number
1989              * of mmu_idx values needed alarmingly.
1990              * Fortunately we can avoid this because it's not actually
1991              * possible to arbitrarily execute code from memory with
1992              * the wrong security attribute: it will always generate
1993              * an exception of some kind or another, apart from the
1994              * special case of an NS CPU executing an SG instruction
1995              * in S&NSC memory. So we always just fail the translation
1996              * here and sort things out in the exception handler
1997              * (including possibly emulating an SG instruction).
1998              */
1999             if (sattrs.ns != !secure) {
2000                 if (sattrs.nsc) {
2001                     fi->type = ARMFault_QEMU_NSCExec;
2002                 } else {
2003                     fi->type = ARMFault_QEMU_SFault;
2004                 }
2005                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
2006                 *phys_ptr = address;
2007                 *prot = 0;
2008                 return true;
2009             }
2010         } else {
2011             /*
2012              * For data accesses we always use the MMU bank indicated
2013              * by the current CPU state, but the security attributes
2014              * might downgrade a secure access to nonsecure.
2015              */
2016             if (sattrs.ns) {
2017                 txattrs->secure = false;
2018             } else if (!secure) {
2019                 /*
2020                  * NS access to S memory must fault.
2021                  * Architecturally we should first check whether the
2022                  * MPU information for this address indicates that we
2023                  * are doing an unaligned access to Device memory, which
2024                  * should generate a UsageFault instead. QEMU does not
2025                  * currently check for that kind of unaligned access though.
2026                  * If we added it we would need to do so as a special case
2027                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2028                  */
2029                 fi->type = ARMFault_QEMU_SFault;
2030                 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
2031                 *phys_ptr = address;
2032                 *prot = 0;
2033                 return true;
2034             }
2035         }
2036     }
2037 
2038     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
2039                             txattrs, prot, &mpu_is_subpage, fi, NULL);
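     /*
      * Reporting a 1-byte page size stops this translation being cached for
      * the whole page when the matching SAU or MPU region covers only part
      * of it.
      */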
2040     *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
2041     return ret;
2042 }
2043 
2044 /*
2045  * Translate from the 4-bit stage 2 representation of
2046  * memory attributes (without cache-allocation hints) to
2047  * the 8-bit representation of the stage 1 MAIR registers
2048  * (which includes allocation hints).
2049  *
2050  * ref: shared/translation/attrs/S2AttrDecode()
2051  *      .../S2ConvertAttrsHints()
2052  */
2053 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
2054 {
2055     uint8_t hiattr = extract32(s2attrs, 2, 2);
2056     uint8_t loattr = extract32(s2attrs, 0, 2);
2057     uint8_t hihint = 0, lohint = 0;
2058 
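     /*
      * In the stage 2 format, attrs[3:2] == 0 means Device memory; otherwise
      * attrs[3:2] is the outer and attrs[1:0] the inner cacheability, with
      * 1 = Non-cacheable, 2 = Write-Through, 3 = Write-Back.
      */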
2059     if (hiattr != 0) { /* normal memory */
2060         if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */
2061             hiattr = loattr = 1; /* non-cacheable */
2062         } else {
2063             if (hiattr != 1) { /* Write-through or write-back */
2064                 hihint = 3; /* RW allocate */
2065             }
2066             if (loattr != 1) { /* Write-through or write-back */
2067                 lohint = 3; /* RW allocate */
2068             }
2069         }
2070     }
2071 
2072     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2073 }
2074 
2075 /*
2076  * Combine either inner or outer cacheability attributes for normal
2077  * memory, according to table D4-42 and pseudocode procedure
2078  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2079  *
2080  * NB: only stage 1 includes allocation hints (RW bits), leading to
2081  * some asymmetry.
2082  */
2083 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2084 {
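     /*
      * Each nibble is in MAIR format for Normal memory: 0b0100 is
      * Non-cacheable; otherwise bits [3:2] of 0b00 or 0b10 mean
      * Write-Through, 0b01 or 0b11 mean Write-Back, and bits [1:0] are the
      * R/W allocation hints.
      */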
2085     if (s1 == 4 || s2 == 4) {
2086         /* non-cacheable has precedence */
2087         return 4;
2088     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2089         /* stage 1 write-through takes precedence */
2090         return s1;
2091     } else if (extract32(s2, 2, 2) == 2) {
2092         /* stage 2 write-through takes precedence, but the allocation hint
2093          * is still taken from stage 1
2094          */
2095         return (2 << 2) | extract32(s1, 0, 2);
2096     } else { /* write-back */
2097         return s1;
2098     }
2099 }
2100 
2101 /*
2102  * Combine the memory type and cacheability attributes of
2103  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2104  * combined attributes in MAIR_EL1 format.
2105  */
2106 static uint8_t combined_attrs_nofwb(CPUARMState *env,
2107                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
2108 {
2109     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2110 
2111     s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);
2112 
2113     s1lo = extract32(s1.attrs, 0, 4);
2114     s2lo = extract32(s2_mair_attrs, 0, 4);
2115     s1hi = extract32(s1.attrs, 4, 4);
2116     s2hi = extract32(s2_mair_attrs, 4, 4);
2117 
2118     /* Combine memory type and cacheability attributes */
2119     if (s1hi == 0 || s2hi == 0) {
2120         /* Device has precedence over normal */
2121         if (s1lo == 0 || s2lo == 0) {
2122             /* nGnRnE has precedence over anything */
2123             ret_attrs = 0;
2124         } else if (s1lo == 4 || s2lo == 4) {
2125             /* non-Reordering has precedence over Reordering */
2126             ret_attrs = 4;  /* nGnRE */
2127         } else if (s1lo == 8 || s2lo == 8) {
2128             /* non-Gathering has precedence over Gathering */
2129             ret_attrs = 8;  /* nGRE */
2130         } else {
2131             ret_attrs = 0xc; /* GRE */
2132         }
2133     } else { /* Normal memory */
2134         /* Outer/inner cacheability combine independently */
2135         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2136                   | combine_cacheattr_nibble(s1lo, s2lo);
2137     }
2138     return ret_attrs;
2139 }
2140 
2141 static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2142 {
2143     /*
2144      * Given the 4 bits specifying the outer or inner cacheability
2145      * in MAIR format, return a value specifying Normal Write-Back,
2146      * with the allocation and transient hints taken from the input
2147      * if the input specified some kind of cacheable attribute.
2148      */
2149     if (attr == 0 || attr == 4) {
2150         /*
2151          * 0 == an UNPREDICTABLE encoding
2152          * 4 == Non-cacheable
2153          * Either way, force Write-Back RW allocate non-transient
2154          */
2155         return 0xf;
2156     }
2157     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2158     return attr | 4;
2159 }
2160 
2161 /*
2162  * Combine the memory type and cacheability attributes of
2163  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2164  * combined attributes in MAIR_EL1 format.
2165  */
2166 static uint8_t combined_attrs_fwb(CPUARMState *env,
2167                                   ARMCacheAttrs s1, ARMCacheAttrs s2)
2168 {
2169     switch (s2.attrs) {
2170     case 7:
2171         /* Use stage 1 attributes */
2172         return s1.attrs;
2173     case 6:
2174         /*
2175          * Force Normal Write-Back. Note that if S1 is Normal cacheable
2176          * then we take the allocation hints from it; otherwise it is
2177          * RW allocate, non-transient.
2178          */
2179         if ((s1.attrs & 0xf0) == 0) {
2180             /* S1 is Device */
2181             return 0xff;
2182         }
2183         /* Need to check the Inner and Outer nibbles separately */
2184         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2185             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2186     case 5:
2187         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2188         if ((s1.attrs & 0xf0) == 0) {
2189             return s1.attrs;
2190         }
2191         return 0x44;
2192     case 0 ... 3:
2193         /* Force Device, of subtype specified by S2 */
2194         return s2.attrs << 2;
2195     default:
2196         /*
2197          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2198          * arbitrarily force Device.
2199          */
2200         return 0;
2201     }
2202 }
2203 
2204 /*
2205  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2206  * and CombineS1S2Desc()
2207  *
2208  * @env:     CPUARMState
2209  * @s1:      Attributes from stage 1 walk
2210  * @s2:      Attributes from stage 2 walk
2211  */
2212 static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
2213                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
2214 {
2215     ARMCacheAttrs ret;
2216     bool tagged = false;
2217 
2218     assert(s2.is_s2_format && !s1.is_s2_format);
2219     ret.is_s2_format = false;
2220 
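     /*
      * 0xf0 is the MAIR encoding for Tagged Normal memory (FEAT_MTE).
      * Combine it as ordinary Write-Back (0xff) and restore the Tagged
      * encoding afterwards if the combined result is still 0xff.
      */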
2221     if (s1.attrs == 0xf0) {
2222         tagged = true;
2223         s1.attrs = 0xff;
2224     }
2225 
2226     /* Combine shareability attributes (table D4-43) */
2227     if (s1.shareability == 2 || s2.shareability == 2) {
2228         /* if either are outer-shareable, the result is outer-shareable */
2229         ret.shareability = 2;
2230     } else if (s1.shareability == 3 || s2.shareability == 3) {
2231         /* if either are inner-shareable, the result is inner-shareable */
2232         ret.shareability = 3;
2233     } else {
2234         /* both non-shareable */
2235         ret.shareability = 0;
2236     }
2237 
2238     /* Combine memory type and cacheability attributes */
2239     if (arm_hcr_el2_eff(env) & HCR_FWB) {
2240         ret.attrs = combined_attrs_fwb(env, s1, s2);
2241     } else {
2242         ret.attrs = combined_attrs_nofwb(env, s1, s2);
2243     }
2244 
2245     /*
2246      * Any location for which the resultant memory type is any
2247      * type of Device memory is always treated as Outer Shareable.
2248      * Any location for which the resultant memory type is Normal
2249      * Inner Non-cacheable, Outer Non-cacheable is always treated
2250      * as Outer Shareable.
2251      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2252      */
2253     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2254         ret.shareability = 2;
2255     }
2256 
2257     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2258     if (tagged && ret.attrs == 0xff) {
2259         ret.attrs = 0xf0;
2260     }
2261 
2262     return ret;
2263 }
2264 
2265 /**
2266  * get_phys_addr - get the physical address for this virtual address
2267  *
2268  * Find the physical address corresponding to the given virtual address,
2269  * by doing a translation table walk on MMU based systems or using the
2270  * MPU state on MPU based systems.
2271  *
2272  * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
2273  * prot and page_size may not be filled in, and the populated fault information
2274  * (fi) describes why the translation aborted, in the format of a
2275  * DFSR/IFSR fault register, with the following caveats:
2276  *  * we honour the short vs long DFSR format differences.
2277  *  * the WnR bit is never set (the caller must do this).
2278  *  * for PMSAv5-based systems we don't bother to return a full FSR format
2279  *    value.
2280  *
2281  * @env: CPUARMState
2282  * @address: virtual address to get physical address for
2283  * @access_type: 0 (MMU_DATA_LOAD), 1 (MMU_DATA_STORE) or 2 (MMU_INST_FETCH)
2284  * @mmu_idx: MMU index indicating required translation regime
2285  * @phys_ptr: set to the physical address corresponding to the virtual address
2286  * @attrs: set to the memory transaction attributes to use
2287  * @prot: set to the permissions for the page containing phys_ptr
2288  * @page_size: set to the size of the page containing phys_ptr
2289  * @fi: set to fault info if the translation fails
2290  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
2291  */
2292 bool get_phys_addr(CPUARMState *env, target_ulong address,
2293                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
2294                    hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
2295                    target_ulong *page_size,
2296                    ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
2297 {
2298     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
2299 
2300     if (mmu_idx != s1_mmu_idx) {
2301         /*
2302          * Call ourselves recursively to do the stage 1 and then stage 2
2303          * translations if mmu_idx is a two-stage regime.
2304          */
2305         if (arm_feature(env, ARM_FEATURE_EL2)) {
2306             hwaddr ipa;
2307             int s2_prot;
2308             int ret;
2309             bool ipa_secure;
2310             ARMCacheAttrs cacheattrs2 = {};
2311             ARMMMUIdx s2_mmu_idx;
2312             bool is_el0;
2313 
2314             ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
2315                                 attrs, prot, page_size, fi, cacheattrs);
2316 
2317             /* If S1 fails or S2 is disabled, return early.  */
2318             if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
2319                 *phys_ptr = ipa;
2320                 return ret;
2321             }
2322 
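                 /*
                  * First pick the PA space in which the stage 2 table walk
                  * itself is performed: VSTCR_EL2.SW (for Secure IPAs) and
                  * VTCR_EL2.NSW (for Non-secure IPAs), when set, send the
                  * walk to the Non-secure PA space. The output PA space is
                  * checked separately after the walk, below.
                  */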
2323             ipa_secure = attrs->secure;
2324             if (arm_is_secure_below_el3(env)) {
2325                 if (ipa_secure) {
2326                     attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
2327                 } else {
2328                     attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
2329                 }
2330             } else {
2331                 assert(!ipa_secure);
2332             }
2333 
2334             s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
2335             is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
2336 
2337             /* S1 is done. Now do S2 translation.  */
2338             ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
2339                                      phys_ptr, attrs, &s2_prot,
2340                                      page_size, fi, &cacheattrs2);
2341             fi->s2addr = ipa;
2342             /* Combine the S1 and S2 perms.  */
2343             *prot &= s2_prot;
2344 
2345             /* If S2 fails, return early.  */
2346             if (ret) {
2347                 return ret;
2348             }
2349 
2350             /* Combine the S1 and S2 cache attributes. */
2351             if (arm_hcr_el2_eff(env) & HCR_DC) {
2352                 /*
2353                  * HCR.DC forces the first stage attributes to
2354                  *  Normal Non-Shareable,
2355                  *  Inner Write-Back Read-Allocate Write-Allocate,
2356                  *  Outer Write-Back Read-Allocate Write-Allocate.
2357                  * Do not overwrite Tagged within attrs.
2358                  */
2359                 if (cacheattrs->attrs != 0xf0) {
2360                     cacheattrs->attrs = 0xff;
2361                 }
2362                 cacheattrs->shareability = 0;
2363             }
2364             *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
2365 
2366             /* Check if IPA translates to secure or non-secure PA space. */
2367             if (arm_is_secure_below_el3(env)) {
2368                 if (ipa_secure) {
2369                     attrs->secure =
2370                         !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW));
2371                 } else {
2372                     attrs->secure =
2373                         !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW))
2374                         || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)));
2375                 }
2376             }
2377             return 0;
2378         } else {
2379             /*
2380              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
2381              */
2382             mmu_idx = stage_1_mmu_idx(mmu_idx);
2383         }
2384     }
2385 
2386     /*
2387      * The page table entries may downgrade secure to non-secure, but
2388      * cannot upgrade a non-secure translation regime's attributes
2389      * to secure.
2390      */
2391     attrs->secure = regime_is_secure(env, mmu_idx);
2392     attrs->user = regime_is_user(env, mmu_idx);
2393 
2394     /*
2395      * Fast Context Switch Extension. This doesn't exist at all in v8.
2396      * In v7 and earlier it affects all stage 1 translations.
2397      */
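     /*
      * Addresses in the bottom 32MB get the FCSEIDR ProcessID (held in bits
      * [31:25] of the register) folded into the address.
      */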
2398     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
2399         && !arm_feature(env, ARM_FEATURE_V8)) {
2400         if (regime_el(env, mmu_idx) == 3) {
2401             address += env->cp15.fcseidr_s;
2402         } else {
2403             address += env->cp15.fcseidr_ns;
2404         }
2405     }
2406 
2407     if (arm_feature(env, ARM_FEATURE_PMSA)) {
2408         bool ret;
2409         *page_size = TARGET_PAGE_SIZE;
2410 
2411         if (arm_feature(env, ARM_FEATURE_V8)) {
2412             /* PMSAv8 */
2413             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2414                                        phys_ptr, attrs, prot, page_size, fi);
2415         } else if (arm_feature(env, ARM_FEATURE_V7)) {
2416             /* PMSAv7 */
2417             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2418                                        phys_ptr, prot, page_size, fi);
2419         } else {
2420             /* Pre-v7 MPU */
2421             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2422                                        phys_ptr, prot, fi);
2423         }
2424         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
2425                       " mmu_idx %u -> %s (prot %c%c%c)\n",
2426                       access_type == MMU_DATA_LOAD ? "reading" :
2427                       (access_type == MMU_DATA_STORE ? "writing" : "executing"),
2428                       (uint32_t)address, mmu_idx,
2429                       ret ? "Miss" : "Hit",
2430                       *prot & PAGE_READ ? 'r' : '-',
2431                       *prot & PAGE_WRITE ? 'w' : '-',
2432                       *prot & PAGE_EXEC ? 'x' : '-');
2433 
2434         return ret;
2435     }
2436 
2437     /* Definitely a real MMU, not an MPU */
2438 
2439     if (regime_translation_disabled(env, mmu_idx)) {
2440         uint64_t hcr;
2441         uint8_t memattr;
2442 
2443         /*
2444          * MMU disabled.  S1 addresses within aa64 translation regimes are
2445          * still checked for bounds -- see AArch64.TranslateAddressS1Off.
2446          */
2447         if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
2448             int r_el = regime_el(env, mmu_idx);
2449             if (arm_el_is_aa64(env, r_el)) {
2450                 int pamax = arm_pamax(env_archcpu(env));
2451                 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
2452                 int addrtop, tbi;
2453 
2454                 tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2455                 if (access_type == MMU_INST_FETCH) {
2456                     tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2457                 }
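                     /*
                      * The tbi field holds TBI0 in bit 0 and TBI1 in bit 1;
                      * VA bit 55 selects which one applies to this address.
                      */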
2458                 tbi = (tbi >> extract64(address, 55, 1)) & 1;
2459                 addrtop = (tbi ? 55 : 63);
2460 
2461                 if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2462                     fi->type = ARMFault_AddressSize;
2463                     fi->level = 0;
2464                     fi->stage2 = false;
2465                     return 1;
2466                 }
2467 
2468                 /*
2469                  * When TBI is disabled, we've just validated that all of the
2470                  * bits above PAMax are zero, so logically we only need to
2471                  * clear the top byte for TBI.  But it's clearer to follow
2472                  * the pseudocode set of addrdesc.paddress.
2473                  */
2474                 address = extract64(address, 0, 52);
2475             }
2476         }
2477         *phys_ptr = address;
2478         *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
2479         *page_size = TARGET_PAGE_SIZE;
2480 
2481         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
2482         hcr = arm_hcr_el2_eff(env);
2483         cacheattrs->shareability = 0;
2484         cacheattrs->is_s2_format = false;
2485         if (hcr & HCR_DC) {
2486             if (hcr & HCR_DCT) {
2487                 memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2488             } else {
2489                 memattr = 0xff;  /* Normal, WB, RWA */
2490             }
2491         } else if (access_type == MMU_INST_FETCH) {
2492             if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2493                 memattr = 0xee;  /* Normal, WT, RA, NT */
2494             } else {
2495                 memattr = 0x44;  /* Normal, NC, No */
2496             }
2497             cacheattrs->shareability = 2; /* outer shareable */
2498         } else {
2499             memattr = 0x00;      /* Device, nGnRnE */
2500         }
2501         cacheattrs->attrs = memattr;
2502         return 0;
2503     }
2504 
2505     if (regime_using_lpae_format(env, mmu_idx)) {
2506         return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
2507                                   phys_ptr, attrs, prot, page_size,
2508                                   fi, cacheattrs);
2509     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
2510         return get_phys_addr_v6(env, address, access_type, mmu_idx,
2511                                 phys_ptr, attrs, prot, page_size, fi);
2512     } else {
2513         return get_phys_addr_v5(env, address, access_type, mmu_idx,
2514                                     phys_ptr, prot, page_size, fi);
2515     }
2516 }
2517 
2518 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
2519                                          MemTxAttrs *attrs)
2520 {
2521     ARMCPU *cpu = ARM_CPU(cs);
2522     CPUARMState *env = &cpu->env;
2523     hwaddr phys_addr;
2524     target_ulong page_size;
2525     int prot;
2526     bool ret;
2527     ARMMMUFaultInfo fi = {};
2528     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
2529     ARMCacheAttrs cacheattrs = {};
2530 
2531     *attrs = (MemTxAttrs) {};
2532 
2533     ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
2534                         attrs, &prot, &page_size, &fi, &cacheattrs);
2535 
2536     if (ret) {
2537         return -1;
2538     }
2539     return phys_addr;
2540 }
2541