xref: /openbmc/qemu/target/arm/ptw.c (revision 3c003f7029eb322c15f137b33af1120096e14f4d)
/*
 * ARM page table walking.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/range.h"
#include "cpu.h"
#include "internals.h"
#include "idau.h"


static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               bool is_secure, bool s1_is_el0,
                               GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};

/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /*
     * In machvirt_init, we call arm_pamax on a cpu that is not fully
     * initialized, so we can't rely on the propagation done in realize.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE) ||
        arm_feature(&cpu->env, ARM_FEATURE_V7VE)) {
        /* v7 with LPAE */
        return 40;
    }
    /* Anything else */
    return 32;
}

/*
 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
 */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
        return ARMMMUIdx_Stage1_E0;
    case ARMMMUIdx_E10_1:
        return ARMMMUIdx_Stage1_E1;
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_Stage1_E1_PAN;
    default:
        return mmu_idx;
    }
}

ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}

static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}

static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
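    /*
     * Note: the E10_* indexes are expected to have been converted to the
     * corresponding Stage1_* index via stage_1_mmu_idx() before we get
     * here, so reaching one of them indicates a bug in the caller.
     */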
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the TTBR associated with this translation regime */
static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vttbr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        return env->cp15.vsttbr_el2;
    }
    if (ttbrn == 0) {
        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
    } else {
        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
    }
}

/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        bool is_secure)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
        break;

    default:
        g_assert_not_reached();
    }

    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}

static bool ptw_attrs_are_device(uint64_t hcr, ARMCacheAttrs cacheattrs)
{
    /*
     * For an S1 page table walk, the stage 1 attributes are always
     * some form of "this is Normal memory". The combined S1+S2
     * attributes are therefore only Device if stage 2 specifies Device.
     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
     * ie when cacheattrs.attrs bits [3:2] are 0b00.
     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
     * when cacheattrs.attrs bit [2] is 0.
     */
    assert(cacheattrs.is_s2_format);
    if (hcr & HCR_FWB) {
        return (cacheattrs.attrs & 0x4) == 0;
    } else {
        return (cacheattrs.attrs & 0xc) == 0;
    }
}

/* Translate a S1 pagetable walk through S2 if needed.  */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
                               hwaddr addr, bool *is_secure_ptr,
                               ARMMMUFaultInfo *fi)
{
    bool is_secure = *is_secure_ptr;
    ARMMMUIdx s2_mmu_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;

    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
        !regime_translation_disabled(env, s2_mmu_idx, is_secure)) {
        GetPhysAddrResult s2 = {};
        uint64_t hcr;
        int ret;

        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx,
                                 is_secure, false, &s2, fi);
        if (ret) {
            assert(fi->type != ARMFault_None);
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !is_secure;
            return ~0;
        }

        hcr = arm_hcr_el2_eff_secstate(env, is_secure);
        if ((hcr & HCR_PTW) && ptw_attrs_are_device(hcr, s2.cacheattrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = !is_secure;
            return ~0;
        }

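        /*
         * The walk may also be redirected between the Secure and
         * Non-secure PA spaces: VSTCR_EL2.SW controls walks that start
         * Secure and VTCR_EL2.NSW controls walks that start Non-secure,
         * as handled below.
         */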
        if (arm_is_secure_below_el3(env)) {
            /* Check if page table walk is to secure or non-secure PA space. */
            if (is_secure) {
                is_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
            } else {
                is_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
            }
            *is_secure_ptr = is_secure;
        } else {
            assert(!is_secure);
        }

        addr = s2.f.phys_addr;
    }
    return addr;
}

/* All loads done in the course of a page table walk go through here. */
static uint32_t arm_ldl_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint32_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
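    /*
     * If the stage 2 translation of the descriptor address faulted,
     * S1_ptw_translate() has set fi->s1ptw and returned an invalid
     * address, so skip the load.
     */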
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldl_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldl_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static uint64_t arm_ldq_ptw(CPUARMState *env, hwaddr addr, bool is_secure,
                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    MemTxAttrs attrs = {};
    MemTxResult result = MEMTX_OK;
    AddressSpace *as;
    uint64_t data;

    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
    attrs.secure = is_secure;
    as = arm_addressspace(cs, attrs);
    if (fi->s1ptw) {
        return 0;
    }
    if (regime_translation_big_endian(env, mmu_idx)) {
        data = address_space_ldq_be(as, addr, attrs, &result);
    } else {
        data = address_space_ldq_le(as, addr, attrs, &result);
    }
    if (result == MEMTX_OK) {
        return data;
    }
    fi->type = ARMFault_SyncExternalOnWalk;
    fi->ea = arm_extabort_type(result);
    return 0;
}

static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask;

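    /*
     * TTBCR.N (maskshift) selects the TTBR0/TTBR1 split: a VA with any
     * of its top N bits set is translated via TTBR1, everything else
     * via TTBR0, whose required table base alignment shrinks as N grows.
     */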
    if (address & mask) {
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base_mask = ~((uint32_t)0x3fffu >> maskshift);
        *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
    }
    *table |= (address >> 18) & 0x3ffc;
    return true;
}

/*
 * Translate section/page access permissions to page R/W protection flags
 * @env:         CPUARMState
 * @mmu_idx:     MMU index indicating required translation regime
 * @ap:          The 3-bit access permissions (AP[2:0])
 * @domain_prot: The 2-bit domain access permissions
 */
static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
                         int ap, int domain_prot)
{
    bool is_user = regime_is_user(env, mmu_idx);

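    /* Domain "Manager" access: the permission bits are not checked at all. */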
    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user) {
            return PAGE_READ;
        } else {
            return PAGE_READ | PAGE_WRITE;
        }
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : PAGE_READ;
    case 6:
        return PAGE_READ;
    case 7:
        if (!arm_feature(env, ARM_FEATURE_V6K)) {
            return 0;
        }
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

/*
 * Translate section/page access permissions to page R/W protection flags.
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @is_user: TRUE if accessing from PL0
 */
static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
{
    switch (ap) {
    case 0:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 1:
        return PAGE_READ | PAGE_WRITE;
    case 2:
        return is_user ? 0 : PAGE_READ;
    case 3:
        return PAGE_READ;
    default:
        g_assert_not_reached();
    }
}

static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}

static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             bool is_secure, GetPhysAddrResult *result,
                             ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1MB section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
                             bool is_secure, GetPhysAddrResult *result,
                             ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24;  /* 16MB */
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20;  /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}

/*
 * Translate S2 section/page access permissions to protection flags
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = 0;

    if (s2ap & 1) {
        prot |= PAGE_READ;
    }
    if (s2ap & 2) {
        prot |= PAGE_WRITE;
    }

    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        switch (xn) {
        case 0:
            prot |= PAGE_EXEC;
            break;
        case 1:
            if (s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        case 2:
            break;
        case 3:
            if (!s1_is_el0) {
                prot |= PAGE_EXEC;
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        if (!extract32(xn, 1, 1)) {
            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
                prot |= PAGE_EXEC;
            }
        }
    }
    return prot;
}

/*
 * Translate section/page access permissions to protection flags
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @ns:      NS (non-secure) bit
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int ns, int xn, int pxn)
{
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(mmu_idx != ARMMMUIdx_Stage2);
    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            /* PAN forbids data accesses but doesn't affect insn fetch */
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
        return prot_rw;
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
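            /*
             * In a regime with two VA ranges, a page that is writable at
             * EL0 is never executable at the privileged EL (and PXN
             * forbids privileged execution outright).
             */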
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}

static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}

/*
 * check_s2_mmu_setup
 * @cpu:        ARMCPU
 * @is_aa64:    True if the translation regime is in AArch64 state
 * @startlevel: Suggested starting level
 * @inputsize:  Bitsize of IPAs
 * @stride:     Page-table stride (See the ARM ARM)
 *
 * Returns true if the suggested S2 translation parameters are OK and
 * false otherwise.
 */
static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
                               int inputsize, int stride, int outputsize)
{
    const int grainsize = stride + 3;
    int startsizecheck;

    /*
     * Negative levels are usually not allowed...
     * Except for FEAT_LPA2, 4k page table, 52-bit address space, which
     * begins with level -1.  Note that previous feature tests will have
     * eliminated this combination if it is not enabled.
     */
    if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) {
        return false;
    }

    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
    if (startsizecheck < 1 || startsizecheck > stride + 4) {
        return false;
    }

    if (is_aa64) {
        switch (stride) {
        case 13: /* 64KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 42)) {
                return false;
            }
            break;
        case 11: /* 16KB Pages.  */
            if (level == 0 || (level == 1 && outputsize <= 40)) {
                return false;
            }
            break;
        case 9: /* 4KB Pages.  */
            if (level == 0 && outputsize <= 42) {
                return false;
            }
            break;
        default:
            g_assert_not_reached();
        }

        /* Inputsize checks.  */
        if (inputsize > outputsize &&
            (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) {
            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
            return false;
        }
    } else {
        /* AArch32 only supports 4KB pages. Assert on that.  */
        assert(stride == 9);

        if (level == 0) {
            return false;
        }
    }
    return true;
}

/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
9633283222aSRichard Henderson  * Returns false if the translation was successful. Otherwise, the fields
9643283222aSRichard Henderson  * of @result may not be filled in, and @fi is populated with information
9653283222aSRichard Henderson  * on why the translation aborted, in the format of a long-format DFSR/IFSR
9663283222aSRichard Henderson  * fault register, with the following caveat: the WnR bit is never set
9673283222aSRichard Henderson  * (the caller must do this).
9683283222aSRichard Henderson  *
9693283222aSRichard Henderson  * @env: CPUARMState
9703283222aSRichard Henderson  * @address: virtual address to get physical address for
9713283222aSRichard Henderson  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
9723283222aSRichard Henderson  * @mmu_idx: MMU index indicating required translation regime
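 * @is_secure: security state for the access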
9733283222aSRichard Henderson  * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page
9743283222aSRichard Henderson  *             table walk), must be true if this is stage 2 of a stage 1+2
9753283222aSRichard Henderson  *             walk for an EL0 access. If @mmu_idx is anything else,
9763283222aSRichard Henderson  *             @s1_is_el0 is ignored.
97803ee9bbeSRichard Henderson  * @result: set on translation success
9783283222aSRichard Henderson  * @fi: set to fault info if the translation fails
9793283222aSRichard Henderson  */
98011552bb0SRichard Henderson static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
9813283222aSRichard Henderson                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
982c23f08a5SRichard Henderson                                bool is_secure, bool s1_is_el0,
983c23f08a5SRichard Henderson                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
9843283222aSRichard Henderson {
9853283222aSRichard Henderson     ARMCPU *cpu = env_archcpu(env);
9863283222aSRichard Henderson     /* Read an LPAE long-descriptor translation table. */
9873283222aSRichard Henderson     ARMFaultType fault_type = ARMFault_Translation;
9883283222aSRichard Henderson     uint32_t level;
9893283222aSRichard Henderson     ARMVAParameters param;
9903283222aSRichard Henderson     uint64_t ttbr;
9913283222aSRichard Henderson     hwaddr descaddr, indexmask, indexmask_grainsize;
9923283222aSRichard Henderson     uint32_t tableattrs;
9933283222aSRichard Henderson     target_ulong page_size;
9943283222aSRichard Henderson     uint32_t attrs;
9953283222aSRichard Henderson     int32_t stride;
9963283222aSRichard Henderson     int addrsize, inputsize, outputsize;
997c1547bbaSPeter Maydell     uint64_t tcr = regime_tcr(env, mmu_idx);
9983283222aSRichard Henderson     int ap, ns, xn, pxn;
9993283222aSRichard Henderson     uint32_t el = regime_el(env, mmu_idx);
10003283222aSRichard Henderson     uint64_t descaddrmask;
10013283222aSRichard Henderson     bool aarch64 = arm_el_is_aa64(env, el);
10023283222aSRichard Henderson     bool guarded = false;
10033283222aSRichard Henderson 
10043283222aSRichard Henderson     /* TODO: This code does not support shareability levels. */
10053283222aSRichard Henderson     if (aarch64) {
10063283222aSRichard Henderson         int ps;
10073283222aSRichard Henderson 
10083283222aSRichard Henderson         param = aa64_va_parameters(env, address, mmu_idx,
10093283222aSRichard Henderson                                    access_type != MMU_INST_FETCH);
10103283222aSRichard Henderson         level = 0;
10113283222aSRichard Henderson 
10123283222aSRichard Henderson         /*
10133283222aSRichard Henderson          * If TxSZ is programmed to a value larger than the maximum,
10143283222aSRichard Henderson          * or smaller than the effective minimum, it is IMPLEMENTATION
10153283222aSRichard Henderson          * DEFINED whether we behave as if the field were programmed
10163283222aSRichard Henderson          * within bounds, or if a level 0 Translation fault is generated.
10173283222aSRichard Henderson          *
10183283222aSRichard Henderson          * With FEAT_LVA, fault on less than minimum becomes required,
10193283222aSRichard Henderson          * so our choice is to always raise the fault.
10203283222aSRichard Henderson          */
10213283222aSRichard Henderson         if (param.tsz_oob) {
10223283222aSRichard Henderson             fault_type = ARMFault_Translation;
10233283222aSRichard Henderson             goto do_fault;
10243283222aSRichard Henderson         }
10253283222aSRichard Henderson 
10263283222aSRichard Henderson         addrsize = 64 - 8 * param.tbi;
10273283222aSRichard Henderson         inputsize = 64 - param.tsz;
10283283222aSRichard Henderson 
10293283222aSRichard Henderson         /*
10303283222aSRichard Henderson          * Bound PS by PARANGE to find the effective output address size.
10313283222aSRichard Henderson          * ID_AA64MMFR0 is a read-only register so values outside of the
10323283222aSRichard Henderson          * supported mappings can be considered an implementation error.
10333283222aSRichard Henderson          */
10343283222aSRichard Henderson         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
10353283222aSRichard Henderson         ps = MIN(ps, param.ps);
10363283222aSRichard Henderson         assert(ps < ARRAY_SIZE(pamax_map));
10373283222aSRichard Henderson         outputsize = pamax_map[ps];
10383283222aSRichard Henderson     } else {
10393283222aSRichard Henderson         param = aa32_va_parameters(env, address, mmu_idx);
10403283222aSRichard Henderson         level = 1;
10413283222aSRichard Henderson         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
10423283222aSRichard Henderson         inputsize = addrsize - param.tsz;
10433283222aSRichard Henderson         outputsize = 40;
10443283222aSRichard Henderson     }
10453283222aSRichard Henderson 
10463283222aSRichard Henderson     /*
10473283222aSRichard Henderson      * We determined the region when collecting the parameters, but we
10483283222aSRichard Henderson      * have not yet validated that the address is valid for the region.
10493283222aSRichard Henderson      * Extract the top bits and verify that they all match select.
10503283222aSRichard Henderson      *
10513283222aSRichard Henderson      * For aa32, if inputsize == addrsize, then we have selected the
10523283222aSRichard Henderson      * region by exclusion in aa32_va_parameters and there is no more
10533283222aSRichard Henderson      * validation to do here.
10543283222aSRichard Henderson      */
10553283222aSRichard Henderson     if (inputsize < addrsize) {
10563283222aSRichard Henderson         target_ulong top_bits = sextract64(address, inputsize,
10573283222aSRichard Henderson                                            addrsize - inputsize);
10583283222aSRichard Henderson         if (-top_bits != param.select) {
10593283222aSRichard Henderson             /* The gap between the two regions is a Translation fault */
10603283222aSRichard Henderson             fault_type = ARMFault_Translation;
10613283222aSRichard Henderson             goto do_fault;
10623283222aSRichard Henderson         }
10633283222aSRichard Henderson     }
10643283222aSRichard Henderson 
1065*3c003f70SPeter Maydell     stride = arm_granule_bits(param.gran) - 3;
10663283222aSRichard Henderson 
10673283222aSRichard Henderson     /*
10683283222aSRichard Henderson      * Note that QEMU ignores shareability and cacheability attributes,
10693283222aSRichard Henderson      * so we don't need to do anything with the SH, ORGN, IRGN fields
10703283222aSRichard Henderson      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
10713283222aSRichard Henderson      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
10723283222aSRichard Henderson      * implement any ASID-like capability so we can ignore it (instead
10733283222aSRichard Henderson      * we will always flush the TLB any time the ASID is changed).
10743283222aSRichard Henderson      */
10753283222aSRichard Henderson     ttbr = regime_ttbr(env, mmu_idx, param.select);
10763283222aSRichard Henderson 
10773283222aSRichard Henderson     /*
10783283222aSRichard Henderson      * Here we should have set up all the parameters for the translation:
10793283222aSRichard Henderson      * inputsize, ttbr, epd, stride, tbi
10803283222aSRichard Henderson      */
10813283222aSRichard Henderson 
10823283222aSRichard Henderson     if (param.epd) {
10833283222aSRichard Henderson         /*
10843283222aSRichard Henderson          * Translation table walk disabled => Translation fault on TLB miss
10853283222aSRichard Henderson          * Note: This is always 0 on 64-bit EL2 and EL3.
10863283222aSRichard Henderson          */
10873283222aSRichard Henderson         goto do_fault;
10883283222aSRichard Henderson     }
10893283222aSRichard Henderson 
10903283222aSRichard Henderson     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
10913283222aSRichard Henderson         /*
10923283222aSRichard Henderson          * The starting level depends on the virtual address size (which can
10933283222aSRichard Henderson          * be up to 48 bits) and the translation granule size. It indicates
10943283222aSRichard Henderson          * the number of strides (stride bits at a time) needed to
10953283222aSRichard Henderson          * consume the bits of the input address. In the pseudocode this is:
10963283222aSRichard Henderson          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
10973283222aSRichard Henderson          * where their 'inputsize' is our 'inputsize', 'grainsize' is
10983283222aSRichard Henderson          * our 'stride + 3' and 'stride' is our 'stride'.
10993283222aSRichard Henderson          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
11003283222aSRichard Henderson          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
11013283222aSRichard Henderson          * = 4 - (inputsize - 4) / stride;
11023283222aSRichard Henderson          */
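        /*
         * For example, a 40-bit input address space with a 4KB granule
         * (stride 9) gives level = 4 - (40 - 4) / 9 = 0, while a 48-bit
         * space with a 64KB granule (stride 13) gives 4 - (48 - 4) / 13 = 1.
         */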
11033283222aSRichard Henderson         level = 4 - (inputsize - 4) / stride;
11043283222aSRichard Henderson     } else {
11053283222aSRichard Henderson         /*
11063283222aSRichard Henderson          * For stage 2 translations the starting level is specified by the
11073283222aSRichard Henderson          * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
11083283222aSRichard Henderson          */
1109dfce4aa8SPeter Maydell         uint32_t sl0 = extract32(tcr, 6, 2);
1110dfce4aa8SPeter Maydell         uint32_t sl2 = extract64(tcr, 33, 1);
11113283222aSRichard Henderson         uint32_t startlevel;
11123283222aSRichard Henderson         bool ok;
11133283222aSRichard Henderson 
11143283222aSRichard Henderson         /* SL2 is RES0 unless DS=1 & 4kb granule. */
11153283222aSRichard Henderson         /* SL2 is RES0 unless DS=1 & 4KB granule. */
11163283222aSRichard Henderson             if (sl0 != 0) {
11173283222aSRichard Henderson                 level = 0;
11183283222aSRichard Henderson                 fault_type = ARMFault_Translation;
11193283222aSRichard Henderson                 goto do_fault;
11203283222aSRichard Henderson             }
11213283222aSRichard Henderson             startlevel = -1;
11223283222aSRichard Henderson         } else if (!aarch64 || stride == 9) {
11233283222aSRichard Henderson             /* AArch32 or 4KB pages */
11243283222aSRichard Henderson             startlevel = 2 - sl0;
11253283222aSRichard Henderson 
11263283222aSRichard Henderson             if (cpu_isar_feature(aa64_st, cpu)) {
11273283222aSRichard Henderson                 startlevel &= 3;
11283283222aSRichard Henderson             }
11293283222aSRichard Henderson         } else {
11303283222aSRichard Henderson             /* 16KB or 64KB pages */
11313283222aSRichard Henderson             startlevel = 3 - sl0;
11323283222aSRichard Henderson         }
11333283222aSRichard Henderson 
11343283222aSRichard Henderson         /* Check that the starting level is valid. */
11353283222aSRichard Henderson         ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
11363283222aSRichard Henderson                                 inputsize, stride, outputsize);
11373283222aSRichard Henderson         if (!ok) {
11383283222aSRichard Henderson             fault_type = ARMFault_Translation;
11393283222aSRichard Henderson             goto do_fault;
11403283222aSRichard Henderson         }
11413283222aSRichard Henderson         level = startlevel;
11423283222aSRichard Henderson     }
11433283222aSRichard Henderson 
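    /*
     * For example, with a 4KB granule (stride 9), indexmask_grainsize is
     * 0xfff (a full 512-entry, 4KB table), while for a 40-bit input region
     * starting at level 0, indexmask is 0xf: the top-level table holds
     * only two 8-byte descriptors, selected by address bit 39.
     */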
11443283222aSRichard Henderson     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
11453283222aSRichard Henderson     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
11463283222aSRichard Henderson 
11473283222aSRichard Henderson     /* Now we can extract the actual base address from the TTBR */
11483283222aSRichard Henderson     descaddr = extract64(ttbr, 0, 48);
11493283222aSRichard Henderson 
11503283222aSRichard Henderson     /*
11513283222aSRichard Henderson      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
11523283222aSRichard Henderson      *
11533283222aSRichard Henderson      * Otherwise, if the base address is out of range, raise AddressSizeFault.
11543283222aSRichard Henderson      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
11553283222aSRichard Henderson      * but we've just cleared the bits above 47, so simplify the test.
11563283222aSRichard Henderson      */
11573283222aSRichard Henderson     if (outputsize > 48) {
11583283222aSRichard Henderson         descaddr |= extract64(ttbr, 2, 4) << 48;
11593283222aSRichard Henderson     } else if (descaddr >> outputsize) {
11603283222aSRichard Henderson         level = 0;
11613283222aSRichard Henderson         fault_type = ARMFault_AddressSize;
11623283222aSRichard Henderson         goto do_fault;
11633283222aSRichard Henderson     }
11643283222aSRichard Henderson 
11653283222aSRichard Henderson     /*
11663283222aSRichard Henderson      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
11673283222aSRichard Henderson      * and also to mask out CnP (bit 0) which could validly be non-zero.
11683283222aSRichard Henderson      */
11693283222aSRichard Henderson     descaddr &= ~indexmask;
11703283222aSRichard Henderson 
11713283222aSRichard Henderson     /*
11723283222aSRichard Henderson      * For AArch32, the address field in the descriptor goes up to bit 39
11733283222aSRichard Henderson      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
11743283222aSRichard Henderson      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
11753283222aSRichard Henderson      * bits as part of the address, which will be checked via outputsize.
11763283222aSRichard Henderson      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
11773283222aSRichard Henderson      * the highest bits of a 52-bit output are placed elsewhere.
11783283222aSRichard Henderson      */
11793283222aSRichard Henderson     if (param.ds) {
11803283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 50);
11813283222aSRichard Henderson     } else if (arm_feature(env, ARM_FEATURE_V8)) {
11823283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 48);
11833283222aSRichard Henderson     } else {
11843283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 40);
11853283222aSRichard Henderson     }
11863283222aSRichard Henderson     descaddrmask &= ~indexmask_grainsize;
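    /*
     * For example, an AArch64 v8 walk with a 4KB granule and DS clear
     * leaves descaddrmask covering descriptor bits [47:12].
     */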
11873283222aSRichard Henderson 
11883283222aSRichard Henderson     /*
11893283222aSRichard Henderson      * Secure accesses start with the page table in secure memory and
11903283222aSRichard Henderson      * can be downgraded to non-secure at any step. Non-secure accesses
11913283222aSRichard Henderson      * remain non-secure. We implement this by just ORing in the NSTable/NS
11923283222aSRichard Henderson      * bits at each step.
11933283222aSRichard Henderson      */
1194c23f08a5SRichard Henderson     tableattrs = is_secure ? 0 : (1 << 4);
11953283222aSRichard Henderson     for (;;) {
11963283222aSRichard Henderson         uint64_t descriptor;
11973283222aSRichard Henderson         bool nstable;
11983283222aSRichard Henderson 
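        /*
         * For example, at level 3 with a 4KB granule this ORs in
         * (address >> 9) & 0xfff; after the "& ~7" below, what remains is
         * 8 bytes times the 9-bit table index from address bits [20:12].
         */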
11993283222aSRichard Henderson         descaddr |= (address >> (stride * (4 - level))) & indexmask;
12003283222aSRichard Henderson         descaddr &= ~7ULL;
12013283222aSRichard Henderson         nstable = extract32(tableattrs, 4, 1);
12025e79887bSRichard Henderson         descriptor = arm_ldq_ptw(env, descaddr, !nstable, mmu_idx, fi);
12033283222aSRichard Henderson         if (fi->type != ARMFault_None) {
12043283222aSRichard Henderson             goto do_fault;
12053283222aSRichard Henderson         }
12063283222aSRichard Henderson 
12073283222aSRichard Henderson         if (!(descriptor & 1) ||
12083283222aSRichard Henderson             (!(descriptor & 2) && (level == 3))) {
12093283222aSRichard Henderson             /* Invalid, or the Reserved level 3 encoding */
12103283222aSRichard Henderson             goto do_fault;
12113283222aSRichard Henderson         }
12123283222aSRichard Henderson 
12133283222aSRichard Henderson         descaddr = descriptor & descaddrmask;
12143283222aSRichard Henderson 
12153283222aSRichard Henderson         /*
12163283222aSRichard Henderson          * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
12173283222aSRichard Henderson          * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
12183283222aSRichard Henderson          * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
12193283222aSRichard Henderson          * raise AddressSizeFault.
12203283222aSRichard Henderson          */
12213283222aSRichard Henderson         if (outputsize > 48) {
12223283222aSRichard Henderson             if (param.ds) {
12233283222aSRichard Henderson                 descaddr |= extract64(descriptor, 8, 2) << 50;
12243283222aSRichard Henderson             } else {
12253283222aSRichard Henderson                 descaddr |= extract64(descriptor, 12, 4) << 48;
12263283222aSRichard Henderson             }
12273283222aSRichard Henderson         } else if (descaddr >> outputsize) {
12283283222aSRichard Henderson             fault_type = ARMFault_AddressSize;
12293283222aSRichard Henderson             goto do_fault;
12303283222aSRichard Henderson         }
12313283222aSRichard Henderson 
12323283222aSRichard Henderson         if ((descriptor & 2) && (level < 3)) {
12333283222aSRichard Henderson             /*
12343283222aSRichard Henderson              * Table entry. The top five bits are attributes which may
12353283222aSRichard Henderson              * propagate down through lower levels of the table (and
12363283222aSRichard Henderson              * which are all arranged so that 0 means "no effect", so
12373283222aSRichard Henderson              * we can gather them up by ORing in the bits at each level).
12383283222aSRichard Henderson              */
12393283222aSRichard Henderson             tableattrs |= extract64(descriptor, 59, 5);
12403283222aSRichard Henderson             level++;
12413283222aSRichard Henderson             indexmask = indexmask_grainsize;
12423283222aSRichard Henderson             continue;
12433283222aSRichard Henderson         }
12443283222aSRichard Henderson         /*
12453283222aSRichard Henderson          * Block entry at level 1 or 2, or page entry at level 3.
12463283222aSRichard Henderson          * These are basically the same thing, although the number
12473283222aSRichard Henderson          * of bits we pull in from the vaddr varies. Note that although
12483283222aSRichard Henderson          * descaddrmask masks enough of the low bits of the descriptor
12493283222aSRichard Henderson          * to give a correct page or table address, the address field
12503283222aSRichard Henderson          * in a block descriptor is smaller; so we need to explicitly
12513283222aSRichard Henderson          * clear the lower bits here before ORing in the low vaddr bits.
12523283222aSRichard Henderson          */
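        /*
         * For example, with a 4KB granule a level-2 block maps
         * 1 << (9 * 2 + 3) = 2MB and a level-3 page maps 4KB; with a
         * 64KB granule a level-2 block maps 1 << (13 * 2 + 3) = 512MB.
         */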
12533283222aSRichard Henderson         page_size = (1ULL << ((stride * (4 - level)) + 3));
1254c2360eaaSPeter Maydell         descaddr &= ~(hwaddr)(page_size - 1);
12553283222aSRichard Henderson         descaddr |= (address & (page_size - 1));
12563283222aSRichard Henderson         /* Extract attributes from the descriptor */
12573283222aSRichard Henderson         attrs = extract64(descriptor, 2, 10)
12583283222aSRichard Henderson             | (extract64(descriptor, 52, 12) << 10);
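        /*
         * For example, attrs bit 8 is the descriptor AF bit (descriptor
         * bit 10), and attrs bits [5:4] come from descriptor bits [7:6]
         * (AP[2:1] for stage 1, S2AP for stage 2); both are used below.
         */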
12593283222aSRichard Henderson 
12603283222aSRichard Henderson         if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
12613283222aSRichard Henderson             /* Stage 2 table descriptors do not include any attribute fields */
12623283222aSRichard Henderson             break;
12633283222aSRichard Henderson         }
12643283222aSRichard Henderson         /* Merge in attributes from table descriptors */
12653283222aSRichard Henderson         attrs |= nstable << 3; /* NS */
12663283222aSRichard Henderson         guarded = extract64(descriptor, 50, 1);  /* GP */
12673283222aSRichard Henderson         if (param.hpd) {
12683283222aSRichard Henderson             /* HPD disables all the table attributes except NSTable.  */
12693283222aSRichard Henderson             break;
12703283222aSRichard Henderson         }
12713283222aSRichard Henderson         attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
12723283222aSRichard Henderson         /*
12733283222aSRichard Henderson          * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
12743283222aSRichard Henderson          * means "force PL1 access only", which means forcing AP[1] to 0.
12753283222aSRichard Henderson          */
12763283222aSRichard Henderson         attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
12773283222aSRichard Henderson         attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
12783283222aSRichard Henderson         break;
12793283222aSRichard Henderson     }
12803283222aSRichard Henderson     /*
12813283222aSRichard Henderson      * Here descaddr is the final physical address, and attributes
12823283222aSRichard Henderson      * are all in attrs.
12833283222aSRichard Henderson      */
12843283222aSRichard Henderson     fault_type = ARMFault_AccessFlag;
12853283222aSRichard Henderson     if ((attrs & (1 << 8)) == 0) {
12863283222aSRichard Henderson         /* Access flag */
12873283222aSRichard Henderson         goto do_fault;
12883283222aSRichard Henderson     }
12893283222aSRichard Henderson 
12903283222aSRichard Henderson     ap = extract32(attrs, 4, 2);
12913283222aSRichard Henderson 
12923283222aSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
12933283222aSRichard Henderson         ns = mmu_idx == ARMMMUIdx_Stage2;
12943283222aSRichard Henderson         xn = extract32(attrs, 11, 2);
12957fa7ea8fSRichard Henderson         result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
12963283222aSRichard Henderson     } else {
12973283222aSRichard Henderson         ns = extract32(attrs, 3, 1);
12983283222aSRichard Henderson         xn = extract32(attrs, 12, 1);
12993283222aSRichard Henderson         pxn = extract32(attrs, 11, 1);
13007fa7ea8fSRichard Henderson         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
13013283222aSRichard Henderson     }
13023283222aSRichard Henderson 
13033283222aSRichard Henderson     fault_type = ARMFault_Permission;
13047fa7ea8fSRichard Henderson     if (!(result->f.prot & (1 << access_type))) {
13053283222aSRichard Henderson         goto do_fault;
13063283222aSRichard Henderson     }
13073283222aSRichard Henderson 
13083283222aSRichard Henderson     if (ns) {
13093283222aSRichard Henderson         /*
13103283222aSRichard Henderson          * The NS bit will (as required by the architecture) have no effect if
13113283222aSRichard Henderson          * the CPU doesn't support TZ or this is a non-secure translation
13123283222aSRichard Henderson          * regime, because the attribute will already be non-secure.
13133283222aSRichard Henderson          */
13147fa7ea8fSRichard Henderson         result->f.attrs.secure = false;
13153283222aSRichard Henderson     }
13163283222aSRichard Henderson     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
13173283222aSRichard Henderson     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
13187fa7ea8fSRichard Henderson         arm_tlb_bti_gp(&result->f.attrs) = true;
13193283222aSRichard Henderson     }
13203283222aSRichard Henderson 
13213283222aSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
132203ee9bbeSRichard Henderson         result->cacheattrs.is_s2_format = true;
132303ee9bbeSRichard Henderson         result->cacheattrs.attrs = extract32(attrs, 0, 4);
13243283222aSRichard Henderson     } else {
13253283222aSRichard Henderson         /* Index into MAIR registers for cache attributes */
13263283222aSRichard Henderson         uint8_t attrindx = extract32(attrs, 0, 3);
13273283222aSRichard Henderson         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
13283283222aSRichard Henderson         assert(attrindx <= 7);
132903ee9bbeSRichard Henderson         result->cacheattrs.is_s2_format = false;
133003ee9bbeSRichard Henderson         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
13313283222aSRichard Henderson     }
13323283222aSRichard Henderson 
13333283222aSRichard Henderson     /*
13343283222aSRichard Henderson      * For FEAT_LPA2 and effective DS, the SH field in the attributes
13353283222aSRichard Henderson      * was re-purposed for output address bits.  The SH attribute in
13363283222aSRichard Henderson      * that case comes from TCR_ELx, which we extracted earlier.
13373283222aSRichard Henderson      */
13383283222aSRichard Henderson     if (param.ds) {
133903ee9bbeSRichard Henderson         result->cacheattrs.shareability = param.sh;
13403283222aSRichard Henderson     } else {
134103ee9bbeSRichard Henderson         result->cacheattrs.shareability = extract32(attrs, 6, 2);
13423283222aSRichard Henderson     }
13433283222aSRichard Henderson 
13447fa7ea8fSRichard Henderson     result->f.phys_addr = descaddr;
13457fa7ea8fSRichard Henderson     result->f.lg_page_size = ctz64(page_size);
13463283222aSRichard Henderson     return false;
13473283222aSRichard Henderson 
13483283222aSRichard Henderson do_fault:
13493283222aSRichard Henderson     fi->type = fault_type;
13503283222aSRichard Henderson     fi->level = level;
13513283222aSRichard Henderson     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
13523283222aSRichard Henderson     fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
13533283222aSRichard Henderson                                mmu_idx == ARMMMUIdx_Stage2_S);
13543283222aSRichard Henderson     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
13553283222aSRichard Henderson     return true;
13563283222aSRichard Henderson }
13573283222aSRichard Henderson 
13589a12fb36SRichard Henderson static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
13599a12fb36SRichard Henderson                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1360a5b5092fSRichard Henderson                                  bool is_secure, GetPhysAddrResult *result,
13619a12fb36SRichard Henderson                                  ARMMMUFaultInfo *fi)
13629a12fb36SRichard Henderson {
13639a12fb36SRichard Henderson     int n;
13649a12fb36SRichard Henderson     uint32_t mask;
13659a12fb36SRichard Henderson     uint32_t base;
13669a12fb36SRichard Henderson     bool is_user = regime_is_user(env, mmu_idx);
13679a12fb36SRichard Henderson 
13687e80c0a4SRichard Henderson     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
13699a12fb36SRichard Henderson         /* MPU disabled.  */
13707fa7ea8fSRichard Henderson         result->f.phys_addr = address;
13717fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
13729a12fb36SRichard Henderson         return false;
13739a12fb36SRichard Henderson     }
13749a12fb36SRichard Henderson 
13757fa7ea8fSRichard Henderson     result->f.phys_addr = address;
13769a12fb36SRichard Henderson     for (n = 7; n >= 0; n--) {
13779a12fb36SRichard Henderson         base = env->cp15.c6_region[n];
13789a12fb36SRichard Henderson         if ((base & 1) == 0) {
13799a12fb36SRichard Henderson             continue;
13809a12fb36SRichard Henderson         }
13819a12fb36SRichard Henderson         mask = 1 << ((base >> 1) & 0x1f);
13829a12fb36SRichard Henderson         /* Keep this shift separate from the above to avoid an
13839a12fb36SRichard Henderson            (undefined) << 32.  */
13849a12fb36SRichard Henderson         mask = (mask << 1) - 1;
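        /*
         * For example, a size field of 11 gives a 4KB region (mask 0xfff);
         * the maximum field value of 31 gives mask 0xffffffff (4GB).
         */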
13859a12fb36SRichard Henderson         if (((base ^ address) & ~mask) == 0) {
13869a12fb36SRichard Henderson             break;
13879a12fb36SRichard Henderson         }
13889a12fb36SRichard Henderson     }
13899a12fb36SRichard Henderson     if (n < 0) {
13909a12fb36SRichard Henderson         fi->type = ARMFault_Background;
13919a12fb36SRichard Henderson         return true;
13929a12fb36SRichard Henderson     }
13939a12fb36SRichard Henderson 
13949a12fb36SRichard Henderson     if (access_type == MMU_INST_FETCH) {
13959a12fb36SRichard Henderson         mask = env->cp15.pmsav5_insn_ap;
13969a12fb36SRichard Henderson     } else {
13979a12fb36SRichard Henderson         mask = env->cp15.pmsav5_data_ap;
13989a12fb36SRichard Henderson     }
13999a12fb36SRichard Henderson     mask = (mask >> (n * 4)) & 0xf;
14009a12fb36SRichard Henderson     switch (mask) {
14019a12fb36SRichard Henderson     case 0:
14029a12fb36SRichard Henderson         fi->type = ARMFault_Permission;
14039a12fb36SRichard Henderson         fi->level = 1;
14049a12fb36SRichard Henderson         return true;
14059a12fb36SRichard Henderson     case 1:
14069a12fb36SRichard Henderson         if (is_user) {
14079a12fb36SRichard Henderson             fi->type = ARMFault_Permission;
14089a12fb36SRichard Henderson             fi->level = 1;
14099a12fb36SRichard Henderson             return true;
14109a12fb36SRichard Henderson         }
14117fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ | PAGE_WRITE;
14129a12fb36SRichard Henderson         break;
14139a12fb36SRichard Henderson     case 2:
14147fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ;
14159a12fb36SRichard Henderson         if (!is_user) {
14167fa7ea8fSRichard Henderson             result->f.prot |= PAGE_WRITE;
14179a12fb36SRichard Henderson         }
14189a12fb36SRichard Henderson         break;
14199a12fb36SRichard Henderson     case 3:
14207fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ | PAGE_WRITE;
14219a12fb36SRichard Henderson         break;
14229a12fb36SRichard Henderson     case 5:
14239a12fb36SRichard Henderson         if (is_user) {
14249a12fb36SRichard Henderson             fi->type = ARMFault_Permission;
14259a12fb36SRichard Henderson             fi->level = 1;
14269a12fb36SRichard Henderson             return true;
14279a12fb36SRichard Henderson         }
14287fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ;
14299a12fb36SRichard Henderson         break;
14309a12fb36SRichard Henderson     case 6:
14317fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ;
14329a12fb36SRichard Henderson         break;
14339a12fb36SRichard Henderson     default:
14349a12fb36SRichard Henderson         /* Bad permission.  */
14359a12fb36SRichard Henderson         fi->type = ARMFault_Permission;
14369a12fb36SRichard Henderson         fi->level = 1;
14379a12fb36SRichard Henderson         return true;
14389a12fb36SRichard Henderson     }
14397fa7ea8fSRichard Henderson     result->f.prot |= PAGE_EXEC;
14409a12fb36SRichard Henderson     return false;
14419a12fb36SRichard Henderson }
14429a12fb36SRichard Henderson 
1443fedbaa05SRichard Henderson static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
14447fa7ea8fSRichard Henderson                                          int32_t address, uint8_t *prot)
14457d2e08c9SRichard Henderson {
14467d2e08c9SRichard Henderson     if (!arm_feature(env, ARM_FEATURE_M)) {
14477d2e08c9SRichard Henderson         *prot = PAGE_READ | PAGE_WRITE;
14487d2e08c9SRichard Henderson         switch (address) {
14497d2e08c9SRichard Henderson         case 0xF0000000 ... 0xFFFFFFFF:
14507d2e08c9SRichard Henderson             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
14517d2e08c9SRichard Henderson                 /* hivecs execing is ok */
14527d2e08c9SRichard Henderson                 *prot |= PAGE_EXEC;
14537d2e08c9SRichard Henderson             }
14547d2e08c9SRichard Henderson             break;
14557d2e08c9SRichard Henderson         case 0x00000000 ... 0x7FFFFFFF:
14567d2e08c9SRichard Henderson             *prot |= PAGE_EXEC;
14577d2e08c9SRichard Henderson             break;
14587d2e08c9SRichard Henderson         }
14597d2e08c9SRichard Henderson     } else {
14607d2e08c9SRichard Henderson         /* Default system address map for M profile cores.
14617d2e08c9SRichard Henderson          * The architecture specifies which regions are execute-never;
14627d2e08c9SRichard Henderson          * at the MPU level no other checks are defined.
14637d2e08c9SRichard Henderson          */
14647d2e08c9SRichard Henderson         switch (address) {
14657d2e08c9SRichard Henderson         case 0x00000000 ... 0x1fffffff: /* ROM */
14667d2e08c9SRichard Henderson         case 0x20000000 ... 0x3fffffff: /* SRAM */
14677d2e08c9SRichard Henderson         case 0x60000000 ... 0x7fffffff: /* RAM */
14687d2e08c9SRichard Henderson         case 0x80000000 ... 0x9fffffff: /* RAM */
14697d2e08c9SRichard Henderson             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
14707d2e08c9SRichard Henderson             break;
14717d2e08c9SRichard Henderson         case 0x40000000 ... 0x5fffffff: /* Peripheral */
14727d2e08c9SRichard Henderson         case 0xa0000000 ... 0xbfffffff: /* Device */
14737d2e08c9SRichard Henderson         case 0xc0000000 ... 0xdfffffff: /* Device */
14747d2e08c9SRichard Henderson         case 0xe0000000 ... 0xffffffff: /* System */
14757d2e08c9SRichard Henderson             *prot = PAGE_READ | PAGE_WRITE;
14767d2e08c9SRichard Henderson             break;
14777d2e08c9SRichard Henderson         default:
14787d2e08c9SRichard Henderson             g_assert_not_reached();
14797d2e08c9SRichard Henderson         }
14807d2e08c9SRichard Henderson     }
14817d2e08c9SRichard Henderson }
14827d2e08c9SRichard Henderson 
148347ff5ba9SRichard Henderson static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
148447ff5ba9SRichard Henderson {
148547ff5ba9SRichard Henderson     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
148647ff5ba9SRichard Henderson     return arm_feature(env, ARM_FEATURE_M) &&
148747ff5ba9SRichard Henderson         extract32(address, 20, 12) == 0xe00;
148847ff5ba9SRichard Henderson }
148947ff5ba9SRichard Henderson 
149047ff5ba9SRichard Henderson static bool m_is_system_region(CPUARMState *env, uint32_t address)
149147ff5ba9SRichard Henderson {
149247ff5ba9SRichard Henderson     /*
149347ff5ba9SRichard Henderson      * True if address is in the M profile system region
149447ff5ba9SRichard Henderson      * 0xe0000000 - 0xffffffff
149547ff5ba9SRichard Henderson      */
149647ff5ba9SRichard Henderson     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
149747ff5ba9SRichard Henderson }
149847ff5ba9SRichard Henderson 
1499c8e436c9SRichard Henderson static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
15001a469cf7SRichard Henderson                                          bool is_secure, bool is_user)
1501c8e436c9SRichard Henderson {
1502c8e436c9SRichard Henderson     /*
1503c8e436c9SRichard Henderson      * Return true if we should use the default memory map as a
1504c8e436c9SRichard Henderson      * "background" region if there are no hits against any MPU regions.
1505c8e436c9SRichard Henderson      */
1506c8e436c9SRichard Henderson     CPUARMState *env = &cpu->env;
1507c8e436c9SRichard Henderson 
1508c8e436c9SRichard Henderson     if (is_user) {
1509c8e436c9SRichard Henderson         return false;
1510c8e436c9SRichard Henderson     }
1511c8e436c9SRichard Henderson 
1512c8e436c9SRichard Henderson     if (arm_feature(env, ARM_FEATURE_M)) {
15131a469cf7SRichard Henderson         return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
1514c8e436c9SRichard Henderson     } else {
1515c8e436c9SRichard Henderson         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
1516c8e436c9SRichard Henderson     }
1517c8e436c9SRichard Henderson }
1518c8e436c9SRichard Henderson 
15191f2e87e5SRichard Henderson static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
15201f2e87e5SRichard Henderson                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1521957a0bb7SRichard Henderson                                  bool secure, GetPhysAddrResult *result,
15221f2e87e5SRichard Henderson                                  ARMMMUFaultInfo *fi)
15231f2e87e5SRichard Henderson {
15241f2e87e5SRichard Henderson     ARMCPU *cpu = env_archcpu(env);
15251f2e87e5SRichard Henderson     int n;
15261f2e87e5SRichard Henderson     bool is_user = regime_is_user(env, mmu_idx);
15271f2e87e5SRichard Henderson 
15287fa7ea8fSRichard Henderson     result->f.phys_addr = address;
15297fa7ea8fSRichard Henderson     result->f.lg_page_size = TARGET_PAGE_BITS;
15307fa7ea8fSRichard Henderson     result->f.prot = 0;
15311f2e87e5SRichard Henderson 
15327e80c0a4SRichard Henderson     if (regime_translation_disabled(env, mmu_idx, secure) ||
15331f2e87e5SRichard Henderson         m_is_ppb_region(env, address)) {
15341f2e87e5SRichard Henderson         /*
15351f2e87e5SRichard Henderson          * MPU disabled or M profile PPB access: use default memory map.
15361f2e87e5SRichard Henderson          * The other case which uses the default memory map in the
15371f2e87e5SRichard Henderson          * v7M ARM ARM pseudocode is exception vector reads from the vector
15381f2e87e5SRichard Henderson          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
15391f2e87e5SRichard Henderson          * which always does a direct read using address_space_ldl(), rather
15401f2e87e5SRichard Henderson          * than going via this function, so we don't need to check that here.
15411f2e87e5SRichard Henderson          */
15427fa7ea8fSRichard Henderson         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
15431f2e87e5SRichard Henderson     } else { /* MPU enabled */
15441f2e87e5SRichard Henderson         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
15451f2e87e5SRichard Henderson             /* region search */
15461f2e87e5SRichard Henderson             uint32_t base = env->pmsav7.drbar[n];
15471f2e87e5SRichard Henderson             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
15481f2e87e5SRichard Henderson             uint32_t rmask;
15491f2e87e5SRichard Henderson             bool srdis = false;
15501f2e87e5SRichard Henderson 
15511f2e87e5SRichard Henderson             if (!(env->pmsav7.drsr[n] & 0x1)) {
15521f2e87e5SRichard Henderson                 continue;
15531f2e87e5SRichard Henderson             }
15541f2e87e5SRichard Henderson 
15551f2e87e5SRichard Henderson             if (!rsize) {
15561f2e87e5SRichard Henderson                 qemu_log_mask(LOG_GUEST_ERROR,
15571f2e87e5SRichard Henderson                               "DRSR[%d]: Rsize field cannot be 0\n", n);
15581f2e87e5SRichard Henderson                 continue;
15591f2e87e5SRichard Henderson             }
15601f2e87e5SRichard Henderson             rsize++;
15611f2e87e5SRichard Henderson             rmask = (1ull << rsize) - 1;
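            /*
             * For example, a DRSR size field of 11 gives rsize 12 and
             * rmask 0xfff, i.e. a 4KB region; the maximum field value
             * of 31 gives a 4GB region.
             */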
15621f2e87e5SRichard Henderson 
15631f2e87e5SRichard Henderson             if (base & rmask) {
15641f2e87e5SRichard Henderson                 qemu_log_mask(LOG_GUEST_ERROR,
15651f2e87e5SRichard Henderson                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
15661f2e87e5SRichard Henderson                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
15671f2e87e5SRichard Henderson                               n, base, rmask);
15681f2e87e5SRichard Henderson                 continue;
15691f2e87e5SRichard Henderson             }
15701f2e87e5SRichard Henderson 
15711f2e87e5SRichard Henderson             if (address < base || address > base + rmask) {
15721f2e87e5SRichard Henderson                 /*
15731f2e87e5SRichard Henderson                  * Address not in this region. We must check whether the
15741f2e87e5SRichard Henderson                  * region covers addresses in the same page as our address.
15751f2e87e5SRichard Henderson                  * In that case we must not report a size that covers the
15761f2e87e5SRichard Henderson                  * whole page for a subsequent hit against a different MPU
15771f2e87e5SRichard Henderson                  * region or the background region, because it would result in
15781f2e87e5SRichard Henderson                  * incorrect TLB hits for subsequent accesses to addresses that
15791f2e87e5SRichard Henderson                  * are in this MPU region.
15801f2e87e5SRichard Henderson                  */
15811f2e87e5SRichard Henderson                 if (ranges_overlap(base, rmask,
15821f2e87e5SRichard Henderson                                    address & TARGET_PAGE_MASK,
15831f2e87e5SRichard Henderson                                    TARGET_PAGE_SIZE)) {
15847fa7ea8fSRichard Henderson                     result->f.lg_page_size = 0;
15851f2e87e5SRichard Henderson                 }
15861f2e87e5SRichard Henderson                 continue;
15871f2e87e5SRichard Henderson             }
15881f2e87e5SRichard Henderson 
15891f2e87e5SRichard Henderson             /* Region matched */
15901f2e87e5SRichard Henderson 
15911f2e87e5SRichard Henderson             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
15921f2e87e5SRichard Henderson                 int i, snd;
15931f2e87e5SRichard Henderson                 uint32_t srdis_mask;
15941f2e87e5SRichard Henderson 
15951f2e87e5SRichard Henderson                 rsize -= 3; /* sub region size (power of 2) */
15961f2e87e5SRichard Henderson                 snd = ((address - base) >> rsize) & 0x7;
15971f2e87e5SRichard Henderson                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
15981f2e87e5SRichard Henderson 
15991f2e87e5SRichard Henderson                 srdis_mask = srdis ? 0x3 : 0x0;
16001f2e87e5SRichard Henderson                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
16011f2e87e5SRichard Henderson                     /*
16021f2e87e5SRichard Henderson                      * This will check in groups of 2, 4 and then 8, whether
16031f2e87e5SRichard Henderson                      * the subregion bits are consistent. rsize is incremented
16041f2e87e5SRichard Henderson                      * back up to give the region size, considering consistent
16051f2e87e5SRichard Henderson                      * adjacent subregions as one region. Stop testing if rsize
16061f2e87e5SRichard Henderson                      * is already big enough for an entire QEMU page.
16071f2e87e5SRichard Henderson                      */
16081f2e87e5SRichard Henderson                     int snd_rounded = snd & ~(i - 1);
16091f2e87e5SRichard Henderson                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
16101f2e87e5SRichard Henderson                                                      snd_rounded + 8, i);
16111f2e87e5SRichard Henderson                     if (srdis_mask ^ srdis_multi) {
16121f2e87e5SRichard Henderson                         break;
16131f2e87e5SRichard Henderson                     }
16141f2e87e5SRichard Henderson                     srdis_mask = (srdis_mask << i) | srdis_mask;
16151f2e87e5SRichard Henderson                     rsize++;
16161f2e87e5SRichard Henderson                 }
16171f2e87e5SRichard Henderson             }
16181f2e87e5SRichard Henderson             if (srdis) {
16191f2e87e5SRichard Henderson                 continue;
16201f2e87e5SRichard Henderson             }
16211f2e87e5SRichard Henderson             if (rsize < TARGET_PAGE_BITS) {
16227fa7ea8fSRichard Henderson                 result->f.lg_page_size = rsize;
16231f2e87e5SRichard Henderson             }
16241f2e87e5SRichard Henderson             break;
16251f2e87e5SRichard Henderson         }
16261f2e87e5SRichard Henderson 
16271f2e87e5SRichard Henderson         if (n == -1) { /* no hits */
16281a469cf7SRichard Henderson             if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
16291f2e87e5SRichard Henderson                 /* background fault */
16301f2e87e5SRichard Henderson                 fi->type = ARMFault_Background;
16311f2e87e5SRichard Henderson                 return true;
16321f2e87e5SRichard Henderson             }
16337fa7ea8fSRichard Henderson             get_phys_addr_pmsav7_default(env, mmu_idx, address,
16347fa7ea8fSRichard Henderson                                          &result->f.prot);
16351f2e87e5SRichard Henderson         } else { /* a MPU hit! */
16361f2e87e5SRichard Henderson             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
16371f2e87e5SRichard Henderson             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
16381f2e87e5SRichard Henderson 
16391f2e87e5SRichard Henderson             if (m_is_system_region(env, address)) {
16401f2e87e5SRichard Henderson                 /* System space is always execute never */
16411f2e87e5SRichard Henderson                 xn = 1;
16421f2e87e5SRichard Henderson             }
16431f2e87e5SRichard Henderson 
16441f2e87e5SRichard Henderson             if (is_user) { /* User mode AP bit decoding */
16451f2e87e5SRichard Henderson                 switch (ap) {
16461f2e87e5SRichard Henderson                 case 0:
16471f2e87e5SRichard Henderson                 case 1:
16481f2e87e5SRichard Henderson                 case 5:
16491f2e87e5SRichard Henderson                     break; /* no access */
16501f2e87e5SRichard Henderson                 case 3:
16517fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_WRITE;
16521f2e87e5SRichard Henderson                     /* fall through */
16531f2e87e5SRichard Henderson                 case 2:
16541f2e87e5SRichard Henderson                 case 6:
16557fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_READ | PAGE_EXEC;
16561f2e87e5SRichard Henderson                     break;
16571f2e87e5SRichard Henderson                 case 7:
16581f2e87e5SRichard Henderson                     /* for v7M, same as 6; for R profile a reserved value */
16591f2e87e5SRichard Henderson                     if (arm_feature(env, ARM_FEATURE_M)) {
16607fa7ea8fSRichard Henderson                         result->f.prot |= PAGE_READ | PAGE_EXEC;
16611f2e87e5SRichard Henderson                         break;
16621f2e87e5SRichard Henderson                     }
16631f2e87e5SRichard Henderson                     /* fall through */
16641f2e87e5SRichard Henderson                 default:
16651f2e87e5SRichard Henderson                     qemu_log_mask(LOG_GUEST_ERROR,
16661f2e87e5SRichard Henderson                                   "DRACR[%d]: Bad value for AP bits: 0x%"
16671f2e87e5SRichard Henderson                                   PRIx32 "\n", n, ap);
16681f2e87e5SRichard Henderson                 }
16691f2e87e5SRichard Henderson             } else { /* Priv. mode AP bits decoding */
16701f2e87e5SRichard Henderson                 switch (ap) {
16711f2e87e5SRichard Henderson                 case 0:
16721f2e87e5SRichard Henderson                     break; /* no access */
16731f2e87e5SRichard Henderson                 case 1:
16741f2e87e5SRichard Henderson                 case 2:
16751f2e87e5SRichard Henderson                 case 3:
16767fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_WRITE;
16771f2e87e5SRichard Henderson                     /* fall through */
16781f2e87e5SRichard Henderson                 case 5:
16791f2e87e5SRichard Henderson                 case 6:
16807fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_READ | PAGE_EXEC;
16811f2e87e5SRichard Henderson                     break;
16821f2e87e5SRichard Henderson                 case 7:
16831f2e87e5SRichard Henderson                     /* for v7M, same as 6; for R profile a reserved value */
16841f2e87e5SRichard Henderson                     if (arm_feature(env, ARM_FEATURE_M)) {
16857fa7ea8fSRichard Henderson                         result->f.prot |= PAGE_READ | PAGE_EXEC;
16861f2e87e5SRichard Henderson                         break;
16871f2e87e5SRichard Henderson                     }
16881f2e87e5SRichard Henderson                     /* fall through */
16891f2e87e5SRichard Henderson                 default:
16901f2e87e5SRichard Henderson                     qemu_log_mask(LOG_GUEST_ERROR,
16911f2e87e5SRichard Henderson                                   "DRACR[%d]: Bad value for AP bits: 0x%"
16921f2e87e5SRichard Henderson                                   PRIx32 "\n", n, ap);
16931f2e87e5SRichard Henderson                 }
16941f2e87e5SRichard Henderson             }
16951f2e87e5SRichard Henderson 
16961f2e87e5SRichard Henderson             /* execute never */
16971f2e87e5SRichard Henderson             if (xn) {
16987fa7ea8fSRichard Henderson                 result->f.prot &= ~PAGE_EXEC;
16991f2e87e5SRichard Henderson             }
17001f2e87e5SRichard Henderson         }
17011f2e87e5SRichard Henderson     }
17021f2e87e5SRichard Henderson 
17031f2e87e5SRichard Henderson     fi->type = ARMFault_Permission;
17041f2e87e5SRichard Henderson     fi->level = 1;
17057fa7ea8fSRichard Henderson     return !(result->f.prot & (1 << access_type));
17061f2e87e5SRichard Henderson }
17071f2e87e5SRichard Henderson 
1708fedbaa05SRichard Henderson bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
1709fedbaa05SRichard Henderson                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
1710e9fb7090SRichard Henderson                        bool secure, GetPhysAddrResult *result,
1711e9fb7090SRichard Henderson                        ARMMMUFaultInfo *fi, uint32_t *mregion)
1712fedbaa05SRichard Henderson {
1713fedbaa05SRichard Henderson     /*
1714fedbaa05SRichard Henderson      * Perform a PMSAv8 MPU lookup (without also doing the SAU check
1715fedbaa05SRichard Henderson      * that a full phys-to-virt translation does).
1716fedbaa05SRichard Henderson      * mregion is (if not NULL) set to the region number which matched,
1717fedbaa05SRichard Henderson      * or -1 if no region number is returned (MPU off, address did not
1718fedbaa05SRichard Henderson      * hit a region, address hit in multiple regions).
1719652c750eSRichard Henderson      * If the region hit doesn't cover the entire TARGET_PAGE that the address
1720652c750eSRichard Henderson      * is within, then we set result->f.lg_page_size to 0 to force the
1721652c750eSRichard Henderson      * memory system to use a subpage.
1722fedbaa05SRichard Henderson      */
1723fedbaa05SRichard Henderson     ARMCPU *cpu = env_archcpu(env);
1724fedbaa05SRichard Henderson     bool is_user = regime_is_user(env, mmu_idx);
1725fedbaa05SRichard Henderson     int n;
1726fedbaa05SRichard Henderson     int matchregion = -1;
1727fedbaa05SRichard Henderson     bool hit = false;
1728fedbaa05SRichard Henderson     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
1729fedbaa05SRichard Henderson     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
1730fedbaa05SRichard Henderson 
17317fa7ea8fSRichard Henderson     result->f.lg_page_size = TARGET_PAGE_BITS;
17327fa7ea8fSRichard Henderson     result->f.phys_addr = address;
17337fa7ea8fSRichard Henderson     result->f.prot = 0;
1734fedbaa05SRichard Henderson     if (mregion) {
1735fedbaa05SRichard Henderson         *mregion = -1;
1736fedbaa05SRichard Henderson     }
1737fedbaa05SRichard Henderson 
1738fedbaa05SRichard Henderson     /*
1739fedbaa05SRichard Henderson      * Unlike the ARM ARM pseudocode, we don't need to check whether this
1740fedbaa05SRichard Henderson      * was an exception vector read from the vector table (which is always
1741fedbaa05SRichard Henderson      * done using the default system address map), because those accesses
1742fedbaa05SRichard Henderson      * are done in arm_v7m_load_vector(), which always does a direct
1743fedbaa05SRichard Henderson      * read using address_space_ldl(), rather than going via this function.
1744fedbaa05SRichard Henderson      */
17457e80c0a4SRichard Henderson     if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
1746fedbaa05SRichard Henderson         hit = true;
1747fedbaa05SRichard Henderson     } else if (m_is_ppb_region(env, address)) {
1748fedbaa05SRichard Henderson         hit = true;
1749fedbaa05SRichard Henderson     } else {
17501a469cf7SRichard Henderson         if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
1751fedbaa05SRichard Henderson             hit = true;
1752fedbaa05SRichard Henderson         }
1753fedbaa05SRichard Henderson 
1754fedbaa05SRichard Henderson         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
1755fedbaa05SRichard Henderson             /* region search */
1756fedbaa05SRichard Henderson             /*
1757fedbaa05SRichard Henderson              * Note that the base address is bits [31:5] from the register
1758fedbaa05SRichard Henderson              * with bits [4:0] all zeroes, but the limit address is bits
1759fedbaa05SRichard Henderson              * [31:5] from the register with bits [4:0] all ones.
1760fedbaa05SRichard Henderson              */
1761fedbaa05SRichard Henderson             uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
1762fedbaa05SRichard Henderson             uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
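            /*
             * For example, an RBAR base of 0x20000000 and an RLAR limit
             * field of 0x2000ffe0 describe the region
             * [0x20000000, 0x2000ffff].
             */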
1763fedbaa05SRichard Henderson 
1764fedbaa05SRichard Henderson             if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
1765fedbaa05SRichard Henderson                 /* Region disabled */
1766fedbaa05SRichard Henderson                 continue;
1767fedbaa05SRichard Henderson             }
1768fedbaa05SRichard Henderson 
1769fedbaa05SRichard Henderson             if (address < base || address > limit) {
1770fedbaa05SRichard Henderson                 /*
1771fedbaa05SRichard Henderson                  * Address not in this region. We must check whether the
1772fedbaa05SRichard Henderson                  * region covers addresses in the same page as our address.
1773fedbaa05SRichard Henderson                  * In that case we must not report a size that covers the
1774fedbaa05SRichard Henderson                  * whole page for a subsequent hit against a different MPU
1775fedbaa05SRichard Henderson                  * region or the background region, because it would result in
1776fedbaa05SRichard Henderson                  * incorrect TLB hits for subsequent accesses to addresses that
1777fedbaa05SRichard Henderson                  * are in this MPU region.
1778fedbaa05SRichard Henderson                  */
1779fedbaa05SRichard Henderson                 if (limit >= base &&
1780fedbaa05SRichard Henderson                     ranges_overlap(base, limit - base + 1,
1781fedbaa05SRichard Henderson                                    addr_page_base,
1782fedbaa05SRichard Henderson                                    TARGET_PAGE_SIZE)) {
17837fa7ea8fSRichard Henderson                     result->f.lg_page_size = 0;
1784fedbaa05SRichard Henderson                 }
1785fedbaa05SRichard Henderson                 continue;
1786fedbaa05SRichard Henderson             }
1787fedbaa05SRichard Henderson 
1788fedbaa05SRichard Henderson             if (base > addr_page_base || limit < addr_page_limit) {
17897fa7ea8fSRichard Henderson                 result->f.lg_page_size = 0;
1790fedbaa05SRichard Henderson             }
1791fedbaa05SRichard Henderson 
1792fedbaa05SRichard Henderson             if (matchregion != -1) {
1793fedbaa05SRichard Henderson                 /*
1794fedbaa05SRichard Henderson                  * Multiple regions match -- always a failure (unlike
1795fedbaa05SRichard Henderson                  * PMSAv7 where highest-numbered-region wins)
1796fedbaa05SRichard Henderson                  */
1797fedbaa05SRichard Henderson                 fi->type = ARMFault_Permission;
1798fedbaa05SRichard Henderson                 fi->level = 1;
1799fedbaa05SRichard Henderson                 return true;
1800fedbaa05SRichard Henderson             }
1801fedbaa05SRichard Henderson 
1802fedbaa05SRichard Henderson             matchregion = n;
1803fedbaa05SRichard Henderson             hit = true;
1804fedbaa05SRichard Henderson         }
1805fedbaa05SRichard Henderson     }
1806fedbaa05SRichard Henderson 
1807fedbaa05SRichard Henderson     if (!hit) {
1808fedbaa05SRichard Henderson         /* background fault */
1809fedbaa05SRichard Henderson         fi->type = ARMFault_Background;
1810fedbaa05SRichard Henderson         return true;
1811fedbaa05SRichard Henderson     }
1812fedbaa05SRichard Henderson 
1813fedbaa05SRichard Henderson     if (matchregion == -1) {
1814fedbaa05SRichard Henderson         /* hit using the background region */
18157fa7ea8fSRichard Henderson         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
1816fedbaa05SRichard Henderson     } else {
1817fedbaa05SRichard Henderson         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
1818fedbaa05SRichard Henderson         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
1819fedbaa05SRichard Henderson         bool pxn = false;
1820fedbaa05SRichard Henderson 
1821fedbaa05SRichard Henderson         if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1822fedbaa05SRichard Henderson             pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
1823fedbaa05SRichard Henderson         }
1824fedbaa05SRichard Henderson 
1825fedbaa05SRichard Henderson         if (m_is_system_region(env, address)) {
1826fedbaa05SRichard Henderson             /* System space is always execute never */
1827fedbaa05SRichard Henderson             xn = 1;
1828fedbaa05SRichard Henderson         }
1829fedbaa05SRichard Henderson 
18307fa7ea8fSRichard Henderson         result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
18317fa7ea8fSRichard Henderson         if (result->f.prot && !xn && !(pxn && !is_user)) {
18327fa7ea8fSRichard Henderson             result->f.prot |= PAGE_EXEC;
1833fedbaa05SRichard Henderson         }
1834fedbaa05SRichard Henderson         /*
1835fedbaa05SRichard Henderson          * We don't need to look the attribute up in the MAIR0/MAIR1
1836fedbaa05SRichard Henderson          * registers because that only tells us about cacheability.
1837fedbaa05SRichard Henderson          */
1838fedbaa05SRichard Henderson         if (mregion) {
1839fedbaa05SRichard Henderson             *mregion = matchregion;
1840fedbaa05SRichard Henderson         }
1841fedbaa05SRichard Henderson     }
1842fedbaa05SRichard Henderson 
1843fedbaa05SRichard Henderson     fi->type = ARMFault_Permission;
1844fedbaa05SRichard Henderson     fi->level = 1;
18457fa7ea8fSRichard Henderson     return !(result->f.prot & (1 << access_type));
1846fedbaa05SRichard Henderson }
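
/*
 * Worked example of the RBAR/RLAR decode above, with illustrative values:
 * RBAR[n] = 0x20000005 and RLAR[n] = 0x2000ffe1 give an enabled region
 * (RLAR bit 0 set) with base = RBAR & ~0x1f = 0x20000000 and
 * limit = RLAR | 0x1f = 0x2000ffff, i.e. covering 0x20000000..0x2000ffff,
 * with ap = RBAR[2:1] = 2 and xn = RBAR[0] = 1.
 */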
1847fedbaa05SRichard Henderson 
18482c1f429dSRichard Henderson static bool v8m_is_sau_exempt(CPUARMState *env,
18492c1f429dSRichard Henderson                               uint32_t address, MMUAccessType access_type)
18502c1f429dSRichard Henderson {
18512c1f429dSRichard Henderson     /*
18522c1f429dSRichard Henderson      * The architecture specifies that certain address ranges are
18532c1f429dSRichard Henderson      * exempt from v8M SAU/IDAU checks.
18542c1f429dSRichard Henderson      */
18552c1f429dSRichard Henderson     return
18562c1f429dSRichard Henderson         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
18572c1f429dSRichard Henderson         (address >= 0xe0000000 && address <= 0xe0002fff) ||
18582c1f429dSRichard Henderson         (address >= 0xe000e000 && address <= 0xe000efff) ||
18592c1f429dSRichard Henderson         (address >= 0xe002e000 && address <= 0xe002efff) ||
18602c1f429dSRichard Henderson         (address >= 0xe0040000 && address <= 0xe0041fff) ||
18612c1f429dSRichard Henderson         (address >= 0xe00ff000 && address <= 0xe00fffff);
18622c1f429dSRichard Henderson }
18632c1f429dSRichard Henderson 
18642c1f429dSRichard Henderson void v8m_security_lookup(CPUARMState *env, uint32_t address,
18652c1f429dSRichard Henderson                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
1866dbf2a71aSRichard Henderson                          bool is_secure, V8M_SAttributes *sattrs)
18672c1f429dSRichard Henderson {
18682c1f429dSRichard Henderson     /*
18692c1f429dSRichard Henderson      * Look up the security attributes for this address. Compare the
18702c1f429dSRichard Henderson      * pseudocode SecurityCheck() function.
18712c1f429dSRichard Henderson      * We assume the caller has zero-initialized *sattrs.
18722c1f429dSRichard Henderson      */
18732c1f429dSRichard Henderson     ARMCPU *cpu = env_archcpu(env);
18742c1f429dSRichard Henderson     int r;
18752c1f429dSRichard Henderson     bool idau_exempt = false, idau_ns = true, idau_nsc = true;
18762c1f429dSRichard Henderson     int idau_region = IREGION_NOTVALID;
18772c1f429dSRichard Henderson     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
18782c1f429dSRichard Henderson     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
18792c1f429dSRichard Henderson 
18802c1f429dSRichard Henderson     if (cpu->idau) {
18812c1f429dSRichard Henderson         IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
18822c1f429dSRichard Henderson         IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
18832c1f429dSRichard Henderson 
18842c1f429dSRichard Henderson         iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
18852c1f429dSRichard Henderson                    &idau_nsc);
18862c1f429dSRichard Henderson     }
18872c1f429dSRichard Henderson 
18882c1f429dSRichard Henderson     if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
18892c1f429dSRichard Henderson         /* 0xf0000000..0xffffffff is always S for insn fetches */
18902c1f429dSRichard Henderson         return;
18912c1f429dSRichard Henderson     }
18922c1f429dSRichard Henderson 
18932c1f429dSRichard Henderson     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
1894dbf2a71aSRichard Henderson         sattrs->ns = !is_secure;
18952c1f429dSRichard Henderson         return;
18962c1f429dSRichard Henderson     }
18972c1f429dSRichard Henderson 
18982c1f429dSRichard Henderson     if (idau_region != IREGION_NOTVALID) {
18992c1f429dSRichard Henderson         sattrs->irvalid = true;
19002c1f429dSRichard Henderson         sattrs->iregion = idau_region;
19012c1f429dSRichard Henderson     }
19022c1f429dSRichard Henderson 
19032c1f429dSRichard Henderson     switch (env->sau.ctrl & 3) {
19042c1f429dSRichard Henderson     case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
19052c1f429dSRichard Henderson         break;
19062c1f429dSRichard Henderson     case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
19072c1f429dSRichard Henderson         sattrs->ns = true;
19082c1f429dSRichard Henderson         break;
19092c1f429dSRichard Henderson     default: /* SAU.ENABLE == 1 */
19102c1f429dSRichard Henderson         for (r = 0; r < cpu->sau_sregion; r++) {
19112c1f429dSRichard Henderson             if (env->sau.rlar[r] & 1) {
19122c1f429dSRichard Henderson                 uint32_t base = env->sau.rbar[r] & ~0x1f;
19132c1f429dSRichard Henderson                 uint32_t limit = env->sau.rlar[r] | 0x1f;
19142c1f429dSRichard Henderson 
19152c1f429dSRichard Henderson                 if (base <= address && limit >= address) {
19162c1f429dSRichard Henderson                     if (base > addr_page_base || limit < addr_page_limit) {
19172c1f429dSRichard Henderson                         sattrs->subpage = true;
19182c1f429dSRichard Henderson                     }
19192c1f429dSRichard Henderson                     if (sattrs->srvalid) {
19202c1f429dSRichard Henderson                         /*
19212c1f429dSRichard Henderson                          * If we hit in more than one region then we must report
19222c1f429dSRichard Henderson                          * as Secure, not NS-Callable, with no valid region
19232c1f429dSRichard Henderson                          * number info.
19242c1f429dSRichard Henderson                          */
19252c1f429dSRichard Henderson                         sattrs->ns = false;
19262c1f429dSRichard Henderson                         sattrs->nsc = false;
19272c1f429dSRichard Henderson                         sattrs->sregion = 0;
19282c1f429dSRichard Henderson                         sattrs->srvalid = false;
19292c1f429dSRichard Henderson                         break;
19302c1f429dSRichard Henderson                     } else {
19312c1f429dSRichard Henderson                         if (env->sau.rlar[r] & 2) {
19322c1f429dSRichard Henderson                             sattrs->nsc = true;
19332c1f429dSRichard Henderson                         } else {
19342c1f429dSRichard Henderson                             sattrs->ns = true;
19352c1f429dSRichard Henderson                         }
19362c1f429dSRichard Henderson                         sattrs->srvalid = true;
19372c1f429dSRichard Henderson                         sattrs->sregion = r;
19382c1f429dSRichard Henderson                     }
19392c1f429dSRichard Henderson                 } else {
19402c1f429dSRichard Henderson                     /*
19412c1f429dSRichard Henderson                      * Address not in this region. We must check whether the
19422c1f429dSRichard Henderson                      * region covers addresses in the same page as our address.
19432c1f429dSRichard Henderson                      * In that case we must not report a size that covers the
19442c1f429dSRichard Henderson                      * whole page for a subsequent hit against a different MPU
19452c1f429dSRichard Henderson                      * region or the background region, because it would result
19462c1f429dSRichard Henderson                      * in incorrect TLB hits for subsequent accesses to
19472c1f429dSRichard Henderson                      * addresses that are in this MPU region.
19482c1f429dSRichard Henderson                      */
19492c1f429dSRichard Henderson                     if (limit >= base &&
19502c1f429dSRichard Henderson                         ranges_overlap(base, limit - base + 1,
19512c1f429dSRichard Henderson                                        addr_page_base,
19522c1f429dSRichard Henderson                                        TARGET_PAGE_SIZE)) {
19532c1f429dSRichard Henderson                         sattrs->subpage = true;
19542c1f429dSRichard Henderson                     }
19552c1f429dSRichard Henderson                 }
19562c1f429dSRichard Henderson             }
19572c1f429dSRichard Henderson         }
19582c1f429dSRichard Henderson         break;
19592c1f429dSRichard Henderson     }
19602c1f429dSRichard Henderson 
19612c1f429dSRichard Henderson     /*
19622c1f429dSRichard Henderson      * The IDAU will override the SAU lookup results if it specifies
19632c1f429dSRichard Henderson      * higher security than the SAU does.
19642c1f429dSRichard Henderson      */
19652c1f429dSRichard Henderson     if (!idau_ns) {
19662c1f429dSRichard Henderson         if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
19672c1f429dSRichard Henderson             sattrs->ns = false;
19682c1f429dSRichard Henderson             sattrs->nsc = idau_nsc;
19692c1f429dSRichard Henderson         }
19702c1f429dSRichard Henderson     }
19712c1f429dSRichard Henderson }
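
/*
 * Worked example of the SAU region decode above, with illustrative values:
 * sau.rbar[r] = 0x10000000 and sau.rlar[r] = 0x1000ffe3 describe an
 * enabled region (bit 0) covering 0x10000000..0x1000ffff; because
 * RLAR bit 1 is also set, a single hit marks the address as Secure,
 * NS-Callable (sattrs->nsc = true) rather than Non-secure.
 */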
19722c1f429dSRichard Henderson 
1973730d5c31SRichard Henderson static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
1974730d5c31SRichard Henderson                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
1975be0ca948SRichard Henderson                                  bool secure, GetPhysAddrResult *result,
1976730d5c31SRichard Henderson                                  ARMMMUFaultInfo *fi)
1977730d5c31SRichard Henderson {
1978730d5c31SRichard Henderson     V8M_SAttributes sattrs = {};
1979730d5c31SRichard Henderson     bool ret;
1980730d5c31SRichard Henderson 
1981730d5c31SRichard Henderson     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1982dbf2a71aSRichard Henderson         v8m_security_lookup(env, address, access_type, mmu_idx,
1983dbf2a71aSRichard Henderson                             secure, &sattrs);
1984730d5c31SRichard Henderson         if (access_type == MMU_INST_FETCH) {
1985730d5c31SRichard Henderson             /*
1986730d5c31SRichard Henderson              * Instruction fetches always use the MMU bank and the
1987730d5c31SRichard Henderson              * transaction attribute determined by the fetch address,
1988730d5c31SRichard Henderson              * regardless of CPU state. This is painful for QEMU
1989730d5c31SRichard Henderson              * to handle, because it would mean we need to encode
1990730d5c31SRichard Henderson              * into the mmu_idx not just the (user, negpri) information
1991730d5c31SRichard Henderson              * for the current security state but also that for the
1992730d5c31SRichard Henderson              * other security state, which would balloon the number
1993730d5c31SRichard Henderson              * of mmu_idx values needed alarmingly.
1994730d5c31SRichard Henderson              * Fortunately we can avoid this because it's not actually
1995730d5c31SRichard Henderson              * possible to arbitrarily execute code from memory with
1996730d5c31SRichard Henderson              * the wrong security attribute: it will always generate
1997730d5c31SRichard Henderson              * an exception of some kind or another, apart from the
1998730d5c31SRichard Henderson              * special case of an NS CPU executing an SG instruction
1999730d5c31SRichard Henderson              * in S&NSC memory. So we always just fail the translation
2000730d5c31SRichard Henderson              * here and sort things out in the exception handler
2001730d5c31SRichard Henderson              * (including possibly emulating an SG instruction).
2002730d5c31SRichard Henderson              */
2003730d5c31SRichard Henderson             if (sattrs.ns != !secure) {
2004730d5c31SRichard Henderson                 if (sattrs.nsc) {
2005730d5c31SRichard Henderson                     fi->type = ARMFault_QEMU_NSCExec;
2006730d5c31SRichard Henderson                 } else {
2007730d5c31SRichard Henderson                     fi->type = ARMFault_QEMU_SFault;
2008730d5c31SRichard Henderson                 }
20097fa7ea8fSRichard Henderson                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
20107fa7ea8fSRichard Henderson                 result->f.phys_addr = address;
20117fa7ea8fSRichard Henderson                 result->f.prot = 0;
2012730d5c31SRichard Henderson                 return true;
2013730d5c31SRichard Henderson             }
2014730d5c31SRichard Henderson         } else {
2015730d5c31SRichard Henderson             /*
2016730d5c31SRichard Henderson              * For data accesses we always use the MMU bank indicated
2017730d5c31SRichard Henderson              * by the current CPU state, but the security attributes
2018730d5c31SRichard Henderson              * might downgrade a secure access to nonsecure.
2019730d5c31SRichard Henderson              */
2020730d5c31SRichard Henderson             if (sattrs.ns) {
20217fa7ea8fSRichard Henderson                 result->f.attrs.secure = false;
2022730d5c31SRichard Henderson             } else if (!secure) {
2023730d5c31SRichard Henderson                 /*
2024730d5c31SRichard Henderson                  * NS access to S memory must fault.
2025730d5c31SRichard Henderson                  * Architecturally we should first check whether the
2026730d5c31SRichard Henderson                  * MPU information for this address indicates that we
2027730d5c31SRichard Henderson                  * are doing an unaligned access to Device memory, which
2028730d5c31SRichard Henderson                  * should generate a UsageFault instead. QEMU does not
2029730d5c31SRichard Henderson                  * currently check for that kind of unaligned access though.
2030730d5c31SRichard Henderson                  * If we added it we would need to do so as a special case
2031730d5c31SRichard Henderson                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2032730d5c31SRichard Henderson                  */
2033730d5c31SRichard Henderson                 fi->type = ARMFault_QEMU_SFault;
20347fa7ea8fSRichard Henderson                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
20357fa7ea8fSRichard Henderson                 result->f.phys_addr = address;
20367fa7ea8fSRichard Henderson                 result->f.prot = 0;
2037730d5c31SRichard Henderson                 return true;
2038730d5c31SRichard Henderson             }
2039730d5c31SRichard Henderson         }
2040730d5c31SRichard Henderson     }
2041730d5c31SRichard Henderson 
2042e9fb7090SRichard Henderson     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2043652c750eSRichard Henderson                             result, fi, NULL);
2044652c750eSRichard Henderson     if (sattrs.subpage) {
20457fa7ea8fSRichard Henderson         result->f.lg_page_size = 0;
2046652c750eSRichard Henderson     }
2047730d5c31SRichard Henderson     return ret;
2048730d5c31SRichard Henderson }
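
/*
 * Example of the instruction-fetch check above: a Non-secure fetch
 * (secure == false) from an address that the SAU/IDAU marks as Secure
 * gives sattrs.ns == false, so sattrs.ns != !secure and we fail the
 * translation with ARMFault_QEMU_SFault (or ARMFault_QEMU_NSCExec if
 * the region is NS-Callable), leaving the exception path to handle any
 * SG instruction emulation.
 */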
2049730d5c31SRichard Henderson 
2050966f4bb7SRichard Henderson /*
2051966f4bb7SRichard Henderson  * Translate from the 4-bit stage 2 representation of
2052966f4bb7SRichard Henderson  * memory attributes (without cache-allocation hints) to
2053966f4bb7SRichard Henderson  * the 8-bit representation of the stage 1 MAIR registers
2054966f4bb7SRichard Henderson  * (which includes allocation hints).
2055966f4bb7SRichard Henderson  *
2056966f4bb7SRichard Henderson  * ref: shared/translation/attrs/S2AttrDecode()
2057966f4bb7SRichard Henderson  *      .../S2ConvertAttrsHints()
2058966f4bb7SRichard Henderson  */
2059ac76c2e5SRichard Henderson static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2060966f4bb7SRichard Henderson {
2061966f4bb7SRichard Henderson     uint8_t hiattr = extract32(s2attrs, 2, 2);
2062966f4bb7SRichard Henderson     uint8_t loattr = extract32(s2attrs, 0, 2);
2063966f4bb7SRichard Henderson     uint8_t hihint = 0, lohint = 0;
2064966f4bb7SRichard Henderson 
2065966f4bb7SRichard Henderson     if (hiattr != 0) { /* normal memory */
2066ac76c2e5SRichard Henderson         if (hcr & HCR_CD) { /* cache disabled */
2067966f4bb7SRichard Henderson             hiattr = loattr = 1; /* non-cacheable */
2068966f4bb7SRichard Henderson         } else {
2069966f4bb7SRichard Henderson             if (hiattr != 1) { /* Write-through or write-back */
2070966f4bb7SRichard Henderson                 hihint = 3; /* RW allocate */
2071966f4bb7SRichard Henderson             }
2072966f4bb7SRichard Henderson             if (loattr != 1) { /* Write-through or write-back */
2073966f4bb7SRichard Henderson                 lohint = 3; /* RW allocate */
2074966f4bb7SRichard Henderson             }
2075966f4bb7SRichard Henderson         }
2076966f4bb7SRichard Henderson     }
2077966f4bb7SRichard Henderson 
2078966f4bb7SRichard Henderson     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
2079966f4bb7SRichard Henderson }
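
/*
 * Worked example: s2attrs = 0xf (Normal, Write-Back for both halves) with
 * HCR_EL2.CD clear gives hiattr = loattr = 3 and hihint = lohint = 3
 * (RW allocate), i.e. a stage 1 MAIR byte of 0xff; with HCR_EL2.CD set,
 * both halves are forced to Non-cacheable and the result is 0x44.
 */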
2080966f4bb7SRichard Henderson 
2081966f4bb7SRichard Henderson /*
2082966f4bb7SRichard Henderson  * Combine either inner or outer cacheability attributes for normal
2083966f4bb7SRichard Henderson  * memory, according to table D4-42 and pseudocode procedure
2084966f4bb7SRichard Henderson  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
2085966f4bb7SRichard Henderson  *
2086966f4bb7SRichard Henderson  * NB: only stage 1 includes allocation hints (RW bits), leading to
2087966f4bb7SRichard Henderson  * some asymmetry.
2088966f4bb7SRichard Henderson  */
2089966f4bb7SRichard Henderson static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
2090966f4bb7SRichard Henderson {
2091966f4bb7SRichard Henderson     if (s1 == 4 || s2 == 4) {
2092966f4bb7SRichard Henderson         /* non-cacheable has precedence */
2093966f4bb7SRichard Henderson         return 4;
2094966f4bb7SRichard Henderson     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
2095966f4bb7SRichard Henderson         /* stage 1 write-through takes precedence */
2096966f4bb7SRichard Henderson         return s1;
2097966f4bb7SRichard Henderson     } else if (extract32(s2, 2, 2) == 2) {
2098966f4bb7SRichard Henderson         /* stage 2 write-through takes precedence, but the allocation hint
2099966f4bb7SRichard Henderson          * is still taken from stage 1
2100966f4bb7SRichard Henderson          */
2101966f4bb7SRichard Henderson         return (2 << 2) | extract32(s1, 0, 2);
2102966f4bb7SRichard Henderson     } else { /* write-back */
2103966f4bb7SRichard Henderson         return s1;
2104966f4bb7SRichard Henderson     }
2105966f4bb7SRichard Henderson }
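
/*
 * Worked example: s1 = 0xf (Write-Back, RW allocate) combined with
 * s2 = 0xb (Write-Through) takes the Write-Through type from stage 2 but
 * keeps the stage 1 allocation hints, returning (2 << 2) | 3 = 0xb;
 * if either input were 4 (Non-cacheable), the result would be 4.
 */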
2106966f4bb7SRichard Henderson 
2107966f4bb7SRichard Henderson /*
2108966f4bb7SRichard Henderson  * Combine the memory type and cacheability attributes of
2109966f4bb7SRichard Henderson  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
2110966f4bb7SRichard Henderson  * combined attributes in MAIR_EL1 format.
2111966f4bb7SRichard Henderson  */
2112ac76c2e5SRichard Henderson static uint8_t combined_attrs_nofwb(uint64_t hcr,
2113966f4bb7SRichard Henderson                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
2114966f4bb7SRichard Henderson {
2115966f4bb7SRichard Henderson     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
2116966f4bb7SRichard Henderson 
2117ac76c2e5SRichard Henderson     s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
2118966f4bb7SRichard Henderson 
2119966f4bb7SRichard Henderson     s1lo = extract32(s1.attrs, 0, 4);
2120966f4bb7SRichard Henderson     s2lo = extract32(s2_mair_attrs, 0, 4);
2121966f4bb7SRichard Henderson     s1hi = extract32(s1.attrs, 4, 4);
2122966f4bb7SRichard Henderson     s2hi = extract32(s2_mair_attrs, 4, 4);
2123966f4bb7SRichard Henderson 
2124966f4bb7SRichard Henderson     /* Combine memory type and cacheability attributes */
2125966f4bb7SRichard Henderson     if (s1hi == 0 || s2hi == 0) {
2126966f4bb7SRichard Henderson         /* Device has precedence over normal */
2127966f4bb7SRichard Henderson         if (s1lo == 0 || s2lo == 0) {
2128966f4bb7SRichard Henderson             /* nGnRnE has precedence over anything */
2129966f4bb7SRichard Henderson             ret_attrs = 0;
2130966f4bb7SRichard Henderson         } else if (s1lo == 4 || s2lo == 4) {
2131966f4bb7SRichard Henderson             /* non-Reordering has precedence over Reordering */
2132966f4bb7SRichard Henderson             ret_attrs = 4;  /* nGnRE */
2133966f4bb7SRichard Henderson         } else if (s1lo == 8 || s2lo == 8) {
2134966f4bb7SRichard Henderson             /* non-Gathering has precedence over Gathering */
2135966f4bb7SRichard Henderson             ret_attrs = 8;  /* nGRE */
2136966f4bb7SRichard Henderson         } else {
2137966f4bb7SRichard Henderson             ret_attrs = 0xc; /* GRE */
2138966f4bb7SRichard Henderson         }
2139966f4bb7SRichard Henderson     } else { /* Normal memory */
2140966f4bb7SRichard Henderson         /* Outer/inner cacheability combine independently */
2141966f4bb7SRichard Henderson         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
2142966f4bb7SRichard Henderson                   | combine_cacheattr_nibble(s1lo, s2lo);
2143966f4bb7SRichard Henderson     }
2144966f4bb7SRichard Henderson     return ret_attrs;
2145966f4bb7SRichard Henderson }
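
/*
 * Worked example: a stage 1 attribute of 0x04 (Device-nGnRE) combined
 * with any stage 2 Normal attribute gives s1hi == 0, so the Device
 * branch is taken and the result is 4 (nGnRE); two Normal inputs
 * instead combine their outer and inner nibbles independently via
 * combine_cacheattr_nibble().
 */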
2146966f4bb7SRichard Henderson 
2147966f4bb7SRichard Henderson static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
2148966f4bb7SRichard Henderson {
2149966f4bb7SRichard Henderson     /*
2150966f4bb7SRichard Henderson      * Given the 4 bits specifying the outer or inner cacheability
2151966f4bb7SRichard Henderson      * in MAIR format, return a value specifying Normal Write-Back,
2152966f4bb7SRichard Henderson      * with the allocation and transient hints taken from the input
2153966f4bb7SRichard Henderson      * if the input specified some kind of cacheable attribute.
2154966f4bb7SRichard Henderson      */
2155966f4bb7SRichard Henderson     if (attr == 0 || attr == 4) {
2156966f4bb7SRichard Henderson         /*
2157966f4bb7SRichard Henderson          * 0 == an UNPREDICTABLE encoding
2158966f4bb7SRichard Henderson          * 4 == Non-cacheable
2159966f4bb7SRichard Henderson          * Either way, force Write-Back RW allocate non-transient
2160966f4bb7SRichard Henderson          */
2161966f4bb7SRichard Henderson         return 0xf;
2162966f4bb7SRichard Henderson     }
2163966f4bb7SRichard Henderson     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
2164966f4bb7SRichard Henderson     return attr | 4;
2165966f4bb7SRichard Henderson }
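
/*
 * For example, attr = 0x8 (Write-Through, no allocation hints) becomes
 * 0xc (Write-Back, same hints), while attr = 4 (Non-cacheable) and the
 * UNPREDICTABLE value 0 are both forced to 0xf (Write-Back, RW allocate).
 */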
2166966f4bb7SRichard Henderson 
2167966f4bb7SRichard Henderson /*
2168966f4bb7SRichard Henderson  * Combine the memory type and cacheability attributes of
2169966f4bb7SRichard Henderson  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
2170966f4bb7SRichard Henderson  * combined attributes in MAIR_EL1 format.
2171966f4bb7SRichard Henderson  */
217272cef09cSRichard Henderson static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
2173966f4bb7SRichard Henderson {
2174966f4bb7SRichard Henderson     switch (s2.attrs) {
2175966f4bb7SRichard Henderson     case 7:
2176966f4bb7SRichard Henderson         /* Use stage 1 attributes */
2177966f4bb7SRichard Henderson         return s1.attrs;
2178966f4bb7SRichard Henderson     case 6:
2179966f4bb7SRichard Henderson         /*
2180966f4bb7SRichard Henderson          * Force Normal Write-Back. Note that if S1 is Normal cacheable
2181966f4bb7SRichard Henderson          * then we take the allocation hints from it; otherwise it is
2182966f4bb7SRichard Henderson          * RW allocate, non-transient.
2183966f4bb7SRichard Henderson          */
2184966f4bb7SRichard Henderson         if ((s1.attrs & 0xf0) == 0) {
2185966f4bb7SRichard Henderson             /* S1 is Device */
2186966f4bb7SRichard Henderson             return 0xff;
2187966f4bb7SRichard Henderson         }
2188966f4bb7SRichard Henderson         /* Need to check the Inner and Outer nibbles separately */
2189966f4bb7SRichard Henderson         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
2190966f4bb7SRichard Henderson             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
2191966f4bb7SRichard Henderson     case 5:
2192966f4bb7SRichard Henderson         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
2193966f4bb7SRichard Henderson         if ((s1.attrs & 0xf0) == 0) {
2194966f4bb7SRichard Henderson             return s1.attrs;
2195966f4bb7SRichard Henderson         }
2196966f4bb7SRichard Henderson         return 0x44;
2197966f4bb7SRichard Henderson     case 0 ... 3:
2198966f4bb7SRichard Henderson         /* Force Device, of subtype specified by S2 */
2199966f4bb7SRichard Henderson         return s2.attrs << 2;
2200966f4bb7SRichard Henderson     default:
2201966f4bb7SRichard Henderson         /*
2202966f4bb7SRichard Henderson          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
2203966f4bb7SRichard Henderson          * arbitrarily force Device.
2204966f4bb7SRichard Henderson          */
2205966f4bb7SRichard Henderson         return 0;
2206966f4bb7SRichard Henderson     }
2207966f4bb7SRichard Henderson }
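
/*
 * Worked example for the FWB case: s2.attrs == 6 with a stage 1
 * attribute of 0xaa (Write-Through) forces each nibble to Write-Back
 * while keeping its allocation hints, giving 0xee; if stage 1 were
 * Device (high nibble 0) the result would be 0xff instead.
 */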
2208966f4bb7SRichard Henderson 
2209966f4bb7SRichard Henderson /*
2210966f4bb7SRichard Henderson  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
2211966f4bb7SRichard Henderson  * and CombineS1S2Desc()
2212966f4bb7SRichard Henderson  *
2213966f4bb7SRichard Henderson  * @hcr:     effective HCR_EL2 value for the translation regime
2214966f4bb7SRichard Henderson  * @s1:      Attributes from stage 1 walk
2215966f4bb7SRichard Henderson  * @s2:      Attributes from stage 2 walk
2216966f4bb7SRichard Henderson  */
2217ac76c2e5SRichard Henderson static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
2218966f4bb7SRichard Henderson                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
2219966f4bb7SRichard Henderson {
2220966f4bb7SRichard Henderson     ARMCacheAttrs ret;
2221966f4bb7SRichard Henderson     bool tagged = false;
2222966f4bb7SRichard Henderson 
2223966f4bb7SRichard Henderson     assert(s2.is_s2_format && !s1.is_s2_format);
2224966f4bb7SRichard Henderson     ret.is_s2_format = false;
2225966f4bb7SRichard Henderson 
2226966f4bb7SRichard Henderson     if (s1.attrs == 0xf0) {
2227966f4bb7SRichard Henderson         tagged = true;
2228966f4bb7SRichard Henderson         s1.attrs = 0xff;
2229966f4bb7SRichard Henderson     }
2230966f4bb7SRichard Henderson 
2231966f4bb7SRichard Henderson     /* Combine shareability attributes (table D4-43) */
2232966f4bb7SRichard Henderson     if (s1.shareability == 2 || s2.shareability == 2) {
2233966f4bb7SRichard Henderson         /* if either is outer-shareable, the result is outer-shareable */
2234966f4bb7SRichard Henderson         ret.shareability = 2;
2235966f4bb7SRichard Henderson     } else if (s1.shareability == 3 || s2.shareability == 3) {
2236966f4bb7SRichard Henderson         /* if either is inner-shareable, the result is inner-shareable */
2237966f4bb7SRichard Henderson         ret.shareability = 3;
2238966f4bb7SRichard Henderson     } else {
2239966f4bb7SRichard Henderson         /* both non-shareable */
2240966f4bb7SRichard Henderson         ret.shareability = 0;
2241966f4bb7SRichard Henderson     }
2242966f4bb7SRichard Henderson 
2243966f4bb7SRichard Henderson     /* Combine memory type and cacheability attributes */
2244ac76c2e5SRichard Henderson     if (hcr & HCR_FWB) {
224572cef09cSRichard Henderson         ret.attrs = combined_attrs_fwb(s1, s2);
2246966f4bb7SRichard Henderson     } else {
2247ac76c2e5SRichard Henderson         ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
2248966f4bb7SRichard Henderson     }
2249966f4bb7SRichard Henderson 
2250966f4bb7SRichard Henderson     /*
2251966f4bb7SRichard Henderson      * Any location for which the resultant memory type is any
2252966f4bb7SRichard Henderson      * type of Device memory is always treated as Outer Shareable.
2253966f4bb7SRichard Henderson      * Any location for which the resultant memory type is Normal
2254966f4bb7SRichard Henderson      * Inner Non-cacheable, Outer Non-cacheable is always treated
2255966f4bb7SRichard Henderson      * as Outer Shareable.
2256966f4bb7SRichard Henderson      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
2257966f4bb7SRichard Henderson      */
2258966f4bb7SRichard Henderson     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
2259966f4bb7SRichard Henderson         ret.shareability = 2;
2260966f4bb7SRichard Henderson     }
2261966f4bb7SRichard Henderson 
2262966f4bb7SRichard Henderson     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
2263966f4bb7SRichard Henderson     if (tagged && ret.attrs == 0xff) {
2264966f4bb7SRichard Henderson         ret.attrs = 0xf0;
2265966f4bb7SRichard Henderson     }
2266966f4bb7SRichard Henderson 
2267966f4bb7SRichard Henderson     return ret;
2268966f4bb7SRichard Henderson }
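
/*
 * Shareability example for the combination above: if either stage
 * reports Outer Shareable (2) the result is Outer Shareable, otherwise
 * Inner Shareable (3) wins over Non-shareable (0); the result is then
 * forced to Outer Shareable whenever the combined type is any Device
 * type or Normal Inner+Outer Non-cacheable (0x44), as noted above.
 */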
2269966f4bb7SRichard Henderson 
2270448e42fdSRichard Henderson /*
2271448e42fdSRichard Henderson  * MMU disabled.  S1 addresses within aa64 translation regimes are
2272448e42fdSRichard Henderson  * still checked for bounds -- see AArch64.S1DisabledOutput().
2273448e42fdSRichard Henderson  */
2274448e42fdSRichard Henderson static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
2275448e42fdSRichard Henderson                                    MMUAccessType access_type,
2276448e42fdSRichard Henderson                                    ARMMMUIdx mmu_idx, bool is_secure,
2277448e42fdSRichard Henderson                                    GetPhysAddrResult *result,
2278448e42fdSRichard Henderson                                    ARMMMUFaultInfo *fi)
2279448e42fdSRichard Henderson {
22805b74f9b4SRichard Henderson     uint8_t memattr = 0x00;    /* Device nGnRnE */
22815b74f9b4SRichard Henderson     uint8_t shareability = 0;  /* non-shareable */
2282448e42fdSRichard Henderson 
2283448e42fdSRichard Henderson     if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
2284448e42fdSRichard Henderson         int r_el = regime_el(env, mmu_idx);
22855b74f9b4SRichard Henderson 
2286448e42fdSRichard Henderson         if (arm_el_is_aa64(env, r_el)) {
2287448e42fdSRichard Henderson             int pamax = arm_pamax(env_archcpu(env));
2288448e42fdSRichard Henderson             uint64_t tcr = env->cp15.tcr_el[r_el];
2289448e42fdSRichard Henderson             int addrtop, tbi;
2290448e42fdSRichard Henderson 
2291448e42fdSRichard Henderson             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
2292448e42fdSRichard Henderson             if (access_type == MMU_INST_FETCH) {
2293448e42fdSRichard Henderson                 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
2294448e42fdSRichard Henderson             }
2295448e42fdSRichard Henderson             tbi = (tbi >> extract64(address, 55, 1)) & 1;
2296448e42fdSRichard Henderson             addrtop = (tbi ? 55 : 63);
2297448e42fdSRichard Henderson 
2298448e42fdSRichard Henderson             if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
2299448e42fdSRichard Henderson                 fi->type = ARMFault_AddressSize;
2300448e42fdSRichard Henderson                 fi->level = 0;
2301448e42fdSRichard Henderson                 fi->stage2 = false;
2302448e42fdSRichard Henderson                 return true;
2303448e42fdSRichard Henderson             }
2304448e42fdSRichard Henderson 
2305448e42fdSRichard Henderson             /*
2306448e42fdSRichard Henderson              * When TBI is disabled, we've just validated that all of the
2307448e42fdSRichard Henderson              * bits above PAMax are zero, so logically we only need to
2308448e42fdSRichard Henderson              * clear the top byte for TBI.  But it's clearer to follow
2309448e42fdSRichard Henderson              * the pseudocode set of addrdesc.paddress.
2310448e42fdSRichard Henderson              * the pseudocode's setting of addrdesc.paddress.
2311448e42fdSRichard Henderson             address = extract64(address, 0, 52);
2312448e42fdSRichard Henderson         }
2313448e42fdSRichard Henderson 
2314448e42fdSRichard Henderson         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
23155b74f9b4SRichard Henderson         if (r_el == 1) {
23165b74f9b4SRichard Henderson             uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2317448e42fdSRichard Henderson             if (hcr & HCR_DC) {
2318448e42fdSRichard Henderson                 if (hcr & HCR_DCT) {
2319448e42fdSRichard Henderson                     memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
2320448e42fdSRichard Henderson                 } else {
2321448e42fdSRichard Henderson                     memattr = 0xff;  /* Normal, WB, RWA */
2322448e42fdSRichard Henderson                 }
23235b74f9b4SRichard Henderson             }
23245b74f9b4SRichard Henderson         }
23255b74f9b4SRichard Henderson         if (memattr == 0 && access_type == MMU_INST_FETCH) {
2326448e42fdSRichard Henderson             if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
2327448e42fdSRichard Henderson                 memattr = 0xee;  /* Normal, WT, RA, NT */
2328448e42fdSRichard Henderson             } else {
2329448e42fdSRichard Henderson                 memattr = 0x44;  /* Normal, NC, No */
2330448e42fdSRichard Henderson             }
23315b74f9b4SRichard Henderson             shareability = 2; /* outer shareable */
2332448e42fdSRichard Henderson         }
23335b74f9b4SRichard Henderson         result->cacheattrs.is_s2_format = false;
23345b74f9b4SRichard Henderson     }
23355b74f9b4SRichard Henderson 
23367fa7ea8fSRichard Henderson     result->f.phys_addr = address;
23377fa7ea8fSRichard Henderson     result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
23387fa7ea8fSRichard Henderson     result->f.lg_page_size = TARGET_PAGE_BITS;
23395b74f9b4SRichard Henderson     result->cacheattrs.shareability = shareability;
2340448e42fdSRichard Henderson     result->cacheattrs.attrs = memattr;
2341448e42fdSRichard Henderson     return false;
2342448e42fdSRichard Henderson }
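
/*
 * Example of the address-size check above: with PAMax = 40 and TBI
 * clear (addrtop = 63), any nonzero bit in address[63:40] raises an
 * ARMFault_AddressSize fault at level 0 even though translation is
 * disabled; with TBI set, only bits [55:40] are checked and the top
 * byte is ignored.
 */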
2343448e42fdSRichard Henderson 
2344def8aa5bSRichard Henderson bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
23458ae08860SRichard Henderson                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
2346def8aa5bSRichard Henderson                                bool is_secure, GetPhysAddrResult *result,
2347def8aa5bSRichard Henderson                                ARMMMUFaultInfo *fi)
23488ae08860SRichard Henderson {
23498ae08860SRichard Henderson     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
23508ae08860SRichard Henderson 
23518ae08860SRichard Henderson     if (mmu_idx != s1_mmu_idx) {
23528ae08860SRichard Henderson         /*
23538ae08860SRichard Henderson          * Call ourselves recursively to do the stage 1 and then stage 2
23548ae08860SRichard Henderson          * translations if mmu_idx is a two-stage regime.
23558ae08860SRichard Henderson          */
23568ae08860SRichard Henderson         if (arm_feature(env, ARM_FEATURE_EL2)) {
23578ae08860SRichard Henderson             hwaddr ipa;
2358de05a709SRichard Henderson             int s1_prot;
23598ae08860SRichard Henderson             int ret;
2360c7637be3SRichard Henderson             bool ipa_secure, s2walk_secure;
2361de05a709SRichard Henderson             ARMCacheAttrs cacheattrs1;
23628ae08860SRichard Henderson             ARMMMUIdx s2_mmu_idx;
23638ae08860SRichard Henderson             bool is_el0;
2364ac76c2e5SRichard Henderson             uint64_t hcr;
23658ae08860SRichard Henderson 
2366def8aa5bSRichard Henderson             ret = get_phys_addr_with_secure(env, address, access_type,
2367def8aa5bSRichard Henderson                                             s1_mmu_idx, is_secure, result, fi);
23688ae08860SRichard Henderson 
23698ae08860SRichard Henderson             /* If S1 fails or S2 is disabled, return early.  */
23707e80c0a4SRichard Henderson             if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2,
23717e80c0a4SRichard Henderson                                                    is_secure)) {
23728ae08860SRichard Henderson                 return ret;
23738ae08860SRichard Henderson             }
23748ae08860SRichard Henderson 
23757fa7ea8fSRichard Henderson             ipa = result->f.phys_addr;
23767fa7ea8fSRichard Henderson             ipa_secure = result->f.attrs.secure;
2377c7637be3SRichard Henderson             if (is_secure) {
2378c7637be3SRichard Henderson                 /* Select TCR based on the NS bit from the S1 walk. */
2379c7637be3SRichard Henderson                 s2walk_secure = !(ipa_secure
2380c7637be3SRichard Henderson                                   ? env->cp15.vstcr_el2 & VSTCR_SW
2381c7637be3SRichard Henderson                                   : env->cp15.vtcr_el2 & VTCR_NSW);
23828ae08860SRichard Henderson             } else {
23838ae08860SRichard Henderson                 assert(!ipa_secure);
2384c7637be3SRichard Henderson                 s2walk_secure = false;
23858ae08860SRichard Henderson             }
23868ae08860SRichard Henderson 
2387c7637be3SRichard Henderson             s2_mmu_idx = (s2walk_secure
2388de05a709SRichard Henderson                           ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
2389d902ae75SRichard Henderson             is_el0 = mmu_idx == ARMMMUIdx_E10_0;
23908ae08860SRichard Henderson 
2391de05a709SRichard Henderson             /*
2392de05a709SRichard Henderson              * S1 is done, now do S2 translation.
2393de05a709SRichard Henderson              * Save the stage1 results so that we may merge
2394de05a709SRichard Henderson              * prot and cacheattrs later.
2395de05a709SRichard Henderson              */
23967fa7ea8fSRichard Henderson             s1_prot = result->f.prot;
2397de05a709SRichard Henderson             cacheattrs1 = result->cacheattrs;
2398de05a709SRichard Henderson             memset(result, 0, sizeof(*result));
2399de05a709SRichard Henderson 
240003ee9bbeSRichard Henderson             ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
2401c23f08a5SRichard Henderson                                      s2walk_secure, is_el0, result, fi);
24028ae08860SRichard Henderson             fi->s2addr = ipa;
2403de05a709SRichard Henderson 
24048ae08860SRichard Henderson             /* Combine the S1 and S2 perms.  */
24057fa7ea8fSRichard Henderson             result->f.prot &= s1_prot;
24068ae08860SRichard Henderson 
24078ae08860SRichard Henderson             /* If S2 fails, return early.  */
24088ae08860SRichard Henderson             if (ret) {
24098ae08860SRichard Henderson                 return ret;
24108ae08860SRichard Henderson             }
24118ae08860SRichard Henderson 
24128ae08860SRichard Henderson             /* Combine the S1 and S2 cache attributes. */
24132189c798SRichard Henderson             hcr = arm_hcr_el2_eff_secstate(env, is_secure);
2414ac76c2e5SRichard Henderson             if (hcr & HCR_DC) {
24158ae08860SRichard Henderson                 /*
24168ae08860SRichard Henderson                  * HCR.DC forces the first stage attributes to
24178ae08860SRichard Henderson                  *  Normal Non-Shareable,
24188ae08860SRichard Henderson                  *  Inner Write-Back Read-Allocate Write-Allocate,
24198ae08860SRichard Henderson                  *  Outer Write-Back Read-Allocate Write-Allocate.
24208ae08860SRichard Henderson                  * Do not overwrite Tagged within attrs.
24218ae08860SRichard Henderson                  */
2422de05a709SRichard Henderson                 if (cacheattrs1.attrs != 0xf0) {
2423de05a709SRichard Henderson                     cacheattrs1.attrs = 0xff;
24248ae08860SRichard Henderson                 }
2425de05a709SRichard Henderson                 cacheattrs1.shareability = 0;
24268ae08860SRichard Henderson             }
2427ac76c2e5SRichard Henderson             result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
2428de05a709SRichard Henderson                                                     result->cacheattrs);
24298ae08860SRichard Henderson 
24309b5ba97aSRichard Henderson             /*
24319b5ba97aSRichard Henderson              * Check if IPA translates to secure or non-secure PA space.
24329b5ba97aSRichard Henderson              * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
24339b5ba97aSRichard Henderson              */
24347fa7ea8fSRichard Henderson             result->f.attrs.secure =
24359b5ba97aSRichard Henderson                 (is_secure
24369b5ba97aSRichard Henderson                  && !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
24379b5ba97aSRichard Henderson                  && (ipa_secure
24389b5ba97aSRichard Henderson                      || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))));
24399b5ba97aSRichard Henderson 
24408ae08860SRichard Henderson             return false;
24418ae08860SRichard Henderson         } else {
24428ae08860SRichard Henderson             /*
24438ae08860SRichard Henderson              * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
24448ae08860SRichard Henderson              */
24458ae08860SRichard Henderson             mmu_idx = stage_1_mmu_idx(mmu_idx);
24468ae08860SRichard Henderson         }
24478ae08860SRichard Henderson     }
24488ae08860SRichard Henderson 
24498ae08860SRichard Henderson     /*
24508ae08860SRichard Henderson      * The page table entries may downgrade secure to non-secure, but
24518ae08860SRichard Henderson      * cannot upgrade a non-secure translation regime's attributes
24528ae08860SRichard Henderson      * to secure.
24538ae08860SRichard Henderson      */
24547fa7ea8fSRichard Henderson     result->f.attrs.secure = is_secure;
24557fa7ea8fSRichard Henderson     result->f.attrs.user = regime_is_user(env, mmu_idx);
24568ae08860SRichard Henderson 
24578ae08860SRichard Henderson     /*
24588ae08860SRichard Henderson      * Fast Context Switch Extension. This doesn't exist at all in v8.
24598ae08860SRichard Henderson      * In v7 and earlier it affects all stage 1 translations.
24608ae08860SRichard Henderson      */
24618ae08860SRichard Henderson     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
24628ae08860SRichard Henderson         && !arm_feature(env, ARM_FEATURE_V8)) {
24638ae08860SRichard Henderson         if (regime_el(env, mmu_idx) == 3) {
24648ae08860SRichard Henderson             address += env->cp15.fcseidr_s;
24658ae08860SRichard Henderson         } else {
24668ae08860SRichard Henderson             address += env->cp15.fcseidr_ns;
24678ae08860SRichard Henderson         }
24688ae08860SRichard Henderson     }
24698ae08860SRichard Henderson 
24708ae08860SRichard Henderson     if (arm_feature(env, ARM_FEATURE_PMSA)) {
24718ae08860SRichard Henderson         bool ret;
24727fa7ea8fSRichard Henderson         result->f.lg_page_size = TARGET_PAGE_BITS;
24738ae08860SRichard Henderson 
24748ae08860SRichard Henderson         if (arm_feature(env, ARM_FEATURE_V8)) {
24758ae08860SRichard Henderson             /* PMSAv8 */
24768ae08860SRichard Henderson             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
2477be0ca948SRichard Henderson                                        is_secure, result, fi);
24788ae08860SRichard Henderson         } else if (arm_feature(env, ARM_FEATURE_V7)) {
24798ae08860SRichard Henderson             /* PMSAv7 */
24808ae08860SRichard Henderson             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
2481957a0bb7SRichard Henderson                                        is_secure, result, fi);
24828ae08860SRichard Henderson         } else {
24838ae08860SRichard Henderson             /* Pre-v7 MPU */
24848ae08860SRichard Henderson             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
2485a5b5092fSRichard Henderson                                        is_secure, result, fi);
24868ae08860SRichard Henderson         }
24878ae08860SRichard Henderson         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
24888ae08860SRichard Henderson                       " mmu_idx %u -> %s (prot %c%c%c)\n",
24898ae08860SRichard Henderson                       access_type == MMU_DATA_LOAD ? "reading" :
24908ae08860SRichard Henderson                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
24918ae08860SRichard Henderson                       (uint32_t)address, mmu_idx,
24928ae08860SRichard Henderson                       ret ? "Miss" : "Hit",
24937fa7ea8fSRichard Henderson                       result->f.prot & PAGE_READ ? 'r' : '-',
24947fa7ea8fSRichard Henderson                       result->f.prot & PAGE_WRITE ? 'w' : '-',
24957fa7ea8fSRichard Henderson                       result->f.prot & PAGE_EXEC ? 'x' : '-');
24968ae08860SRichard Henderson 
24978ae08860SRichard Henderson         return ret;
24988ae08860SRichard Henderson     }
24998ae08860SRichard Henderson 
25008ae08860SRichard Henderson     /* Definitely a real MMU, not an MPU */
25018ae08860SRichard Henderson 
25027e80c0a4SRichard Henderson     if (regime_translation_disabled(env, mmu_idx, is_secure)) {
2503448e42fdSRichard Henderson         return get_phys_addr_disabled(env, address, access_type, mmu_idx,
2504448e42fdSRichard Henderson                                       is_secure, result, fi);
25058ae08860SRichard Henderson     }
25068ae08860SRichard Henderson     if (regime_using_lpae_format(env, mmu_idx)) {
2507c23f08a5SRichard Henderson         return get_phys_addr_lpae(env, address, access_type, mmu_idx,
2508c23f08a5SRichard Henderson                                   is_secure, false, result, fi);
25098ae08860SRichard Henderson     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
25108ae08860SRichard Henderson         return get_phys_addr_v6(env, address, access_type, mmu_idx,
251171e73bebSRichard Henderson                                 is_secure, result, fi);
25128ae08860SRichard Henderson     } else {
25138ae08860SRichard Henderson         return get_phys_addr_v5(env, address, access_type, mmu_idx,
2514b29c85d5SRichard Henderson                                 is_secure, result, fi);
25158ae08860SRichard Henderson     }
25168ae08860SRichard Henderson }
251723971205SRichard Henderson 
2518def8aa5bSRichard Henderson bool get_phys_addr(CPUARMState *env, target_ulong address,
2519def8aa5bSRichard Henderson                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
2520def8aa5bSRichard Henderson                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
2521def8aa5bSRichard Henderson {
252203bea66eSRichard Henderson     bool is_secure;
252303bea66eSRichard Henderson 
252403bea66eSRichard Henderson     switch (mmu_idx) {
252503bea66eSRichard Henderson     case ARMMMUIdx_E10_0:
252603bea66eSRichard Henderson     case ARMMMUIdx_E10_1:
252703bea66eSRichard Henderson     case ARMMMUIdx_E10_1_PAN:
252803bea66eSRichard Henderson     case ARMMMUIdx_E20_0:
252903bea66eSRichard Henderson     case ARMMMUIdx_E20_2:
253003bea66eSRichard Henderson     case ARMMMUIdx_E20_2_PAN:
253103bea66eSRichard Henderson     case ARMMMUIdx_Stage1_E0:
253203bea66eSRichard Henderson     case ARMMMUIdx_Stage1_E1:
253303bea66eSRichard Henderson     case ARMMMUIdx_Stage1_E1_PAN:
253403bea66eSRichard Henderson     case ARMMMUIdx_E2:
2535d902ae75SRichard Henderson         is_secure = arm_is_secure_below_el3(env);
2536d902ae75SRichard Henderson         break;
253703bea66eSRichard Henderson     case ARMMMUIdx_Stage2:
253803bea66eSRichard Henderson     case ARMMMUIdx_MPrivNegPri:
253903bea66eSRichard Henderson     case ARMMMUIdx_MUserNegPri:
254003bea66eSRichard Henderson     case ARMMMUIdx_MPriv:
254103bea66eSRichard Henderson     case ARMMMUIdx_MUser:
254203bea66eSRichard Henderson         is_secure = false;
254303bea66eSRichard Henderson         break;
2544d902ae75SRichard Henderson     case ARMMMUIdx_E3:
254503bea66eSRichard Henderson     case ARMMMUIdx_Stage2_S:
254603bea66eSRichard Henderson     case ARMMMUIdx_MSPrivNegPri:
254703bea66eSRichard Henderson     case ARMMMUIdx_MSUserNegPri:
254803bea66eSRichard Henderson     case ARMMMUIdx_MSPriv:
254903bea66eSRichard Henderson     case ARMMMUIdx_MSUser:
255003bea66eSRichard Henderson         is_secure = true;
255103bea66eSRichard Henderson         break;
255203bea66eSRichard Henderson     default:
255303bea66eSRichard Henderson         g_assert_not_reached();
255403bea66eSRichard Henderson     }
2555def8aa5bSRichard Henderson     return get_phys_addr_with_secure(env, address, access_type, mmu_idx,
255603bea66eSRichard Henderson                                      is_secure, result, fi);
2557def8aa5bSRichard Henderson }
2558def8aa5bSRichard Henderson 
255923971205SRichard Henderson hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
256023971205SRichard Henderson                                          MemTxAttrs *attrs)
256123971205SRichard Henderson {
256223971205SRichard Henderson     ARMCPU *cpu = ARM_CPU(cs);
256323971205SRichard Henderson     CPUARMState *env = &cpu->env;
2564de05a709SRichard Henderson     GetPhysAddrResult res = {};
256523971205SRichard Henderson     ARMMMUFaultInfo fi = {};
256623971205SRichard Henderson     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
2567de05a709SRichard Henderson     bool ret;
256823971205SRichard Henderson 
2569de05a709SRichard Henderson     ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
25707fa7ea8fSRichard Henderson     *attrs = res.f.attrs;
257123971205SRichard Henderson 
257223971205SRichard Henderson     if (ret) {
257323971205SRichard Henderson         return -1;
257423971205SRichard Henderson     }
25757fa7ea8fSRichard Henderson     return res.f.phys_addr;
257623971205SRichard Henderson }
2577