xref: /openbmc/qemu/target/arm/ptw.c (revision 67d762e716a7127ecc114e9708254316dd521911)
18ae08860SRichard Henderson /*
28ae08860SRichard Henderson  * ARM page table walking.
38ae08860SRichard Henderson  *
48ae08860SRichard Henderson  * This code is licensed under the GNU GPL v2 or later.
58ae08860SRichard Henderson  *
68ae08860SRichard Henderson  * SPDX-License-Identifier: GPL-2.0-or-later
78ae08860SRichard Henderson  */
88ae08860SRichard Henderson 
98ae08860SRichard Henderson #include "qemu/osdep.h"
108ae08860SRichard Henderson #include "qemu/log.h"
111f2e87e5SRichard Henderson #include "qemu/range.h"
1271943a1eSRichard Henderson #include "qemu/main-loop.h"
13f3639a64SRichard Henderson #include "exec/exec-all.h"
1474781c08SPhilippe Mathieu-Daudé #include "exec/page-protection.h"
158ae08860SRichard Henderson #include "cpu.h"
168ae08860SRichard Henderson #include "internals.h"
175a534314SPeter Maydell #include "cpu-features.h"
182c1f429dSRichard Henderson #include "idau.h"
19007cd176SRichard Henderson #ifdef CONFIG_TCG
2070f168f8SRichard Henderson # include "tcg/oversized-guest.h"
21007cd176SRichard Henderson #endif
228ae08860SRichard Henderson 
/*
 * S1Translate: bundled input parameters and output results for one
 * page table walk.  The in_* fields are set up by the caller; the
 * out_* fields are filled in by the walk machinery.
 */
typedef struct S1Translate {
    /*
     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
     * Together with in_space, specifies the architectural translation regime.
     */
    ARMMMUIdx in_mmu_idx;
    /*
     * in_ptw_idx: specifies which mmuidx to use for the actual
     * page table descriptor load operations. This will be one of the
     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     */
    ARMMMUIdx in_ptw_idx;
    /*
     * in_space: the security space for this walk. This plus
     * the in_mmu_idx specify the architectural translation regime.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     *
     * Note that the security space for the in_ptw_idx may be different
     * from that for the in_mmu_idx. We do not need to explicitly track
     * the in_ptw_idx security space because:
     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
     *    itself specifies the security space
     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
     *    space used for ptw reads is the same as that of the security
     *    space of the stage 1 translation for all cases except where
     *    stage 1 is Secure; in that case the only possibilities for
     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
     *    value being Stage2 vs Stage2_S distinguishes those.
     */
    ARMSecuritySpace in_space;
    /*
     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
     * accesses will not update the guest page table access flags
     * and will not change the state of the softmmu TLBs.
     */
    bool in_debug;
    /*
     * If this is stage 2 of a stage 1+2 page table walk, then this must
     * be true if stage 1 is an EL0 access; otherwise this is ignored.
     * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
     */
    bool in_s1_is_el0;
    /*
     * NOTE(review): the out_* semantics below are inferred from names and
     * the partial uses visible in this chunk (S1_ptw_translate sets
     * out_virt to the descriptor address); confirm against the full walk
     * implementation:
     *  - out_rw: descriptor may be updated in place (e.g. access flag)
     *  - out_be: descriptor loads are big-endian
     *  - out_space: security space used for the descriptor access
     *  - out_virt: virtual address of the descriptor being loaded
     *  - out_phys: physical address of the descriptor
     *  - out_host: host pointer for direct descriptor access, if any
     */
    bool out_rw;
    bool out_be;
    ARMSecuritySpace out_space;
    hwaddr out_virt;
    hwaddr out_phys;
    void *out_host;
} S1Translate;
756d2654ffSRichard Henderson 
/*
 * Forward declarations for the two top-level walk entry points, defined
 * later in this file.  The "gpc" variant additionally applies the
 * FEAT_RME Granule Protection Check to the resulting physical address;
 * the "nogpc" variant performs the translation only.
 */
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                vaddr address,
                                MMUAccessType access_type,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi);

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              vaddr address,
                              MMUAccessType access_type,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi);
873f5a74c5SRichard Henderson 
/*
 * This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS.
 * Index is the 3-bit PARANGE/PS encoding; value is the physical address
 * size in bits.
 */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
981c73d848SRichard Henderson 
99d54ffa54SDanny Canter uint8_t round_down_to_parange_index(uint8_t bit_size)
100d54ffa54SDanny Canter {
101d54ffa54SDanny Canter     for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) {
102d54ffa54SDanny Canter         if (pamax_map[i] <= bit_size) {
103d54ffa54SDanny Canter             return i;
104d54ffa54SDanny Canter         }
105d54ffa54SDanny Canter     }
106d54ffa54SDanny Canter     g_assert_not_reached();
107d54ffa54SDanny Canter }
108d54ffa54SDanny Canter 
109d54ffa54SDanny Canter uint8_t round_down_to_parange_bit_size(uint8_t bit_size)
110d54ffa54SDanny Canter {
111d54ffa54SDanny Canter     return pamax_map[round_down_to_parange_index(bit_size)];
112d54ffa54SDanny Canter }
113d54ffa54SDanny Canter 
11471e269fbSPeter Maydell /*
11571e269fbSPeter Maydell  * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
11671e269fbSPeter Maydell  * Note that machvirt_init calls this on a CPU that is inited but not realized!
11771e269fbSPeter Maydell  */
1181c73d848SRichard Henderson unsigned int arm_pamax(ARMCPU *cpu)
1191c73d848SRichard Henderson {
12022536b13SRichard Henderson     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
1211c73d848SRichard Henderson         unsigned int parange =
1221c73d848SRichard Henderson             FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
1231c73d848SRichard Henderson 
1241c73d848SRichard Henderson         /*
1251c73d848SRichard Henderson          * id_aa64mmfr0 is a read-only register so values outside of the
1261c73d848SRichard Henderson          * supported mappings can be considered an implementation error.
1271c73d848SRichard Henderson          */
1281c73d848SRichard Henderson         assert(parange < ARRAY_SIZE(pamax_map));
1291c73d848SRichard Henderson         return pamax_map[parange];
1301c73d848SRichard Henderson     }
13159e1b8a2SRichard Henderson 
13271e269fbSPeter Maydell     if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
13371e269fbSPeter Maydell         /* v7 or v8 with LPAE */
13422536b13SRichard Henderson         return 40;
13522536b13SRichard Henderson     }
13622536b13SRichard Henderson     /* Anything else */
13722536b13SRichard Henderson     return 32;
13822536b13SRichard Henderson }
1391c73d848SRichard Henderson 
1401d261255SRichard Henderson /*
1411d261255SRichard Henderson  * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
1421d261255SRichard Henderson  */
1431d261255SRichard Henderson ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
1441d261255SRichard Henderson {
1451d261255SRichard Henderson     switch (mmu_idx) {
1461d261255SRichard Henderson     case ARMMMUIdx_E10_0:
1471d261255SRichard Henderson         return ARMMMUIdx_Stage1_E0;
1481d261255SRichard Henderson     case ARMMMUIdx_E10_1:
1491d261255SRichard Henderson         return ARMMMUIdx_Stage1_E1;
1501d261255SRichard Henderson     case ARMMMUIdx_E10_1_PAN:
1511d261255SRichard Henderson         return ARMMMUIdx_Stage1_E1_PAN;
1521d261255SRichard Henderson     default:
1531d261255SRichard Henderson         return mmu_idx;
1541d261255SRichard Henderson     }
1551d261255SRichard Henderson }
1561d261255SRichard Henderson 
1571d261255SRichard Henderson ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
1581d261255SRichard Henderson {
1591d261255SRichard Henderson     return stage_1_mmu_idx(arm_mmu_idx(env));
1601d261255SRichard Henderson }
1611d261255SRichard Henderson 
/*
 * Return where we should do ptw loads from for a stage 2 walk.
 * This depends on whether the address we are looking up is a
 * Secure IPA or a NonSecure IPA, which we know from whether this is
 * Stage2 or Stage2_S.
 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
 */
static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
{
    bool s2walk_secure;

    /*
     * We're OK to check the current state of the CPU here because
     * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
     * changes.
     * (2) there's no way to do a lookup that cares about Stage 2 for a
     * different security state to the current one for AArch64, and AArch32
     * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
     * an NS stage 1+2 lookup while the NS bit is 0.)
     */
    if (!arm_el_is_aa64(env, 3)) {
        /* No AArch64 EL3 means no Secure EL2/Realm: plain NS walks. */
        return ARMMMUIdx_Phys_NS;
    }

    switch (arm_security_space_below_el3(env)) {
    case ARMSS_NonSecure:
        return ARMMMUIdx_Phys_NS;
    case ARMSS_Realm:
        return ARMMMUIdx_Phys_Realm;
    case ARMSS_Secure:
        /*
         * VSTCR_EL2.SW and VTCR_EL2.NSW select whether table walks for
         * the Secure / NonSecure IPA space respectively are themselves
         * performed as Secure or NonSecure physical accesses.
         */
        if (stage2idx == ARMMMUIdx_Stage2_S) {
            s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
        } else {
            s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
        }
        return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
    default:
        g_assert_not_reached();
    }
}
202fcc0b041SPeter Maydell 
20311552bb0SRichard Henderson static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
20411552bb0SRichard Henderson {
20511552bb0SRichard Henderson     return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
20611552bb0SRichard Henderson }
20711552bb0SRichard Henderson 
2083b318aaeSRichard Henderson /* Return the TTBR associated with this translation regime */
2093b318aaeSRichard Henderson static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
2103b318aaeSRichard Henderson {
2113b318aaeSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2) {
2123b318aaeSRichard Henderson         return env->cp15.vttbr_el2;
2133b318aaeSRichard Henderson     }
2143b318aaeSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2_S) {
2153b318aaeSRichard Henderson         return env->cp15.vsttbr_el2;
2163b318aaeSRichard Henderson     }
2173b318aaeSRichard Henderson     if (ttbrn == 0) {
2183b318aaeSRichard Henderson         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
2193b318aaeSRichard Henderson     } else {
2203b318aaeSRichard Henderson         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
2213b318aaeSRichard Henderson     }
2223b318aaeSRichard Henderson }
2233b318aaeSRichard Henderson 
/*
 * Return true if the specified stage of address translation is disabled.
 * @mmu_idx identifies the translation regime to check (stage 1, stage 2,
 * or a physical-address index); @space is the security space for the
 * lookup, used to pick the effective HCR_EL2 (and, for M-profile, the
 * MPU_CTRL bank).
 */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        ARMSecuritySpace space)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: "translation" enabled/disabled is the MPU enable. */
        bool is_secure = arm_space_is_secure(space);
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }


    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
        break;

    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    /* Otherwise the regime's SCTLR.M bit is the sole enable. */
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
2988db1a3a0SRichard Henderson 
/*
 * Perform the FEAT_RME Granule Protection Check on @paddress in security
 * space @pspace, walking the Granule Protection Table rooted at GPTBR_EL3.
 * Returns true if the access is permitted; otherwise returns false and
 * fills in @fi with the GPC fault class, level and faulting address.
 * GPT descriptor loads are always made from the Root physical address
 * space, little-endian.
 */
static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
                                     ARMSecuritySpace pspace,
                                     ARMMMUFaultInfo *fi)
{
    MemTxAttrs attrs = {
        .secure = true,
        .space = ARMSS_Root,
    };
    ARMCPU *cpu = env_archcpu(env);
    uint64_t gpccr = env->cp15.gpccr_el3;
    unsigned pps, pgs, l0gptsz, level = 0;
    uint64_t tableaddr, pps_mask, align, entry, index;
    AddressSpace *as;
    MemTxResult result;
    int gpi;

    if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
        /* GPC disabled: all accesses pass. */
        return true;
    }

    /*
     * GPC Priority 1 (R_GMGRR):
     * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
     * the access fails as GPT walk fault at level 0.
     */

    /*
     * Configuration of PPS to a value exceeding the implemented
     * physical address size is invalid.
     */
    pps = FIELD_EX64(gpccr, GPCCR, PPS);
    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
        goto fault_walk;
    }
    pps = pamax_map[pps];
    pps_mask = MAKE_64BIT_MASK(0, pps);

    switch (FIELD_EX64(gpccr, GPCCR, SH)) {
    case 0b10: /* outer shareable */
        break;
    case 0b00: /* non-shareable */
    case 0b11: /* inner shareable */
        /* Inner and Outer non-cacheable requires Outer shareable. */
        if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
            FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
            goto fault_walk;
        }
        break;
    default:   /* reserved */
        goto fault_walk;
    }

    switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
    case 0b00: /* 4KB */
        pgs = 12;
        break;
    case 0b01: /* 64KB */
        pgs = 16;
        break;
    case 0b10: /* 16KB */
        pgs = 14;
        break;
    default: /* reserved */
        goto fault_walk;
    }

    /* Note this field is read-only and fixed at reset. */
    l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);

    /*
     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
     * R_CPDSB: A NonSecure physical address input exceeding PPS
     * does not experience any fault.
     */
    if (paddress & ~pps_mask) {
        if (pspace == ARMSS_NonSecure) {
            return true;
        }
        goto fault_size;
    }

    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
    tableaddr = env->cp15.gptbr_el3 << 12;
    if (tableaddr & ~pps_mask) {
        goto fault_size;
    }

    /*
     * BADDR is aligned per a function of PPS and L0GPTSZ.
     * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
     * unlike the RES0 bits of the GPT entries (R_XNKFZ).
     */
    align = MAX(pps - l0gptsz + 3, 12);
    align = MAKE_64BIT_MASK(0, align);
    tableaddr &= ~align;

    as = arm_addressspace(env_cpu(env), attrs);

    /* Level 0 lookup. */
    index = extract64(paddress, l0gptsz, pps - l0gptsz);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* block descriptor */
        if (entry >> 8) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        gpi = extract32(entry, 4, 4);
        goto found;
    case 3: /* table descriptor */
        tableaddr = entry & ~0xf;
        align = MAX(l0gptsz - pgs - 1, 12);
        align = MAKE_64BIT_MASK(0, align);
        if (tableaddr & (~pps_mask | align)) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        break;
    default: /* invalid */
        goto fault_walk;
    }

    /* Level 1 lookup */
    level = 1;
    index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* contiguous descriptor */
        if (entry >> 10) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        /*
         * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
         * and because we cannot invalidate by pa, and thus will always
         * flush entire tlbs, we don't actually care about the range here
         * and can simply extract the GPI as the result.
         */
        if (extract32(entry, 8, 2) == 0) {
            goto fault_walk; /* reserved contig */
        }
        gpi = extract32(entry, 4, 4);
        break;
    default:
        /* Granules descriptor: 4-bit GPI per granule in the entry. */
        index = extract64(paddress, pgs, 4);
        gpi = extract64(entry, index * 4, 4);
        break;
    }

 found:
    switch (gpi) {
    case 0b0000: /* no access */
        break;
    case 0b1111: /* all access */
        return true;
    case 0b1000:
    case 0b1001:
    case 0b1010:
    case 0b1011:
        /* Access allowed only from the matching security space. */
        if (pspace == (gpi & 3)) {
            return true;
        }
        break;
    default:
        goto fault_walk; /* reserved */
    }

    fi->gpcf = GPCF_Fail;
    goto fault_common;
 fault_eabt:
    fi->gpcf = GPCF_EABT;
    goto fault_common;
 fault_size:
    fi->gpcf = GPCF_AddressSize;
    goto fault_common;
 fault_walk:
    fi->gpcf = GPCF_Walk;
 fault_common:
    fi->level = level;
    fi->paddr = paddress;
    fi->paddr_space = pspace;
    return false;
}
48946f38c97SRichard Henderson 
static bool S1_attrs_are_device(uint8_t attrs)
{
    /*
     * Device memory iff the upper nibble of the MAIR attribute is zero.
     * This slightly under-decodes the MAIR_ELx field:
     * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
     * 0b0000dd1x is UNPREDICTABLE.
     */
    return (attrs >> 4) == 0;
}
499728b923fSRichard Henderson 
500f3639a64SRichard Henderson static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
50111552bb0SRichard Henderson {
50211552bb0SRichard Henderson     /*
50311552bb0SRichard Henderson      * For an S1 page table walk, the stage 1 attributes are always
50411552bb0SRichard Henderson      * some form of "this is Normal memory". The combined S1+S2
50511552bb0SRichard Henderson      * attributes are therefore only Device if stage 2 specifies Device.
50611552bb0SRichard Henderson      * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
50711552bb0SRichard Henderson      * ie when cacheattrs.attrs bits [3:2] are 0b00.
50811552bb0SRichard Henderson      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
50911552bb0SRichard Henderson      * when cacheattrs.attrs bit [2] is 0.
51011552bb0SRichard Henderson      */
511ac76c2e5SRichard Henderson     if (hcr & HCR_FWB) {
512f3639a64SRichard Henderson         return (attrs & 0x4) == 0;
51311552bb0SRichard Henderson     } else {
514f3639a64SRichard Henderson         return (attrs & 0xc) == 0;
51511552bb0SRichard Henderson     }
51611552bb0SRichard Henderson }
51711552bb0SRichard Henderson 
5183f74da44SPeter Maydell static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
5193f74da44SPeter Maydell                                           ARMMMUIdx s2_mmu_idx)
5203f74da44SPeter Maydell {
5213f74da44SPeter Maydell     /*
5223f74da44SPeter Maydell      * Return the security space to use for stage 2 when doing
5233f74da44SPeter Maydell      * the S1 page table descriptor load.
5243f74da44SPeter Maydell      */
5253f74da44SPeter Maydell     if (regime_is_stage2(s2_mmu_idx)) {
5263f74da44SPeter Maydell         /*
5273f74da44SPeter Maydell          * The security space for ptw reads is almost always the same
5283f74da44SPeter Maydell          * as that of the security space of the stage 1 translation.
5293f74da44SPeter Maydell          * The only exception is when stage 1 is Secure; in that case
5303f74da44SPeter Maydell          * the ptw read might be to the Secure or the NonSecure space
5313f74da44SPeter Maydell          * (but never Realm or Root), and the s2_mmu_idx tells us which.
5323f74da44SPeter Maydell          * Root translations are always single-stage.
5333f74da44SPeter Maydell          */
5343f74da44SPeter Maydell         if (s1_space == ARMSS_Secure) {
5353f74da44SPeter Maydell             return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
5363f74da44SPeter Maydell         } else {
5373f74da44SPeter Maydell             assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
5383f74da44SPeter Maydell             assert(s1_space != ARMSS_Root);
5393f74da44SPeter Maydell             return s1_space;
5403f74da44SPeter Maydell         }
5413f74da44SPeter Maydell     } else {
5423f74da44SPeter Maydell         /* ptw loads are from phys: the mmu idx itself says which space */
5433f74da44SPeter Maydell         return arm_phys_to_space(s2_mmu_idx);
5443f74da44SPeter Maydell     }
5453f74da44SPeter Maydell }
5463f74da44SPeter Maydell 
5474f51edd3SPeter Maydell static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
5484f51edd3SPeter Maydell {
5494f51edd3SPeter Maydell     /*
5504f51edd3SPeter Maydell      * For stage 2 faults in Secure EL22, S1NS indicates
5514f51edd3SPeter Maydell      * whether the faulting IPA is in the Secure or NonSecure
5524f51edd3SPeter Maydell      * IPA space. For all other kinds of fault, it is false.
5534f51edd3SPeter Maydell      */
5544f51edd3SPeter Maydell     return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
5554f51edd3SPeter Maydell         && s2_mmu_idx == ARMMMUIdx_Stage2_S;
5564f51edd3SPeter Maydell }
5574f51edd3SPeter Maydell 
/*
 * Translate a S1 pagetable walk through S2 if needed.
 *
 * Resolves the descriptor address @addr (an IPA when stage 2 is in
 * use) and records the result in ptw->out_*:
 *   out_virt:  the input descriptor address
 *   out_phys:  the physical address of the descriptor
 *   out_host:  host pointer for RAM-backed descriptors, else NULL
 *   out_rw:    whether the host page is writable (non-debug path only)
 *   out_space: security space for the descriptor access
 *   out_be:    whether descriptor accesses are big-endian
 *
 * Returns true on success; on failure fills in *fi and returns false.
 */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
        S1Translate s2ptw = {
            .in_mmu_idx = s2_mmu_idx,
            .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
            .in_space = s2_space,
            .in_debug = true,
        };
        GetPhysAddrResult s2 = { };

        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
            goto fail;
        }

        ptw->out_phys = s2.f.phys_addr;
        pte_attrs = s2.cacheattrs.attrs;
        ptw->out_host = NULL;
        ptw->out_rw = false;
        ptw->out_space = s2.f.attrs.space;
    } else {
#ifdef CONFIG_TCG
        CPUTLBEntryFull *full;
        int flags;

        /*
         * Probe via the softmmu TLB; env->tlb_fi routes any fault
         * information from the probe into *fi.
         */
        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
                                      arm_to_core_mmu_idx(s2_mmu_idx),
                                      &ptw->out_host, &full);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->extra.arm.pte_attrs;
        ptw->out_space = full->attrs.space;
#else
        g_assert_not_reached();
#endif
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
            return false;
        }
    }

    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    /* Common fault bookkeeping; a GPC fault on the output address
     * of the stage 2 lookup is reported as a fault on the walk. */
    assert(fi->type != ARMFault_None);
    if (fi->type == ARMFault_GPCFOnOutput) {
        fi->type = ARMFault_GPCFOnWalk;
    }
    fi->s2addr = addr;
    fi->stage2 = regime_is_stage2(s2_mmu_idx);
    fi->s1ptw = fi->stage2;
    fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
    return false;
}
64511552bb0SRichard Henderson 
64611552bb0SRichard Henderson /* All loads done in the course of a page table walk go through here. */
64793e5b3a6SRichard Henderson static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
6486d2654ffSRichard Henderson                             ARMMMUFaultInfo *fi)
64911552bb0SRichard Henderson {
6505e79887bSRichard Henderson     CPUState *cs = env_cpu(env);
65171943a1eSRichard Henderson     void *host = ptw->out_host;
65211552bb0SRichard Henderson     uint32_t data;
65311552bb0SRichard Henderson 
65471943a1eSRichard Henderson     if (likely(host)) {
655f3639a64SRichard Henderson         /* Page tables are in RAM, and we have the host address. */
65671943a1eSRichard Henderson         data = qatomic_read((uint32_t *)host);
6574e7a2c98SRichard Henderson         if (ptw->out_be) {
65871943a1eSRichard Henderson             data = be32_to_cpu(data);
65911552bb0SRichard Henderson         } else {
66071943a1eSRichard Henderson             data = le32_to_cpu(data);
66111552bb0SRichard Henderson         }
662f3639a64SRichard Henderson     } else {
663f3639a64SRichard Henderson         /* Page tables are in MMIO. */
66490c66293SRichard Henderson         MemTxAttrs attrs = {
66590c66293SRichard Henderson             .space = ptw->out_space,
666b02f5e06SPeter Maydell             .secure = arm_space_is_secure(ptw->out_space),
66790c66293SRichard Henderson         };
668f3639a64SRichard Henderson         AddressSpace *as = arm_addressspace(cs, attrs);
669f3639a64SRichard Henderson         MemTxResult result = MEMTX_OK;
670f3639a64SRichard Henderson 
671f3639a64SRichard Henderson         if (ptw->out_be) {
672f3639a64SRichard Henderson             data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
673f3639a64SRichard Henderson         } else {
674f3639a64SRichard Henderson             data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
67511552bb0SRichard Henderson         }
676f3639a64SRichard Henderson         if (unlikely(result != MEMTX_OK)) {
67711552bb0SRichard Henderson             fi->type = ARMFault_SyncExternalOnWalk;
67811552bb0SRichard Henderson             fi->ea = arm_extabort_type(result);
67911552bb0SRichard Henderson             return 0;
68011552bb0SRichard Henderson         }
681f3639a64SRichard Henderson     }
682f3639a64SRichard Henderson     return data;
683f3639a64SRichard Henderson }
68411552bb0SRichard Henderson 
/*
 * Load a 64-bit page table descriptor from the location previously
 * resolved by S1_ptw_translate() (ptw->out_*), honouring the
 * regime's descriptor endianness.  On an MMIO external abort, sets
 * fi->type/fi->ea and returns 0.
 */
static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    CPUState *cs = env_cpu(env);
    void *host = ptw->out_host;
    uint64_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
#ifdef CONFIG_ATOMIC64
        data = qatomic_read__nocheck((uint64_t *)host);
        if (ptw->out_be) {
            data = be64_to_cpu(data);
        } else {
            data = le64_to_cpu(data);
        }
#else
        /* No 64-bit host atomics: fall back to plain loads. */
        if (ptw->out_be) {
            data = ldq_be_p(host);
        } else {
            data = ldq_le_p(host);
        }
#endif
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
73011552bb0SRichard Henderson 
/*
 * Atomically update a 64-bit page table descriptor in place: compare
 * against @old_val and install @new_val on a match (used for hardware
 * update of access/dirty bits, FEAT_HAFDBS).  Returns the descriptor
 * value observed before the update (== @old_val iff the swap was
 * performed).  On failure, fills in *fi; callers must check fi->type.
 */
static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                             uint64_t new_val, S1Translate *ptw,
                             ARMMMUFaultInfo *fi)
{
#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
    uint64_t cur_val;
    void *host = ptw->out_host;

    if (unlikely(!host)) {
        /* Page table in MMIO Memory Region */
        /* Emulate cmpxchg as load/compare/store under the BQL. */
        CPUState *cs = env_cpu(env);
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;
        bool need_lock = !bql_locked();

        if (need_lock) {
            bql_lock();
        }
        if (ptw->out_be) {
            cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
            if (unlikely(result != MEMTX_OK)) {
                fi->type = ARMFault_SyncExternalOnWalk;
                fi->ea = arm_extabort_type(result);
                if (need_lock) {
                    bql_unlock();
                }
                return old_val;
            }
            if (cur_val == old_val) {
                address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result);
                if (unlikely(result != MEMTX_OK)) {
                    fi->type = ARMFault_SyncExternalOnWalk;
                    fi->ea = arm_extabort_type(result);
                    if (need_lock) {
                        bql_unlock();
                    }
                    return old_val;
                }
                cur_val = new_val;
            }
        } else {
            cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
            if (unlikely(result != MEMTX_OK)) {
                fi->type = ARMFault_SyncExternalOnWalk;
                fi->ea = arm_extabort_type(result);
                if (need_lock) {
                    bql_unlock();
                }
                return old_val;
            }
            if (cur_val == old_val) {
                address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result);
                if (unlikely(result != MEMTX_OK)) {
                    fi->type = ARMFault_SyncExternalOnWalk;
                    fi->ea = arm_extabort_type(result);
                    if (need_lock) {
                        bql_unlock();
                    }
                    return old_val;
                }
                cur_val = new_val;
            }
        }
        if (need_lock) {
            bql_unlock();
        }
        return cur_val;
    }

    /*
     * Raising a stage2 Protection fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, ptw->out_virt, 0,
                                      MMU_DATA_STORE,
                                      arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                      NULL, NULL);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            /*
             * We know this must be a stage 2 fault because the granule
             * protection table does not separately track read and write
             * permission, so all GPC faults are caught in S1_ptw_translate():
             * we only get here for "readable but not writeable".
             */
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

    /* RAM-backed descriptor: use a real host cmpxchg where possible. */
#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    bool locked = bql_locked();
    if (!locked) {
        bql_lock();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        bql_unlock();
    }
#endif

    return cur_val;
#else
    /* AArch32 does not have FEAT_HAFDBS; non-TCG guests only use debug-mode. */
    g_assert_not_reached();
#endif
}
88571943a1eSRichard Henderson 
8864c74ab15SRichard Henderson static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
8874c74ab15SRichard Henderson                                      uint32_t *table, uint32_t address)
8884c74ab15SRichard Henderson {
8894c74ab15SRichard Henderson     /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
890c1547bbaSPeter Maydell     uint64_t tcr = regime_tcr(env, mmu_idx);
8919e70e26cSPeter Maydell     int maskshift = extract32(tcr, 0, 3);
8929e70e26cSPeter Maydell     uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
8939e70e26cSPeter Maydell     uint32_t base_mask;
8944c74ab15SRichard Henderson 
8959e70e26cSPeter Maydell     if (address & mask) {
8969e70e26cSPeter Maydell         if (tcr & TTBCR_PD1) {
8974c74ab15SRichard Henderson             /* Translation table walk disabled for TTBR1 */
8984c74ab15SRichard Henderson             return false;
8994c74ab15SRichard Henderson         }
9004c74ab15SRichard Henderson         *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
9014c74ab15SRichard Henderson     } else {
9029e70e26cSPeter Maydell         if (tcr & TTBCR_PD0) {
9034c74ab15SRichard Henderson             /* Translation table walk disabled for TTBR0 */
9044c74ab15SRichard Henderson             return false;
9054c74ab15SRichard Henderson         }
9069e70e26cSPeter Maydell         base_mask = ~((uint32_t)0x3fffu >> maskshift);
9079e70e26cSPeter Maydell         *table = regime_ttbr(env, mmu_idx, 0) & base_mask;
9084c74ab15SRichard Henderson     }
9094c74ab15SRichard Henderson     *table |= (address >> 18) & 0x3ffc;
9104c74ab15SRichard Henderson     return true;
9114c74ab15SRichard Henderson }
9124c74ab15SRichard Henderson 
9134845d3beSRichard Henderson /*
9144845d3beSRichard Henderson  * Translate section/page access permissions to page R/W protection flags
9154845d3beSRichard Henderson  * @env:         CPUARMState
9164845d3beSRichard Henderson  * @mmu_idx:     MMU index indicating required translation regime
9174845d3beSRichard Henderson  * @ap:          The 3-bit access permissions (AP[2:0])
9184845d3beSRichard Henderson  * @domain_prot: The 2-bit domain access permissions
9196f2d9d74STimofey Kutergin  * @is_user: TRUE if accessing from PL0
9204845d3beSRichard Henderson  */
9216f2d9d74STimofey Kutergin static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
9226f2d9d74STimofey Kutergin                          int ap, int domain_prot, bool is_user)
9234845d3beSRichard Henderson {
9244845d3beSRichard Henderson     if (domain_prot == 3) {
9254845d3beSRichard Henderson         return PAGE_READ | PAGE_WRITE;
9264845d3beSRichard Henderson     }
9274845d3beSRichard Henderson 
9284845d3beSRichard Henderson     switch (ap) {
9294845d3beSRichard Henderson     case 0:
9304845d3beSRichard Henderson         if (arm_feature(env, ARM_FEATURE_V7)) {
9314845d3beSRichard Henderson             return 0;
9324845d3beSRichard Henderson         }
9334845d3beSRichard Henderson         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
9344845d3beSRichard Henderson         case SCTLR_S:
9354845d3beSRichard Henderson             return is_user ? 0 : PAGE_READ;
9364845d3beSRichard Henderson         case SCTLR_R:
9374845d3beSRichard Henderson             return PAGE_READ;
9384845d3beSRichard Henderson         default:
9394845d3beSRichard Henderson             return 0;
9404845d3beSRichard Henderson         }
9414845d3beSRichard Henderson     case 1:
9424845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9434845d3beSRichard Henderson     case 2:
9444845d3beSRichard Henderson         if (is_user) {
9454845d3beSRichard Henderson             return PAGE_READ;
9464845d3beSRichard Henderson         } else {
9474845d3beSRichard Henderson             return PAGE_READ | PAGE_WRITE;
9484845d3beSRichard Henderson         }
9494845d3beSRichard Henderson     case 3:
9504845d3beSRichard Henderson         return PAGE_READ | PAGE_WRITE;
9514845d3beSRichard Henderson     case 4: /* Reserved.  */
9524845d3beSRichard Henderson         return 0;
9534845d3beSRichard Henderson     case 5:
9544845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ;
9554845d3beSRichard Henderson     case 6:
9564845d3beSRichard Henderson         return PAGE_READ;
9574845d3beSRichard Henderson     case 7:
9584845d3beSRichard Henderson         if (!arm_feature(env, ARM_FEATURE_V6K)) {
9594845d3beSRichard Henderson             return 0;
9604845d3beSRichard Henderson         }
9614845d3beSRichard Henderson         return PAGE_READ;
9624845d3beSRichard Henderson     default:
9634845d3beSRichard Henderson         g_assert_not_reached();
9644845d3beSRichard Henderson     }
9654845d3beSRichard Henderson }
9664845d3beSRichard Henderson 
9674845d3beSRichard Henderson /*
9686f2d9d74STimofey Kutergin  * Translate section/page access permissions to page R/W protection flags
9696f2d9d74STimofey Kutergin  * @env:         CPUARMState
9706f2d9d74STimofey Kutergin  * @mmu_idx:     MMU index indicating required translation regime
9716f2d9d74STimofey Kutergin  * @ap:          The 3-bit access permissions (AP[2:0])
9726f2d9d74STimofey Kutergin  * @domain_prot: The 2-bit domain access permissions
9736f2d9d74STimofey Kutergin  */
9746f2d9d74STimofey Kutergin static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
9756f2d9d74STimofey Kutergin                          int ap, int domain_prot)
9766f2d9d74STimofey Kutergin {
9776f2d9d74STimofey Kutergin    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
9786f2d9d74STimofey Kutergin                                 regime_is_user(env, mmu_idx));
9796f2d9d74STimofey Kutergin }
9806f2d9d74STimofey Kutergin 
9816f2d9d74STimofey Kutergin /*
9824845d3beSRichard Henderson  * Translate section/page access permissions to page R/W protection flags.
9834845d3beSRichard Henderson  * @ap:      The 2-bit simple AP (AP[2:1])
9844845d3beSRichard Henderson  * @is_user: TRUE if accessing from PL0
9854845d3beSRichard Henderson  */
9864845d3beSRichard Henderson static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
9874845d3beSRichard Henderson {
9884845d3beSRichard Henderson     switch (ap) {
9894845d3beSRichard Henderson     case 0:
9904845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9914845d3beSRichard Henderson     case 1:
9924845d3beSRichard Henderson         return PAGE_READ | PAGE_WRITE;
9934845d3beSRichard Henderson     case 2:
9944845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ;
9954845d3beSRichard Henderson     case 3:
9964845d3beSRichard Henderson         return PAGE_READ;
9974845d3beSRichard Henderson     default:
9984845d3beSRichard Henderson         g_assert_not_reached();
9994845d3beSRichard Henderson     }
10004845d3beSRichard Henderson }
10014845d3beSRichard Henderson 
10024845d3beSRichard Henderson static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
10034845d3beSRichard Henderson {
10044845d3beSRichard Henderson     return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
10054845d3beSRichard Henderson }
10064845d3beSRichard Henderson 
10076d2654ffSRichard Henderson static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
10086d2654ffSRichard Henderson                              uint32_t address, MMUAccessType access_type,
10096d2654ffSRichard Henderson                              GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
1010f2d2f5ceSRichard Henderson {
1011f2d2f5ceSRichard Henderson     int level = 1;
1012f2d2f5ceSRichard Henderson     uint32_t table;
1013f2d2f5ceSRichard Henderson     uint32_t desc;
1014f2d2f5ceSRichard Henderson     int type;
1015f2d2f5ceSRichard Henderson     int ap;
1016f2d2f5ceSRichard Henderson     int domain = 0;
1017f2d2f5ceSRichard Henderson     int domain_prot;
1018f2d2f5ceSRichard Henderson     hwaddr phys_addr;
1019f2d2f5ceSRichard Henderson     uint32_t dacr;
1020f2d2f5ceSRichard Henderson 
1021f2d2f5ceSRichard Henderson     /* Pagetable walk.  */
1022f2d2f5ceSRichard Henderson     /* Lookup l1 descriptor.  */
10236d2654ffSRichard Henderson     if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
1024f2d2f5ceSRichard Henderson         /* Section translation fault if page walk is disabled by PD0 or PD1 */
1025f2d2f5ceSRichard Henderson         fi->type = ARMFault_Translation;
1026f2d2f5ceSRichard Henderson         goto do_fault;
1027f2d2f5ceSRichard Henderson     }
102893e5b3a6SRichard Henderson     if (!S1_ptw_translate(env, ptw, table, fi)) {
102993e5b3a6SRichard Henderson         goto do_fault;
103093e5b3a6SRichard Henderson     }
103193e5b3a6SRichard Henderson     desc = arm_ldl_ptw(env, ptw, fi);
1032f2d2f5ceSRichard Henderson     if (fi->type != ARMFault_None) {
1033f2d2f5ceSRichard Henderson         goto do_fault;
1034f2d2f5ceSRichard Henderson     }
1035f2d2f5ceSRichard Henderson     type = (desc & 3);
1036f2d2f5ceSRichard Henderson     domain = (desc >> 5) & 0x0f;
10376d2654ffSRichard Henderson     if (regime_el(env, ptw->in_mmu_idx) == 1) {
1038f2d2f5ceSRichard Henderson         dacr = env->cp15.dacr_ns;
1039f2d2f5ceSRichard Henderson     } else {
1040f2d2f5ceSRichard Henderson         dacr = env->cp15.dacr_s;
1041f2d2f5ceSRichard Henderson     }
1042f2d2f5ceSRichard Henderson     domain_prot = (dacr >> (domain * 2)) & 3;
1043f2d2f5ceSRichard Henderson     if (type == 0) {
1044f2d2f5ceSRichard Henderson         /* Section translation fault.  */
1045f2d2f5ceSRichard Henderson         fi->type = ARMFault_Translation;
1046f2d2f5ceSRichard Henderson         goto do_fault;
1047f2d2f5ceSRichard Henderson     }
1048f2d2f5ceSRichard Henderson     if (type != 2) {
1049f2d2f5ceSRichard Henderson         level = 2;
1050f2d2f5ceSRichard Henderson     }
1051f2d2f5ceSRichard Henderson     if (domain_prot == 0 || domain_prot == 2) {
1052f2d2f5ceSRichard Henderson         fi->type = ARMFault_Domain;
1053f2d2f5ceSRichard Henderson         goto do_fault;
1054f2d2f5ceSRichard Henderson     }
1055f2d2f5ceSRichard Henderson     if (type == 2) {
1056f2d2f5ceSRichard Henderson         /* 1Mb section.  */
1057f2d2f5ceSRichard Henderson         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
1058f2d2f5ceSRichard Henderson         ap = (desc >> 10) & 3;
10597fa7ea8fSRichard Henderson         result->f.lg_page_size = 20; /* 1MB */
1060f2d2f5ceSRichard Henderson     } else {
1061f2d2f5ceSRichard Henderson         /* Lookup l2 entry.  */
1062f2d2f5ceSRichard Henderson         if (type == 1) {
1063f2d2f5ceSRichard Henderson             /* Coarse pagetable.  */
1064f2d2f5ceSRichard Henderson             table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
1065f2d2f5ceSRichard Henderson         } else {
1066f2d2f5ceSRichard Henderson             /* Fine pagetable.  */
1067f2d2f5ceSRichard Henderson             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
1068f2d2f5ceSRichard Henderson         }
106993e5b3a6SRichard Henderson         if (!S1_ptw_translate(env, ptw, table, fi)) {
107093e5b3a6SRichard Henderson             goto do_fault;
107193e5b3a6SRichard Henderson         }
107293e5b3a6SRichard Henderson         desc = arm_ldl_ptw(env, ptw, fi);
1073f2d2f5ceSRichard Henderson         if (fi->type != ARMFault_None) {
1074f2d2f5ceSRichard Henderson             goto do_fault;
1075f2d2f5ceSRichard Henderson         }
1076f2d2f5ceSRichard Henderson         switch (desc & 3) {
1077f2d2f5ceSRichard Henderson         case 0: /* Page translation fault.  */
1078f2d2f5ceSRichard Henderson             fi->type = ARMFault_Translation;
1079f2d2f5ceSRichard Henderson             goto do_fault;
1080f2d2f5ceSRichard Henderson         case 1: /* 64k page.  */
1081f2d2f5ceSRichard Henderson             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
1082f2d2f5ceSRichard Henderson             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
10837fa7ea8fSRichard Henderson             result->f.lg_page_size = 16;
1084f2d2f5ceSRichard Henderson             break;
1085f2d2f5ceSRichard Henderson         case 2: /* 4k page.  */
1086f2d2f5ceSRichard Henderson             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
1087f2d2f5ceSRichard Henderson             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
10887fa7ea8fSRichard Henderson             result->f.lg_page_size = 12;
1089f2d2f5ceSRichard Henderson             break;
1090f2d2f5ceSRichard Henderson         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
1091f2d2f5ceSRichard Henderson             if (type == 1) {
1092f2d2f5ceSRichard Henderson                 /* ARMv6/XScale extended small page format */
1093f2d2f5ceSRichard Henderson                 if (arm_feature(env, ARM_FEATURE_XSCALE)
1094f2d2f5ceSRichard Henderson                     || arm_feature(env, ARM_FEATURE_V6)) {
1095f2d2f5ceSRichard Henderson                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10967fa7ea8fSRichard Henderson                     result->f.lg_page_size = 12;
1097f2d2f5ceSRichard Henderson                 } else {
1098f2d2f5ceSRichard Henderson                     /*
1099f2d2f5ceSRichard Henderson                      * UNPREDICTABLE in ARMv5; we choose to take a
1100f2d2f5ceSRichard Henderson                      * page translation fault.
1101f2d2f5ceSRichard Henderson                      */
1102f2d2f5ceSRichard Henderson                     fi->type = ARMFault_Translation;
1103f2d2f5ceSRichard Henderson                     goto do_fault;
1104f2d2f5ceSRichard Henderson                 }
1105f2d2f5ceSRichard Henderson             } else {
1106f2d2f5ceSRichard Henderson                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
11077fa7ea8fSRichard Henderson                 result->f.lg_page_size = 10;
1108f2d2f5ceSRichard Henderson             }
1109f2d2f5ceSRichard Henderson             ap = (desc >> 4) & 3;
1110f2d2f5ceSRichard Henderson             break;
1111f2d2f5ceSRichard Henderson         default:
1112f2d2f5ceSRichard Henderson             /* Never happens, but compiler isn't smart enough to tell.  */
1113f2d2f5ceSRichard Henderson             g_assert_not_reached();
1114f2d2f5ceSRichard Henderson         }
1115f2d2f5ceSRichard Henderson     }
11166d2654ffSRichard Henderson     result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
11177fa7ea8fSRichard Henderson     result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
11187fa7ea8fSRichard Henderson     if (!(result->f.prot & (1 << access_type))) {
1119f2d2f5ceSRichard Henderson         /* Access permission fault.  */
1120f2d2f5ceSRichard Henderson         fi->type = ARMFault_Permission;
1121f2d2f5ceSRichard Henderson         goto do_fault;
1122f2d2f5ceSRichard Henderson     }
11237fa7ea8fSRichard Henderson     result->f.phys_addr = phys_addr;
1124f2d2f5ceSRichard Henderson     return false;
1125f2d2f5ceSRichard Henderson do_fault:
1126f2d2f5ceSRichard Henderson     fi->domain = domain;
1127f2d2f5ceSRichard Henderson     fi->level = level;
1128f2d2f5ceSRichard Henderson     return true;
1129f2d2f5ceSRichard Henderson }
1130f2d2f5ceSRichard Henderson 
/*
 * get_phys_addr_v6: stage 1 translation using the ARMv6/v7
 * short-descriptor translation table format (with XN/PXN, supersections,
 * and the SCTLR.AFE "simplified" access permission model).
 *
 * @env:         CPUARMState
 * @ptw:         in/out page-table-walk context (mmu_idx, walk mmu_idx, etc)
 * @address:     32-bit virtual address to translate
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @result:      on success, filled with phys addr, prot, page size, attrs
 * @fi:          on failure, filled with fault type, domain and level
 *
 * Returns true on a fault (with *fi valid), false on success.
 */
static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;
    int user_prot;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    /* Bits [1:0] of the L1 descriptor encode the entry type. */
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        /* L2 tables report faults at level 2. */
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            /* Supersections carry PA[39:32] in desc bits [23:20] and [8:5]. */
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24;  /* 16MB */
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20;  /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    if (domain_prot == 3) {
        /* Domain "manager": accesses bypass the permission checks. */
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !regime_is_user(env, mmu_idx)) {
            xn = 1;
        }
        if (xn && access_type == MMU_INST_FETCH) {
            fi->type = ARMFault_Permission;
            goto do_fault;
        }

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
        } else {
            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
        }
        if (result->f.prot && !xn) {
            result->f.prot |= PAGE_EXEC;
        }
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
        /* A privileged data access to a page EL0 can access faults with PAN. */
        if (regime_is_pan(env, mmu_idx) &&
            !regime_is_user(env, mmu_idx) &&
            user_prot &&
            access_type != MMU_INST_FETCH) {
            /* Privileged Access Never fault */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    if (ns) {
        /* The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the attribute will already be non-secure.
         */
        result->f.attrs.secure = false;
        result->f.attrs.space = ARMSS_NonSecure;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
129853c038efSRichard Henderson 
/*
 * Translate S2 section/page access permissions to protection flags
 * @env:     CPUARMState
 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
 * @xn:      XN (execute-never) bits
 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
 *
 * get_S2prot_noexecute() maps only the S2AP read/write bits;
 * get_S2prot() additionally folds in the XN bits.
 */
13064a7d7702SRichard Henderson static int get_S2prot_noexecute(int s2ap)
1307f8526edcSRichard Henderson {
1308f8526edcSRichard Henderson     int prot = 0;
1309f8526edcSRichard Henderson 
1310f8526edcSRichard Henderson     if (s2ap & 1) {
1311f8526edcSRichard Henderson         prot |= PAGE_READ;
1312f8526edcSRichard Henderson     }
1313f8526edcSRichard Henderson     if (s2ap & 2) {
1314f8526edcSRichard Henderson         prot |= PAGE_WRITE;
1315f8526edcSRichard Henderson     }
13164a7d7702SRichard Henderson     return prot;
13174a7d7702SRichard Henderson }
13184a7d7702SRichard Henderson 
13194a7d7702SRichard Henderson static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
13204a7d7702SRichard Henderson {
13214a7d7702SRichard Henderson     int prot = get_S2prot_noexecute(s2ap);
1322f8526edcSRichard Henderson 
1323f8526edcSRichard Henderson     if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
1324f8526edcSRichard Henderson         switch (xn) {
1325f8526edcSRichard Henderson         case 0:
1326f8526edcSRichard Henderson             prot |= PAGE_EXEC;
1327f8526edcSRichard Henderson             break;
1328f8526edcSRichard Henderson         case 1:
1329f8526edcSRichard Henderson             if (s1_is_el0) {
1330f8526edcSRichard Henderson                 prot |= PAGE_EXEC;
1331f8526edcSRichard Henderson             }
1332f8526edcSRichard Henderson             break;
1333f8526edcSRichard Henderson         case 2:
1334f8526edcSRichard Henderson             break;
1335f8526edcSRichard Henderson         case 3:
1336f8526edcSRichard Henderson             if (!s1_is_el0) {
1337f8526edcSRichard Henderson                 prot |= PAGE_EXEC;
1338f8526edcSRichard Henderson             }
1339f8526edcSRichard Henderson             break;
1340f8526edcSRichard Henderson         default:
1341f8526edcSRichard Henderson             g_assert_not_reached();
1342f8526edcSRichard Henderson         }
1343f8526edcSRichard Henderson     } else {
1344f8526edcSRichard Henderson         if (!extract32(xn, 1, 1)) {
1345f8526edcSRichard Henderson             if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
1346f8526edcSRichard Henderson                 prot |= PAGE_EXEC;
1347f8526edcSRichard Henderson             }
1348f8526edcSRichard Henderson         }
1349f8526edcSRichard Henderson     }
1350f8526edcSRichard Henderson     return prot;
1351f8526edcSRichard Henderson }
1352f8526edcSRichard Henderson 
/*
 * Translate section/page access permissions to protection flags
 * @env:     CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 * @is_aa64: TRUE if AArch64
 * @ap:      The 2-bit simple AP (AP[2:1])
 * @xn:      XN (execute-never) bit
 * @pxn:     PXN (privileged execute-never) bit
 * @in_pa:   The original input pa space
 * @out_pa:  The output pa space, modified by NSTable, NS, and NSE
 *
 * Returns a PAGE_READ/PAGE_WRITE/PAGE_EXEC mask.  Stage 2 regimes are
 * handled by get_S2prot(), not here.
 */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int ap, int xn, int pxn,
                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int prot_rw, user_rw;
    bool have_wxn;
    int wxn = 0;

    assert(!regime_is_stage2(mmu_idx));

    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
    if (is_user) {
        prot_rw = user_rw;
    } else {
        /*
         * PAN controls can forbid data accesses but don't affect insn fetch.
         * Plain PAN forbids data accesses if EL0 has data permissions;
         * PAN3 forbids data accesses if EL0 has either data or exec perms.
         * Note that for AArch64 the 'user can exec' case is exactly !xn.
         * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
         * do not affect EPAN.
         */
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            prot_rw = 0;
        } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
                   regime_is_pan(env, mmu_idx) &&
                   (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
            prot_rw = 0;
        } else {
            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
        }
    }

    /*
     * A cross-space fetch (e.g. Secure code fetching from NonSecure
     * memory) may forbid execution; return a no-exec mask in that case.
     */
    if (in_pa != out_pa) {
        switch (in_pa) {
        case ARMSS_Root:
            /*
             * R_ZWRVD: permission fault for insn fetched from non-Root,
             * I_WWBFB: SIF has no effect in EL3.
             */
            return prot_rw;
        case ARMSS_Realm:
            /*
             * R_PKTDS: permission fault for insn fetched from non-Realm,
             * for Realm EL2 or EL2&0.  The corresponding fault for EL1&0
             * happens during any stage2 translation.
             */
            switch (mmu_idx) {
            case ARMMMUIdx_E2:
            case ARMMMUIdx_E20_0:
            case ARMMMUIdx_E20_2:
            case ARMMMUIdx_E20_2_PAN:
                return prot_rw;
            default:
                break;
            }
            break;
        case ARMSS_Secure:
            if (env->cp15.scr_el3 & SCR_SIF) {
                return prot_rw;
            }
            break;
        default:
            /* Input NonSecure must have output NonSecure. */
            g_assert_not_reached();
        }
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    /* Fold PXN/UWXN and writability into an effective XN for this regime. */
    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        /* Pre-v7 has no XN or WXN controls. */
        xn = wxn = 0;
    }

    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
1476f8526edcSRichard Henderson 
14772f0ec92eSRichard Henderson static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
14782f0ec92eSRichard Henderson                                           ARMMMUIdx mmu_idx)
14792f0ec92eSRichard Henderson {
1480c1547bbaSPeter Maydell     uint64_t tcr = regime_tcr(env, mmu_idx);
14812f0ec92eSRichard Henderson     uint32_t el = regime_el(env, mmu_idx);
14822f0ec92eSRichard Henderson     int select, tsz;
14832f0ec92eSRichard Henderson     bool epd, hpd;
14842f0ec92eSRichard Henderson 
14852f0ec92eSRichard Henderson     assert(mmu_idx != ARMMMUIdx_Stage2_S);
14862f0ec92eSRichard Henderson 
14872f0ec92eSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2) {
14882f0ec92eSRichard Henderson         /* VTCR */
14892f0ec92eSRichard Henderson         bool sext = extract32(tcr, 4, 1);
14902f0ec92eSRichard Henderson         bool sign = extract32(tcr, 3, 1);
14912f0ec92eSRichard Henderson 
14922f0ec92eSRichard Henderson         /*
14932f0ec92eSRichard Henderson          * If the sign-extend bit is not the same as t0sz[3], the result
14942f0ec92eSRichard Henderson          * is unpredictable. Flag this as a guest error.
14952f0ec92eSRichard Henderson          */
14962f0ec92eSRichard Henderson         if (sign != sext) {
14972f0ec92eSRichard Henderson             qemu_log_mask(LOG_GUEST_ERROR,
14982f0ec92eSRichard Henderson                           "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
14992f0ec92eSRichard Henderson         }
15002f0ec92eSRichard Henderson         tsz = sextract32(tcr, 0, 4) + 8;
15012f0ec92eSRichard Henderson         select = 0;
15022f0ec92eSRichard Henderson         hpd = false;
15032f0ec92eSRichard Henderson         epd = false;
15042f0ec92eSRichard Henderson     } else if (el == 2) {
15052f0ec92eSRichard Henderson         /* HTCR */
15062f0ec92eSRichard Henderson         tsz = extract32(tcr, 0, 3);
15072f0ec92eSRichard Henderson         select = 0;
15082f0ec92eSRichard Henderson         hpd = extract64(tcr, 24, 1);
15092f0ec92eSRichard Henderson         epd = false;
15102f0ec92eSRichard Henderson     } else {
15112f0ec92eSRichard Henderson         int t0sz = extract32(tcr, 0, 3);
15122f0ec92eSRichard Henderson         int t1sz = extract32(tcr, 16, 3);
15132f0ec92eSRichard Henderson 
15142f0ec92eSRichard Henderson         if (t1sz == 0) {
15152f0ec92eSRichard Henderson             select = va > (0xffffffffu >> t0sz);
15162f0ec92eSRichard Henderson         } else {
15172f0ec92eSRichard Henderson             /* Note that we will detect errors later.  */
15182f0ec92eSRichard Henderson             select = va >= ~(0xffffffffu >> t1sz);
15192f0ec92eSRichard Henderson         }
15202f0ec92eSRichard Henderson         if (!select) {
15212f0ec92eSRichard Henderson             tsz = t0sz;
15222f0ec92eSRichard Henderson             epd = extract32(tcr, 7, 1);
15232f0ec92eSRichard Henderson             hpd = extract64(tcr, 41, 1);
15242f0ec92eSRichard Henderson         } else {
15252f0ec92eSRichard Henderson             tsz = t1sz;
15262f0ec92eSRichard Henderson             epd = extract32(tcr, 23, 1);
15272f0ec92eSRichard Henderson             hpd = extract64(tcr, 42, 1);
15282f0ec92eSRichard Henderson         }
15292f0ec92eSRichard Henderson         /* For aarch32, hpd0 is not enabled without t2e as well.  */
15302f0ec92eSRichard Henderson         hpd &= extract32(tcr, 6, 1);
15312f0ec92eSRichard Henderson     }
15322f0ec92eSRichard Henderson 
15332f0ec92eSRichard Henderson     return (ARMVAParameters) {
15342f0ec92eSRichard Henderson         .tsz = tsz,
15352f0ec92eSRichard Henderson         .select = select,
15362f0ec92eSRichard Henderson         .epd = epd,
15372f0ec92eSRichard Henderson         .hpd = hpd,
15382f0ec92eSRichard Henderson     };
15392f0ec92eSRichard Henderson }
15402f0ec92eSRichard Henderson 
1541c5168785SRichard Henderson /*
1542c5168785SRichard Henderson  * check_s2_mmu_setup
1543c5168785SRichard Henderson  * @cpu:        ARMCPU
1544c5168785SRichard Henderson  * @is_aa64:    True if the translation regime is in AArch64 state
15450ffe5b7bSRichard Henderson  * @tcr:        VTCR_EL2 or VSTCR_EL2
15460ffe5b7bSRichard Henderson  * @ds:         Effective value of TCR.DS.
15470ffe5b7bSRichard Henderson  * @iasize:     Bitsize of IPAs
1548c5168785SRichard Henderson  * @stride:     Page-table stride (See the ARM ARM)
1549c5168785SRichard Henderson  *
15500ffe5b7bSRichard Henderson  * Decode the starting level of the S2 lookup, returning INT_MIN if
15510ffe5b7bSRichard Henderson  * the configuration is invalid.
1552c5168785SRichard Henderson  */
15530ffe5b7bSRichard Henderson static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
15540ffe5b7bSRichard Henderson                               bool ds, int iasize, int stride)
1555c5168785SRichard Henderson {
15560ffe5b7bSRichard Henderson     int sl0, sl2, startlevel, granulebits, levels;
15570ffe5b7bSRichard Henderson     int s1_min_iasize, s1_max_iasize;
15580ffe5b7bSRichard Henderson 
15590ffe5b7bSRichard Henderson     sl0 = extract32(tcr, 6, 2);
15600ffe5b7bSRichard Henderson     if (is_aa64) {
15610ffe5b7bSRichard Henderson         /*
15620ffe5b7bSRichard Henderson          * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
15630ffe5b7bSRichard Henderson          * so interleave AArch64.S2StartLevel.
1564c5168785SRichard Henderson          */
1565c5168785SRichard Henderson         switch (stride) {
15660ffe5b7bSRichard Henderson         case 9: /* 4KB */
15670ffe5b7bSRichard Henderson             /* SL2 is RES0 unless DS=1 & 4KB granule. */
15680ffe5b7bSRichard Henderson             sl2 = extract64(tcr, 33, 1);
15690ffe5b7bSRichard Henderson             if (ds && sl2) {
15700ffe5b7bSRichard Henderson                 if (sl0 != 0) {
15710ffe5b7bSRichard Henderson                     goto fail;
15720ffe5b7bSRichard Henderson                 }
15730ffe5b7bSRichard Henderson                 startlevel = -1;
15740ffe5b7bSRichard Henderson             } else {
15750ffe5b7bSRichard Henderson                 startlevel = 2 - sl0;
15760ffe5b7bSRichard Henderson                 switch (sl0) {
15770ffe5b7bSRichard Henderson                 case 2:
15780ffe5b7bSRichard Henderson                     if (arm_pamax(cpu) < 44) {
15790ffe5b7bSRichard Henderson                         goto fail;
1580c5168785SRichard Henderson                     }
1581c5168785SRichard Henderson                     break;
15820ffe5b7bSRichard Henderson                 case 3:
15830ffe5b7bSRichard Henderson                     if (!cpu_isar_feature(aa64_st, cpu)) {
15840ffe5b7bSRichard Henderson                         goto fail;
15850ffe5b7bSRichard Henderson                     }
15860ffe5b7bSRichard Henderson                     startlevel = 3;
15870ffe5b7bSRichard Henderson                     break;
15880ffe5b7bSRichard Henderson                 }
1589c5168785SRichard Henderson             }
1590c5168785SRichard Henderson             break;
15910ffe5b7bSRichard Henderson         case 11: /* 16KB */
15920ffe5b7bSRichard Henderson             switch (sl0) {
15930ffe5b7bSRichard Henderson             case 2:
15940ffe5b7bSRichard Henderson                 if (arm_pamax(cpu) < 42) {
15950ffe5b7bSRichard Henderson                     goto fail;
1596c5168785SRichard Henderson                 }
1597c5168785SRichard Henderson                 break;
15980ffe5b7bSRichard Henderson             case 3:
15990ffe5b7bSRichard Henderson                 if (!ds) {
16000ffe5b7bSRichard Henderson                     goto fail;
16010ffe5b7bSRichard Henderson                 }
16020ffe5b7bSRichard Henderson                 break;
16030ffe5b7bSRichard Henderson             }
16040ffe5b7bSRichard Henderson             startlevel = 3 - sl0;
16050ffe5b7bSRichard Henderson             break;
16060ffe5b7bSRichard Henderson         case 13: /* 64KB */
16070ffe5b7bSRichard Henderson             switch (sl0) {
16080ffe5b7bSRichard Henderson             case 2:
16090ffe5b7bSRichard Henderson                 if (arm_pamax(cpu) < 44) {
16100ffe5b7bSRichard Henderson                     goto fail;
16110ffe5b7bSRichard Henderson                 }
16120ffe5b7bSRichard Henderson                 break;
16130ffe5b7bSRichard Henderson             case 3:
16140ffe5b7bSRichard Henderson                 goto fail;
16150ffe5b7bSRichard Henderson             }
16160ffe5b7bSRichard Henderson             startlevel = 3 - sl0;
16170ffe5b7bSRichard Henderson             break;
1618c5168785SRichard Henderson         default:
1619c5168785SRichard Henderson             g_assert_not_reached();
1620c5168785SRichard Henderson         }
1621c5168785SRichard Henderson     } else {
16220ffe5b7bSRichard Henderson         /*
16230ffe5b7bSRichard Henderson          * Things are simpler for AArch32 EL2, with only 4k pages.
16240ffe5b7bSRichard Henderson          * There is no separate S2InvalidSL function, but AArch32.S2Walk
16250ffe5b7bSRichard Henderson          * begins with walkparms.sl0 in {'1x'}.
16260ffe5b7bSRichard Henderson          */
1627c5168785SRichard Henderson         assert(stride == 9);
16280ffe5b7bSRichard Henderson         if (sl0 >= 2) {
16290ffe5b7bSRichard Henderson             goto fail;
16300ffe5b7bSRichard Henderson         }
16310ffe5b7bSRichard Henderson         startlevel = 2 - sl0;
16320ffe5b7bSRichard Henderson     }
1633c5168785SRichard Henderson 
16340ffe5b7bSRichard Henderson     /* AArch{64,32}.S2InconsistentSL are functionally equivalent.  */
16350ffe5b7bSRichard Henderson     levels = 3 - startlevel;
16360ffe5b7bSRichard Henderson     granulebits = stride + 3;
16370ffe5b7bSRichard Henderson 
16380ffe5b7bSRichard Henderson     s1_min_iasize = levels * stride + granulebits + 1;
16390ffe5b7bSRichard Henderson     s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
16400ffe5b7bSRichard Henderson 
16410ffe5b7bSRichard Henderson     if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
16420ffe5b7bSRichard Henderson         return startlevel;
1643c5168785SRichard Henderson     }
16440ffe5b7bSRichard Henderson 
16450ffe5b7bSRichard Henderson  fail:
16460ffe5b7bSRichard Henderson     return INT_MIN;
1647c5168785SRichard Henderson }
1648c5168785SRichard Henderson 
1649d53e2507SPeter Maydell static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
1650d53e2507SPeter Maydell                                   ARMGranuleSize gran, int level)
1651d53e2507SPeter Maydell {
1652d53e2507SPeter Maydell     /*
1653d53e2507SPeter Maydell      * See pseudocode AArch46.BlockDescSupported(): block descriptors
1654d53e2507SPeter Maydell      * are not valid at all levels, depending on the page size.
1655d53e2507SPeter Maydell      */
1656d53e2507SPeter Maydell     switch (gran) {
1657d53e2507SPeter Maydell     case Gran4K:
1658d53e2507SPeter Maydell         return (level == 0 && ds) || level == 1 || level == 2;
1659d53e2507SPeter Maydell     case Gran16K:
1660d53e2507SPeter Maydell         return (level == 1 && ds) || level == 2;
1661d53e2507SPeter Maydell     case Gran64K:
1662d53e2507SPeter Maydell         return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
1663d53e2507SPeter Maydell     default:
1664d53e2507SPeter Maydell         g_assert_not_reached();
1665d53e2507SPeter Maydell     }
1666d53e2507SPeter Maydell }
1667d53e2507SPeter Maydell 
1668dea9104aSPeter Maydell static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
1669dea9104aSPeter Maydell {
1670dea9104aSPeter Maydell     uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
1671dea9104aSPeter Maydell     return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
1672dea9104aSPeter Maydell }
1673dea9104aSPeter Maydell 
16743283222aSRichard Henderson /**
16753283222aSRichard Henderson  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
16763283222aSRichard Henderson  *
16773283222aSRichard Henderson  * Returns false if the translation was successful. Otherwise, phys_ptr,
16783283222aSRichard Henderson  * attrs, prot and page_size may not be filled in, and the populated fsr
16793283222aSRichard Henderson  * value provides information on why the translation aborted, in the format
16803283222aSRichard Henderson  * of a long-format DFSR/IFSR fault register, with the following caveat:
16813283222aSRichard Henderson  * the WnR bit is never set (the caller must do this).
16823283222aSRichard Henderson  *
16833283222aSRichard Henderson  * @env: CPUARMState
16846d2654ffSRichard Henderson  * @ptw: Current and next stage parameters for the walk.
16853283222aSRichard Henderson  * @address: virtual address to get physical address for
16863283222aSRichard Henderson  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
168703ee9bbeSRichard Henderson  * @result: set on translation success,
16883283222aSRichard Henderson  * @fi: set to fault info if the translation fails
16893283222aSRichard Henderson  */
16906d2654ffSRichard Henderson static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
16916d2654ffSRichard Henderson                                uint64_t address,
16927c19b2d6SRichard Henderson                                MMUAccessType access_type,
1693c23f08a5SRichard Henderson                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
16943283222aSRichard Henderson {
16953283222aSRichard Henderson     ARMCPU *cpu = env_archcpu(env);
16966d2654ffSRichard Henderson     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
169715f8f467SArd Biesheuvel     int32_t level;
16983283222aSRichard Henderson     ARMVAParameters param;
16993283222aSRichard Henderson     uint64_t ttbr;
17003283222aSRichard Henderson     hwaddr descaddr, indexmask, indexmask_grainsize;
17013283222aSRichard Henderson     uint32_t tableattrs;
17023283222aSRichard Henderson     target_ulong page_size;
170345666091SRichard Henderson     uint64_t attrs;
17043283222aSRichard Henderson     int32_t stride;
17053283222aSRichard Henderson     int addrsize, inputsize, outputsize;
1706c1547bbaSPeter Maydell     uint64_t tcr = regime_tcr(env, mmu_idx);
17072f1ff4e7SRichard Henderson     int ap, xn, pxn;
17083283222aSRichard Henderson     uint32_t el = regime_el(env, mmu_idx);
17093283222aSRichard Henderson     uint64_t descaddrmask;
17103283222aSRichard Henderson     bool aarch64 = arm_el_is_aa64(env, el);
171171943a1eSRichard Henderson     uint64_t descriptor, new_descriptor;
17122f1ff4e7SRichard Henderson     ARMSecuritySpace out_space;
1713728b923fSRichard Henderson     bool device;
17143283222aSRichard Henderson 
17153283222aSRichard Henderson     /* TODO: This code does not support shareability levels. */
17163283222aSRichard Henderson     if (aarch64) {
17173283222aSRichard Henderson         int ps;
17183283222aSRichard Henderson 
17193283222aSRichard Henderson         param = aa64_va_parameters(env, address, mmu_idx,
1720478dccbbSPeter Maydell                                    access_type != MMU_INST_FETCH,
1721478dccbbSPeter Maydell                                    !arm_el_is_aa64(env, 1));
17223283222aSRichard Henderson         level = 0;
17233283222aSRichard Henderson 
17243283222aSRichard Henderson         /*
17253283222aSRichard Henderson          * If TxSZ is programmed to a value larger than the maximum,
17263283222aSRichard Henderson          * or smaller than the effective minimum, it is IMPLEMENTATION
17273283222aSRichard Henderson          * DEFINED whether we behave as if the field were programmed
17283283222aSRichard Henderson          * within bounds, or if a level 0 Translation fault is generated.
17293283222aSRichard Henderson          *
17303283222aSRichard Henderson          * With FEAT_LVA, fault on less than minimum becomes required,
17313283222aSRichard Henderson          * so our choice is to always raise the fault.
17323283222aSRichard Henderson          */
17333283222aSRichard Henderson         if (param.tsz_oob) {
173427c1b81dSRichard Henderson             goto do_translation_fault;
17353283222aSRichard Henderson         }
17363283222aSRichard Henderson 
17373283222aSRichard Henderson         addrsize = 64 - 8 * param.tbi;
17383283222aSRichard Henderson         inputsize = 64 - param.tsz;
17393283222aSRichard Henderson 
17403283222aSRichard Henderson         /*
17413283222aSRichard Henderson          * Bound PS by PARANGE to find the effective output address size.
17423283222aSRichard Henderson          * ID_AA64MMFR0 is a read-only register so values outside of the
17433283222aSRichard Henderson          * supported mappings can be considered an implementation error.
17443283222aSRichard Henderson          */
17453283222aSRichard Henderson         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
17463283222aSRichard Henderson         ps = MIN(ps, param.ps);
17473283222aSRichard Henderson         assert(ps < ARRAY_SIZE(pamax_map));
17483283222aSRichard Henderson         outputsize = pamax_map[ps];
1749312b71abSArd Biesheuvel 
1750312b71abSArd Biesheuvel         /*
1751312b71abSArd Biesheuvel          * With LPA2, the effective output address (OA) size is at most 48 bits
1752312b71abSArd Biesheuvel          * unless TCR.DS == 1
1753312b71abSArd Biesheuvel          */
1754312b71abSArd Biesheuvel         if (!param.ds && param.gran != Gran64K) {
1755312b71abSArd Biesheuvel             outputsize = MIN(outputsize, 48);
1756312b71abSArd Biesheuvel         }
17573283222aSRichard Henderson     } else {
17583283222aSRichard Henderson         param = aa32_va_parameters(env, address, mmu_idx);
17593283222aSRichard Henderson         level = 1;
17603283222aSRichard Henderson         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
17613283222aSRichard Henderson         inputsize = addrsize - param.tsz;
17623283222aSRichard Henderson         outputsize = 40;
17633283222aSRichard Henderson     }
17643283222aSRichard Henderson 
17653283222aSRichard Henderson     /*
17663283222aSRichard Henderson      * We determined the region when collecting the parameters, but we
17673283222aSRichard Henderson      * have not yet validated that the address is valid for the region.
17683283222aSRichard Henderson      * Extract the top bits and verify that they all match select.
17693283222aSRichard Henderson      *
17703283222aSRichard Henderson      * For aa32, if inputsize == addrsize, then we have selected the
17713283222aSRichard Henderson      * region by exclusion in aa32_va_parameters and there is no more
17723283222aSRichard Henderson      * validation to do here.
17733283222aSRichard Henderson      */
17743283222aSRichard Henderson     if (inputsize < addrsize) {
17753283222aSRichard Henderson         target_ulong top_bits = sextract64(address, inputsize,
17763283222aSRichard Henderson                                            addrsize - inputsize);
17773283222aSRichard Henderson         if (-top_bits != param.select) {
17783283222aSRichard Henderson             /* The gap between the two regions is a Translation fault */
177927c1b81dSRichard Henderson             goto do_translation_fault;
17803283222aSRichard Henderson         }
17813283222aSRichard Henderson     }
17823283222aSRichard Henderson 
17833c003f70SPeter Maydell     stride = arm_granule_bits(param.gran) - 3;
17843283222aSRichard Henderson 
17853283222aSRichard Henderson     /*
17863283222aSRichard Henderson      * Note that QEMU ignores shareability and cacheability attributes,
17873283222aSRichard Henderson      * so we don't need to do anything with the SH, ORGN, IRGN fields
17883283222aSRichard Henderson      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
17893283222aSRichard Henderson      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
17903283222aSRichard Henderson      * implement any ASID-like capability so we can ignore it (instead
17913283222aSRichard Henderson      * we will always flush the TLB any time the ASID is changed).
17923283222aSRichard Henderson      */
17933283222aSRichard Henderson     ttbr = regime_ttbr(env, mmu_idx, param.select);
17943283222aSRichard Henderson 
17953283222aSRichard Henderson     /*
17963283222aSRichard Henderson      * Here we should have set up all the parameters for the translation:
17973283222aSRichard Henderson      * inputsize, ttbr, epd, stride, tbi
17983283222aSRichard Henderson      */
17993283222aSRichard Henderson 
18003283222aSRichard Henderson     if (param.epd) {
18013283222aSRichard Henderson         /*
18023283222aSRichard Henderson          * Translation table walk disabled => Translation fault on TLB miss
18033283222aSRichard Henderson          * Note: This is always 0 on 64-bit EL2 and EL3.
18043283222aSRichard Henderson          */
180527c1b81dSRichard Henderson         goto do_translation_fault;
18063283222aSRichard Henderson     }
18073283222aSRichard Henderson 
1808edc05dd4SRichard Henderson     if (!regime_is_stage2(mmu_idx)) {
18093283222aSRichard Henderson         /*
18103283222aSRichard Henderson          * The starting level depends on the virtual address size (which can
18113283222aSRichard Henderson          * be up to 48 bits) and the translation granule size. It indicates
18123283222aSRichard Henderson          * the number of strides (stride bits at a time) needed to
18133283222aSRichard Henderson          * consume the bits of the input address. In the pseudocode this is:
18143283222aSRichard Henderson          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
18153283222aSRichard Henderson          * where their 'inputsize' is our 'inputsize', 'grainsize' is
18163283222aSRichard Henderson          * our 'stride + 3' and 'stride' is our 'stride'.
18173283222aSRichard Henderson          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
18183283222aSRichard Henderson          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
18193283222aSRichard Henderson          * = 4 - (inputsize - 4) / stride;
18203283222aSRichard Henderson          */
18213283222aSRichard Henderson         level = 4 - (inputsize - 4) / stride;
18223283222aSRichard Henderson     } else {
18230ffe5b7bSRichard Henderson         int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
18240ffe5b7bSRichard Henderson                                             inputsize, stride);
18250ffe5b7bSRichard Henderson         if (startlevel == INT_MIN) {
18263283222aSRichard Henderson             level = 0;
182727c1b81dSRichard Henderson             goto do_translation_fault;
18283283222aSRichard Henderson         }
18293283222aSRichard Henderson         level = startlevel;
18303283222aSRichard Henderson     }
18313283222aSRichard Henderson 
18323283222aSRichard Henderson     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
18333283222aSRichard Henderson     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
18343283222aSRichard Henderson 
18353283222aSRichard Henderson     /* Now we can extract the actual base address from the TTBR */
18363283222aSRichard Henderson     descaddr = extract64(ttbr, 0, 48);
18373283222aSRichard Henderson 
18383283222aSRichard Henderson     /*
18393283222aSRichard Henderson      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
18403283222aSRichard Henderson      *
18413283222aSRichard Henderson      * Otherwise, if the base address is out of range, raise AddressSizeFault.
18423283222aSRichard Henderson      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
18433283222aSRichard Henderson      * but we've just cleared the bits above 47, so simplify the test.
18443283222aSRichard Henderson      */
18453283222aSRichard Henderson     if (outputsize > 48) {
18463283222aSRichard Henderson         descaddr |= extract64(ttbr, 2, 4) << 48;
18473283222aSRichard Henderson     } else if (descaddr >> outputsize) {
18483283222aSRichard Henderson         level = 0;
184927c1b81dSRichard Henderson         fi->type = ARMFault_AddressSize;
18503283222aSRichard Henderson         goto do_fault;
18513283222aSRichard Henderson     }
18523283222aSRichard Henderson 
18533283222aSRichard Henderson     /*
18543283222aSRichard Henderson      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
18553283222aSRichard Henderson      * and also to mask out CnP (bit 0) which could validly be non-zero.
18563283222aSRichard Henderson      */
18573283222aSRichard Henderson     descaddr &= ~indexmask;
18583283222aSRichard Henderson 
18593283222aSRichard Henderson     /*
18603283222aSRichard Henderson      * For AArch32, the address field in the descriptor goes up to bit 39
18613283222aSRichard Henderson      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
18623283222aSRichard Henderson      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
18633283222aSRichard Henderson      * bits as part of the address, which will be checked via outputsize.
18643283222aSRichard Henderson      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
18653283222aSRichard Henderson      * the highest bits of a 52-bit output are placed elsewhere.
18663283222aSRichard Henderson      */
18673283222aSRichard Henderson     if (param.ds) {
18683283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 50);
18693283222aSRichard Henderson     } else if (arm_feature(env, ARM_FEATURE_V8)) {
18703283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 48);
18713283222aSRichard Henderson     } else {
18723283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 40);
18733283222aSRichard Henderson     }
18743283222aSRichard Henderson     descaddrmask &= ~indexmask_grainsize;
187526d19945SRichard Henderson     tableattrs = 0;
18763283222aSRichard Henderson 
1877fe4ddc15SRichard Henderson  next_level:
18783283222aSRichard Henderson     descaddr |= (address >> (stride * (4 - level))) & indexmask;
18793283222aSRichard Henderson     descaddr &= ~7ULL;
188026d19945SRichard Henderson 
188126d19945SRichard Henderson     /*
188226d19945SRichard Henderson      * Process the NSTable bit from the previous level.  This changes
188326d19945SRichard Henderson      * the table address space and the output space from Secure to
188426d19945SRichard Henderson      * NonSecure.  With RME, the EL3 translation regime does not change
188526d19945SRichard Henderson      * from Root to NonSecure.
188626d19945SRichard Henderson      */
188726d19945SRichard Henderson     if (ptw->in_space == ARMSS_Secure
188826d19945SRichard Henderson         && !regime_is_stage2(mmu_idx)
188926d19945SRichard Henderson         && extract32(tableattrs, 4, 1)) {
189048da29e4SRichard Henderson         /*
189148da29e4SRichard Henderson          * Stage2_S -> Stage2 or Phys_S -> Phys_NS
1892d38fa967SRichard Henderson          * Assert the relative order of the secure/non-secure indexes.
189348da29e4SRichard Henderson          */
1894d38fa967SRichard Henderson         QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
1895d38fa967SRichard Henderson         QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
1896d38fa967SRichard Henderson         ptw->in_ptw_idx += 1;
189726d19945SRichard Henderson         ptw->in_space = ARMSS_NonSecure;
189848da29e4SRichard Henderson     }
189926d19945SRichard Henderson 
190093e5b3a6SRichard Henderson     if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
190193e5b3a6SRichard Henderson         goto do_fault;
190293e5b3a6SRichard Henderson     }
190393e5b3a6SRichard Henderson     descriptor = arm_ldq_ptw(env, ptw, fi);
19043283222aSRichard Henderson     if (fi->type != ARMFault_None) {
19053283222aSRichard Henderson         goto do_fault;
19063283222aSRichard Henderson     }
190771943a1eSRichard Henderson     new_descriptor = descriptor;
19083283222aSRichard Henderson 
190971943a1eSRichard Henderson  restart_atomic_update:
1910d53e2507SPeter Maydell     if (!(descriptor & 1) ||
1911d53e2507SPeter Maydell         (!(descriptor & 2) &&
1912d53e2507SPeter Maydell          !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
1913d53e2507SPeter Maydell         /* Invalid, or a block descriptor at an invalid level */
191427c1b81dSRichard Henderson         goto do_translation_fault;
19153283222aSRichard Henderson     }
19163283222aSRichard Henderson 
19173283222aSRichard Henderson     descaddr = descriptor & descaddrmask;
19183283222aSRichard Henderson 
19193283222aSRichard Henderson     /*
19203283222aSRichard Henderson      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
19213283222aSRichard Henderson      * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
19223283222aSRichard Henderson      * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
19233283222aSRichard Henderson      * raise AddressSizeFault.
19243283222aSRichard Henderson      */
19253283222aSRichard Henderson     if (outputsize > 48) {
19263283222aSRichard Henderson         if (param.ds) {
19273283222aSRichard Henderson             descaddr |= extract64(descriptor, 8, 2) << 50;
19283283222aSRichard Henderson         } else {
19293283222aSRichard Henderson             descaddr |= extract64(descriptor, 12, 4) << 48;
19303283222aSRichard Henderson         }
19313283222aSRichard Henderson     } else if (descaddr >> outputsize) {
193227c1b81dSRichard Henderson         fi->type = ARMFault_AddressSize;
19333283222aSRichard Henderson         goto do_fault;
19343283222aSRichard Henderson     }
19353283222aSRichard Henderson 
19363283222aSRichard Henderson     if ((descriptor & 2) && (level < 3)) {
19373283222aSRichard Henderson         /*
19383283222aSRichard Henderson          * Table entry. The top five bits are attributes which may
19393283222aSRichard Henderson          * propagate down through lower levels of the table (and
19403283222aSRichard Henderson          * which are all arranged so that 0 means "no effect", so
19413283222aSRichard Henderson          * we can gather them up by ORing in the bits at each level).
19423283222aSRichard Henderson          */
19433283222aSRichard Henderson         tableattrs |= extract64(descriptor, 59, 5);
19443283222aSRichard Henderson         level++;
19453283222aSRichard Henderson         indexmask = indexmask_grainsize;
1946fe4ddc15SRichard Henderson         goto next_level;
19473283222aSRichard Henderson     }
1948fe4ddc15SRichard Henderson 
19493283222aSRichard Henderson     /*
19503283222aSRichard Henderson      * Block entry at level 1 or 2, or page entry at level 3.
19513283222aSRichard Henderson      * These are basically the same thing, although the number
19523283222aSRichard Henderson      * of bits we pull in from the vaddr varies. Note that although
19533283222aSRichard Henderson      * descaddrmask masks enough of the low bits of the descriptor
19543283222aSRichard Henderson      * to give a correct page or table address, the address field
19553283222aSRichard Henderson      * in a block descriptor is smaller; so we need to explicitly
19563283222aSRichard Henderson      * clear the lower bits here before ORing in the low vaddr bits.
195771943a1eSRichard Henderson      *
195871943a1eSRichard Henderson      * Afterward, descaddr is the final physical address.
19593283222aSRichard Henderson      */
19603283222aSRichard Henderson     page_size = (1ULL << ((stride * (4 - level)) + 3));
1961c2360eaaSPeter Maydell     descaddr &= ~(hwaddr)(page_size - 1);
19623283222aSRichard Henderson     descaddr |= (address & (page_size - 1));
19633283222aSRichard Henderson 
196471943a1eSRichard Henderson     if (likely(!ptw->in_debug)) {
196534a57faeSRichard Henderson         /*
196671943a1eSRichard Henderson          * Access flag.
196771943a1eSRichard Henderson          * If HA is enabled, prepare to update the descriptor below.
196871943a1eSRichard Henderson          * Otherwise, pass the access fault on to software.
196934a57faeSRichard Henderson          */
197071943a1eSRichard Henderson         if (!(descriptor & (1 << 10))) {
197171943a1eSRichard Henderson             if (param.ha) {
197271943a1eSRichard Henderson                 new_descriptor |= 1 << 10; /* AF */
197371943a1eSRichard Henderson             } else {
197471943a1eSRichard Henderson                 fi->type = ARMFault_AccessFlag;
197571943a1eSRichard Henderson                 goto do_fault;
197671943a1eSRichard Henderson             }
197771943a1eSRichard Henderson         }
197865c123fdSRichard Henderson 
197965c123fdSRichard Henderson         /*
198065c123fdSRichard Henderson          * Dirty Bit.
198165c123fdSRichard Henderson          * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
198265c123fdSRichard Henderson          * bit for writeback. The actual write protection test may still be
198365c123fdSRichard Henderson          * overridden by tableattrs, to be merged below.
198465c123fdSRichard Henderson          */
198565c123fdSRichard Henderson         if (param.hd
198665c123fdSRichard Henderson             && extract64(descriptor, 51, 1)  /* DBM */
198765c123fdSRichard Henderson             && access_type == MMU_DATA_STORE) {
198865c123fdSRichard Henderson             if (regime_is_stage2(mmu_idx)) {
198965c123fdSRichard Henderson                 new_descriptor |= 1ull << 7;    /* set S2AP[1] */
199065c123fdSRichard Henderson             } else {
199165c123fdSRichard Henderson                 new_descriptor &= ~(1ull << 7); /* clear AP[2] */
199265c123fdSRichard Henderson             }
199365c123fdSRichard Henderson         }
199471943a1eSRichard Henderson     }
199571943a1eSRichard Henderson 
199671943a1eSRichard Henderson     /*
199771943a1eSRichard Henderson      * Extract attributes from the (modified) descriptor, and apply
199871943a1eSRichard Henderson      * table descriptors. Stage 2 table descriptors do not include
199971943a1eSRichard Henderson      * any attribute fields. HPD disables all the table attributes
2000b9c139dcSPeter Maydell      * except NSTable (which we have already handled).
200171943a1eSRichard Henderson      */
200271943a1eSRichard Henderson     attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
200334a57faeSRichard Henderson     if (!regime_is_stage2(mmu_idx)) {
200434a57faeSRichard Henderson         if (!param.hpd) {
200545666091SRichard Henderson             attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
20063283222aSRichard Henderson             /*
20073283222aSRichard Henderson              * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
20083283222aSRichard Henderson              * means "force PL1 access only", which means forcing AP[1] to 0.
20093283222aSRichard Henderson              */
201045666091SRichard Henderson             attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
201145666091SRichard Henderson             attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
201234a57faeSRichard Henderson         }
201334a57faeSRichard Henderson     }
2014fe4ddc15SRichard Henderson 
201545666091SRichard Henderson     ap = extract32(attrs, 6, 2);
20162f1ff4e7SRichard Henderson     out_space = ptw->in_space;
2017edc05dd4SRichard Henderson     if (regime_is_stage2(mmu_idx)) {
20182f1ff4e7SRichard Henderson         /*
20192f1ff4e7SRichard Henderson          * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
20202f1ff4e7SRichard Henderson          * The bit remains ignored for other security states.
20214a7d7702SRichard Henderson          * R_YMCSL: Executing an insn fetched from non-Realm causes
20224a7d7702SRichard Henderson          * a stage2 permission fault.
20232f1ff4e7SRichard Henderson          */
20242f1ff4e7SRichard Henderson         if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
20252f1ff4e7SRichard Henderson             out_space = ARMSS_NonSecure;
20264a7d7702SRichard Henderson             result->f.prot = get_S2prot_noexecute(ap);
20274a7d7702SRichard Henderson         } else {
202845666091SRichard Henderson             xn = extract64(attrs, 53, 2);
20297c19b2d6SRichard Henderson             result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
20304a7d7702SRichard Henderson         }
20313283222aSRichard Henderson     } else {
20322f1ff4e7SRichard Henderson         int nse, ns = extract32(attrs, 5, 1);
20332f1ff4e7SRichard Henderson         switch (out_space) {
20342f1ff4e7SRichard Henderson         case ARMSS_Root:
20352f1ff4e7SRichard Henderson             /*
20362f1ff4e7SRichard Henderson              * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
20372f1ff4e7SRichard Henderson              * R_XTYPW: NSE and NS together select the output pa space.
20382f1ff4e7SRichard Henderson              */
20392f1ff4e7SRichard Henderson             nse = extract32(attrs, 11, 1);
20402f1ff4e7SRichard Henderson             out_space = (nse << 1) | ns;
20412f1ff4e7SRichard Henderson             if (out_space == ARMSS_Secure &&
20422f1ff4e7SRichard Henderson                 !cpu_isar_feature(aa64_sel2, cpu)) {
20432f1ff4e7SRichard Henderson                 out_space = ARMSS_NonSecure;
20442f1ff4e7SRichard Henderson             }
20452f1ff4e7SRichard Henderson             break;
20462f1ff4e7SRichard Henderson         case ARMSS_Secure:
20472f1ff4e7SRichard Henderson             if (ns) {
20482f1ff4e7SRichard Henderson                 out_space = ARMSS_NonSecure;
20492f1ff4e7SRichard Henderson             }
20502f1ff4e7SRichard Henderson             break;
20512f1ff4e7SRichard Henderson         case ARMSS_Realm:
20522f1ff4e7SRichard Henderson             switch (mmu_idx) {
20532f1ff4e7SRichard Henderson             case ARMMMUIdx_Stage1_E0:
20542f1ff4e7SRichard Henderson             case ARMMMUIdx_Stage1_E1:
20552f1ff4e7SRichard Henderson             case ARMMMUIdx_Stage1_E1_PAN:
20562f1ff4e7SRichard Henderson                 /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
20572f1ff4e7SRichard Henderson                 break;
20582f1ff4e7SRichard Henderson             case ARMMMUIdx_E2:
20592f1ff4e7SRichard Henderson             case ARMMMUIdx_E20_0:
20602f1ff4e7SRichard Henderson             case ARMMMUIdx_E20_2:
20612f1ff4e7SRichard Henderson             case ARMMMUIdx_E20_2_PAN:
20622f1ff4e7SRichard Henderson                 /*
20632f1ff4e7SRichard Henderson                  * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
20642f1ff4e7SRichard Henderson                  * NS changes the output to non-secure space.
20652f1ff4e7SRichard Henderson                  */
20662f1ff4e7SRichard Henderson                 if (ns) {
20672f1ff4e7SRichard Henderson                     out_space = ARMSS_NonSecure;
20682f1ff4e7SRichard Henderson                 }
20692f1ff4e7SRichard Henderson                 break;
20702f1ff4e7SRichard Henderson             default:
20712f1ff4e7SRichard Henderson                 g_assert_not_reached();
20722f1ff4e7SRichard Henderson             }
20732f1ff4e7SRichard Henderson             break;
20742f1ff4e7SRichard Henderson         case ARMSS_NonSecure:
20752f1ff4e7SRichard Henderson             /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
20762f1ff4e7SRichard Henderson             break;
20772f1ff4e7SRichard Henderson         default:
20782f1ff4e7SRichard Henderson             g_assert_not_reached();
20792f1ff4e7SRichard Henderson         }
208045666091SRichard Henderson         xn = extract64(attrs, 54, 1);
208145666091SRichard Henderson         pxn = extract64(attrs, 53, 1);
20822f1ff4e7SRichard Henderson 
2083dea9104aSPeter Maydell         if (el == 1 && nv_nv1_enabled(env, ptw)) {
2084dea9104aSPeter Maydell             /*
2085dea9104aSPeter Maydell              * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
2086dea9104aSPeter Maydell              * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
2087dea9104aSPeter Maydell              * of UXN is 0. Similarly for bits 59 and 60 in table descriptors
2088dea9104aSPeter Maydell              * (which we have already folded into bits 53 and 54 of attrs).
2089dea9104aSPeter Maydell              * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
2090dea9104aSPeter Maydell              * Similarly, APTable[0] from the table descriptor is treated as 0;
2091dea9104aSPeter Maydell              * we already folded this into AP[1] and squashing that to 0 does
2092dea9104aSPeter Maydell              * the right thing.
2093dea9104aSPeter Maydell              */
2094dea9104aSPeter Maydell             pxn = xn;
2095dea9104aSPeter Maydell             xn = 0;
2096dea9104aSPeter Maydell             ap &= ~1;
2097dea9104aSPeter Maydell         }
20982f1ff4e7SRichard Henderson         /*
20992f1ff4e7SRichard Henderson          * Note that we modified ptw->in_space earlier for NSTable, but
21002f1ff4e7SRichard Henderson          * result->f.attrs retains a copy of the original security space.
21012f1ff4e7SRichard Henderson          */
21022f1ff4e7SRichard Henderson         result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
21032f1ff4e7SRichard Henderson                                     result->f.attrs.space, out_space);
21043283222aSRichard Henderson     }
21053283222aSRichard Henderson 
21067fa7ea8fSRichard Henderson     if (!(result->f.prot & (1 << access_type))) {
210727c1b81dSRichard Henderson         fi->type = ARMFault_Permission;
21083283222aSRichard Henderson         goto do_fault;
21093283222aSRichard Henderson     }
21103283222aSRichard Henderson 
211171943a1eSRichard Henderson     /* If FEAT_HAFDBS has made changes, update the PTE. */
211271943a1eSRichard Henderson     if (new_descriptor != descriptor) {
211371943a1eSRichard Henderson         new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
211471943a1eSRichard Henderson         if (fi->type != ARMFault_None) {
211571943a1eSRichard Henderson             goto do_fault;
211671943a1eSRichard Henderson         }
211771943a1eSRichard Henderson         /*
211871943a1eSRichard Henderson          * I_YZSVV says that if the in-memory descriptor has changed,
211971943a1eSRichard Henderson          * then we must use the information in that new value
212071943a1eSRichard Henderson          * (which might include a different output address, different
212171943a1eSRichard Henderson          * attributes, or generate a fault).
212271943a1eSRichard Henderson          * Restart the handling of the descriptor value from scratch.
212371943a1eSRichard Henderson          */
212471943a1eSRichard Henderson         if (new_descriptor != descriptor) {
212571943a1eSRichard Henderson             descriptor = new_descriptor;
212671943a1eSRichard Henderson             goto restart_atomic_update;
212771943a1eSRichard Henderson         }
212871943a1eSRichard Henderson     }
212971943a1eSRichard Henderson 
21302f1ff4e7SRichard Henderson     result->f.attrs.space = out_space;
21312f1ff4e7SRichard Henderson     result->f.attrs.secure = arm_space_is_secure(out_space);
2132937f2245SRichard Henderson 
2133edc05dd4SRichard Henderson     if (regime_is_stage2(mmu_idx)) {
213403ee9bbeSRichard Henderson         result->cacheattrs.is_s2_format = true;
213545666091SRichard Henderson         result->cacheattrs.attrs = extract32(attrs, 2, 4);
2136728b923fSRichard Henderson         /*
2137728b923fSRichard Henderson          * Security state does not really affect HCR_EL2.FWB;
2138728b923fSRichard Henderson          * we only need to filter FWB for aa32 or other FEAT.
2139728b923fSRichard Henderson          */
2140728b923fSRichard Henderson         device = S2_attrs_are_device(arm_hcr_el2_eff(env),
2141728b923fSRichard Henderson                                      result->cacheattrs.attrs);
21423283222aSRichard Henderson     } else {
21433283222aSRichard Henderson         /* Index into MAIR registers for cache attributes */
214445666091SRichard Henderson         uint8_t attrindx = extract32(attrs, 2, 3);
21453283222aSRichard Henderson         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
21463283222aSRichard Henderson         assert(attrindx <= 7);
214703ee9bbeSRichard Henderson         result->cacheattrs.is_s2_format = false;
214803ee9bbeSRichard Henderson         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
21496a3b1e44SRichard Henderson 
21506a3b1e44SRichard Henderson         /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
21516a3b1e44SRichard Henderson         if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
2152a81fef4bSAnton Johansson             result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
21536a3b1e44SRichard Henderson         }
2154728b923fSRichard Henderson         device = S1_attrs_are_device(result->cacheattrs.attrs);
2155728b923fSRichard Henderson     }
2156728b923fSRichard Henderson 
2157728b923fSRichard Henderson     /*
2158728b923fSRichard Henderson      * Enable alignment checks on Device memory.
2159728b923fSRichard Henderson      *
2160728b923fSRichard Henderson      * Per R_XCHFJ, this check is mis-ordered. The correct ordering
2161728b923fSRichard Henderson      * for alignment, permission, and stage 2 faults should be:
2162728b923fSRichard Henderson      *    - Alignment fault caused by the memory type
2163728b923fSRichard Henderson      *    - Permission fault
2164728b923fSRichard Henderson      *    - A stage 2 fault on the memory access
2165728b923fSRichard Henderson      * but due to the way the TCG softmmu TLB operates, we will have
2166728b923fSRichard Henderson      * implicitly done the permission check and the stage2 lookup in
2167728b923fSRichard Henderson      * finding the TLB entry, so the alignment check cannot be done sooner.
2168728b923fSRichard Henderson      *
2169728b923fSRichard Henderson      * In v7, for a CPU without the Virtualization Extensions this
2170728b923fSRichard Henderson      * access is UNPREDICTABLE; we choose to make it take the alignment
2171728b923fSRichard Henderson      * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
2172728b923fSRichard Henderson      * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
2173728b923fSRichard Henderson      */
2174728b923fSRichard Henderson     if (device) {
2175728b923fSRichard Henderson         result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
21763283222aSRichard Henderson     }
21773283222aSRichard Henderson 
21783283222aSRichard Henderson     /*
21793283222aSRichard Henderson      * For FEAT_LPA2 and effective DS, the SH field in the attributes
21803283222aSRichard Henderson      * was re-purposed for output address bits.  The SH attribute in
21813283222aSRichard Henderson      * that case comes from TCR_ELx, which we extracted earlier.
21823283222aSRichard Henderson      */
21833283222aSRichard Henderson     if (param.ds) {
218403ee9bbeSRichard Henderson         result->cacheattrs.shareability = param.sh;
21853283222aSRichard Henderson     } else {
218645666091SRichard Henderson         result->cacheattrs.shareability = extract32(attrs, 8, 2);
21873283222aSRichard Henderson     }
21883283222aSRichard Henderson 
21897fa7ea8fSRichard Henderson     result->f.phys_addr = descaddr;
21907fa7ea8fSRichard Henderson     result->f.lg_page_size = ctz64(page_size);
21913283222aSRichard Henderson     return false;
21923283222aSRichard Henderson 
219327c1b81dSRichard Henderson  do_translation_fault:
219427c1b81dSRichard Henderson     fi->type = ARMFault_Translation;
21953283222aSRichard Henderson  do_fault:
2196a729d636SPeter Maydell     if (fi->s1ptw) {
2197a729d636SPeter Maydell         /* Retain the existing stage 2 fi->level */
2198a729d636SPeter Maydell         assert(fi->stage2);
2199a729d636SPeter Maydell     } else {
22003283222aSRichard Henderson         fi->level = level;
2201a729d636SPeter Maydell         fi->stage2 = regime_is_stage2(mmu_idx);
2202a729d636SPeter Maydell     }
22034f51edd3SPeter Maydell     fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
22043283222aSRichard Henderson     return true;
22053283222aSRichard Henderson }
22063283222aSRichard Henderson 
2207a5637becSPeter Maydell static bool get_phys_addr_pmsav5(CPUARMState *env,
2208a5637becSPeter Maydell                                  S1Translate *ptw,
2209a5637becSPeter Maydell                                  uint32_t address,
2210a5637becSPeter Maydell                                  MMUAccessType access_type,
2211a5637becSPeter Maydell                                  GetPhysAddrResult *result,
22129a12fb36SRichard Henderson                                  ARMMMUFaultInfo *fi)
22139a12fb36SRichard Henderson {
22149a12fb36SRichard Henderson     int n;
22159a12fb36SRichard Henderson     uint32_t mask;
22169a12fb36SRichard Henderson     uint32_t base;
2217a5637becSPeter Maydell     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
22189a12fb36SRichard Henderson     bool is_user = regime_is_user(env, mmu_idx);
22199a12fb36SRichard Henderson 
2220d1289140SPeter Maydell     if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
22219a12fb36SRichard Henderson         /* MPU disabled.  */
22227fa7ea8fSRichard Henderson         result->f.phys_addr = address;
22237fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
22249a12fb36SRichard Henderson         return false;
22259a12fb36SRichard Henderson     }
22269a12fb36SRichard Henderson 
22277fa7ea8fSRichard Henderson     result->f.phys_addr = address;
22289a12fb36SRichard Henderson     for (n = 7; n >= 0; n--) {
22299a12fb36SRichard Henderson         base = env->cp15.c6_region[n];
22309a12fb36SRichard Henderson         if ((base & 1) == 0) {
22319a12fb36SRichard Henderson             continue;
22329a12fb36SRichard Henderson         }
22339a12fb36SRichard Henderson         mask = 1 << ((base >> 1) & 0x1f);
22349a12fb36SRichard Henderson         /* Keep this shift separate from the above to avoid an
22359a12fb36SRichard Henderson            (undefined) << 32.  */
22369a12fb36SRichard Henderson         mask = (mask << 1) - 1;
22379a12fb36SRichard Henderson         if (((base ^ address) & ~mask) == 0) {
22389a12fb36SRichard Henderson             break;
22399a12fb36SRichard Henderson         }
22409a12fb36SRichard Henderson     }
22419a12fb36SRichard Henderson     if (n < 0) {
22429a12fb36SRichard Henderson         fi->type = ARMFault_Background;
22439a12fb36SRichard Henderson         return true;
22449a12fb36SRichard Henderson     }
22459a12fb36SRichard Henderson 
22469a12fb36SRichard Henderson     if (access_type == MMU_INST_FETCH) {
22479a12fb36SRichard Henderson         mask = env->cp15.pmsav5_insn_ap;
22489a12fb36SRichard Henderson     } else {
22499a12fb36SRichard Henderson         mask = env->cp15.pmsav5_data_ap;
22509a12fb36SRichard Henderson     }
22519a12fb36SRichard Henderson     mask = (mask >> (n * 4)) & 0xf;
22529a12fb36SRichard Henderson     switch (mask) {
22539a12fb36SRichard Henderson     case 0:
22549a12fb36SRichard Henderson         fi->type = ARMFault_Permission;
22559a12fb36SRichard Henderson         fi->level = 1;
22569a12fb36SRichard Henderson         return true;
22579a12fb36SRichard Henderson     case 1:
22589a12fb36SRichard Henderson         if (is_user) {
22599a12fb36SRichard Henderson             fi->type = ARMFault_Permission;
22609a12fb36SRichard Henderson             fi->level = 1;
22619a12fb36SRichard Henderson             return true;
22629a12fb36SRichard Henderson         }
22637fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ | PAGE_WRITE;
22649a12fb36SRichard Henderson         break;
22659a12fb36SRichard Henderson     case 2:
22667fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ;
22679a12fb36SRichard Henderson         if (!is_user) {
22687fa7ea8fSRichard Henderson             result->f.prot |= PAGE_WRITE;
22699a12fb36SRichard Henderson         }
22709a12fb36SRichard Henderson         break;
22719a12fb36SRichard Henderson     case 3:
22727fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ | PAGE_WRITE;
22739a12fb36SRichard Henderson         break;
22749a12fb36SRichard Henderson     case 5:
22759a12fb36SRichard Henderson         if (is_user) {
22769a12fb36SRichard Henderson             fi->type = ARMFault_Permission;
22779a12fb36SRichard Henderson             fi->level = 1;
22789a12fb36SRichard Henderson             return true;
22799a12fb36SRichard Henderson         }
22807fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ;
22819a12fb36SRichard Henderson         break;
22829a12fb36SRichard Henderson     case 6:
22837fa7ea8fSRichard Henderson         result->f.prot = PAGE_READ;
22849a12fb36SRichard Henderson         break;
22859a12fb36SRichard Henderson     default:
22869a12fb36SRichard Henderson         /* Bad permission.  */
22879a12fb36SRichard Henderson         fi->type = ARMFault_Permission;
22889a12fb36SRichard Henderson         fi->level = 1;
22899a12fb36SRichard Henderson         return true;
22909a12fb36SRichard Henderson     }
22917fa7ea8fSRichard Henderson     result->f.prot |= PAGE_EXEC;
22929a12fb36SRichard Henderson     return false;
22939a12fb36SRichard Henderson }
22949a12fb36SRichard Henderson 
2295fedbaa05SRichard Henderson static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
22967fa7ea8fSRichard Henderson                                          int32_t address, uint8_t *prot)
22977d2e08c9SRichard Henderson {
22987d2e08c9SRichard Henderson     if (!arm_feature(env, ARM_FEATURE_M)) {
22997d2e08c9SRichard Henderson         *prot = PAGE_READ | PAGE_WRITE;
23007d2e08c9SRichard Henderson         switch (address) {
23017d2e08c9SRichard Henderson         case 0xF0000000 ... 0xFFFFFFFF:
23027d2e08c9SRichard Henderson             if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
23037d2e08c9SRichard Henderson                 /* hivecs execing is ok */
23047d2e08c9SRichard Henderson                 *prot |= PAGE_EXEC;
23057d2e08c9SRichard Henderson             }
23067d2e08c9SRichard Henderson             break;
23077d2e08c9SRichard Henderson         case 0x00000000 ... 0x7FFFFFFF:
23087d2e08c9SRichard Henderson             *prot |= PAGE_EXEC;
23097d2e08c9SRichard Henderson             break;
23107d2e08c9SRichard Henderson         }
23117d2e08c9SRichard Henderson     } else {
23127d2e08c9SRichard Henderson         /* Default system address map for M profile cores.
23137d2e08c9SRichard Henderson          * The architecture specifies which regions are execute-never;
23147d2e08c9SRichard Henderson          * at the MPU level no other checks are defined.
23157d2e08c9SRichard Henderson          */
23167d2e08c9SRichard Henderson         switch (address) {
23177d2e08c9SRichard Henderson         case 0x00000000 ... 0x1fffffff: /* ROM */
23187d2e08c9SRichard Henderson         case 0x20000000 ... 0x3fffffff: /* SRAM */
23197d2e08c9SRichard Henderson         case 0x60000000 ... 0x7fffffff: /* RAM */
23207d2e08c9SRichard Henderson         case 0x80000000 ... 0x9fffffff: /* RAM */
23217d2e08c9SRichard Henderson             *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
23227d2e08c9SRichard Henderson             break;
23237d2e08c9SRichard Henderson         case 0x40000000 ... 0x5fffffff: /* Peripheral */
23247d2e08c9SRichard Henderson         case 0xa0000000 ... 0xbfffffff: /* Device */
23257d2e08c9SRichard Henderson         case 0xc0000000 ... 0xdfffffff: /* Device */
23267d2e08c9SRichard Henderson         case 0xe0000000 ... 0xffffffff: /* System */
23277d2e08c9SRichard Henderson             *prot = PAGE_READ | PAGE_WRITE;
23287d2e08c9SRichard Henderson             break;
23297d2e08c9SRichard Henderson         default:
23307d2e08c9SRichard Henderson             g_assert_not_reached();
23317d2e08c9SRichard Henderson         }
23327d2e08c9SRichard Henderson     }
23337d2e08c9SRichard Henderson }
23347d2e08c9SRichard Henderson 
233547ff5ba9SRichard Henderson static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
233647ff5ba9SRichard Henderson {
233747ff5ba9SRichard Henderson     /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
233847ff5ba9SRichard Henderson     return arm_feature(env, ARM_FEATURE_M) &&
233947ff5ba9SRichard Henderson         extract32(address, 20, 12) == 0xe00;
234047ff5ba9SRichard Henderson }
234147ff5ba9SRichard Henderson 
234247ff5ba9SRichard Henderson static bool m_is_system_region(CPUARMState *env, uint32_t address)
234347ff5ba9SRichard Henderson {
234447ff5ba9SRichard Henderson     /*
234547ff5ba9SRichard Henderson      * True if address is in the M profile system region
234647ff5ba9SRichard Henderson      * 0xe0000000 - 0xffffffff
234747ff5ba9SRichard Henderson      */
234847ff5ba9SRichard Henderson     return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
234947ff5ba9SRichard Henderson }
235047ff5ba9SRichard Henderson 
2351c8e436c9SRichard Henderson static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
23521a469cf7SRichard Henderson                                          bool is_secure, bool is_user)
2353c8e436c9SRichard Henderson {
2354c8e436c9SRichard Henderson     /*
2355c8e436c9SRichard Henderson      * Return true if we should use the default memory map as a
2356c8e436c9SRichard Henderson      * "background" region if there are no hits against any MPU regions.
2357c8e436c9SRichard Henderson      */
2358c8e436c9SRichard Henderson     CPUARMState *env = &cpu->env;
2359c8e436c9SRichard Henderson 
2360c8e436c9SRichard Henderson     if (is_user) {
2361c8e436c9SRichard Henderson         return false;
2362c8e436c9SRichard Henderson     }
2363c8e436c9SRichard Henderson 
2364c8e436c9SRichard Henderson     if (arm_feature(env, ARM_FEATURE_M)) {
23651a469cf7SRichard Henderson         return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
2366c8e436c9SRichard Henderson     }
2367fca45e34STobias Röhmel 
2368fca45e34STobias Röhmel     if (mmu_idx == ARMMMUIdx_Stage2) {
2369fca45e34STobias Röhmel         return false;
2370fca45e34STobias Röhmel     }
2371fca45e34STobias Röhmel 
2372fca45e34STobias Röhmel     return regime_sctlr(env, mmu_idx) & SCTLR_BR;
2373c8e436c9SRichard Henderson }
2374c8e436c9SRichard Henderson 
/*
 * Perform a PMSAv7 MPU lookup (v7-M and v7-R style protection unit).
 *
 * The MPU never remaps addresses, so result->f.phys_addr is always
 * @address.  On success returns false with result->f filled in; on a
 * fault returns true with @fi filled in.  Note that on the permission
 * check at the end, fi->type/level are set unconditionally but are
 * only meaningful when the function returns true.
 */
static bool get_phys_addr_pmsav7(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    int n;
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_user = regime_is_user(env, mmu_idx);
    bool secure = arm_space_is_secure(ptw->in_space);

    result->f.phys_addr = address;
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.prot = 0;

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
        m_is_ppb_region(env, address)) {
        /*
         * MPU disabled or M profile PPB access: use default memory map.
         * The other case which uses the default memory map in the
         * v7M ARM ARM pseudocode is exception vector reads from the vector
         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
         * which always does a direct read using address_space_ldl(), rather
         * than going via this function, so we don't need to check that here.
         */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else { /* MPU enabled */
        /* Search from the highest-numbered region down: higher numbers win. */
        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
            /* region search */
            uint32_t base = env->pmsav7.drbar[n];
            /* DRSR bits [5:1] encode the region size as a power of two. */
            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
            uint32_t rmask;
            bool srdis = false;

            if (!(env->pmsav7.drsr[n] & 0x1)) {
                /* Region not enabled (DRSR.E clear). */
                continue;
            }

            if (!rsize) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRSR[%d]: Rsize field cannot be 0\n", n);
                continue;
            }
            rsize++;
            /* 1ull keeps the shift well-defined even when rsize reaches 32. */
            rmask = (1ull << rsize) - 1;

            if (base & rmask) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
                              n, base, rmask);
                continue;
            }

            if (address < base || address > base + rmask) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (ranges_overlap(base, rmask,
                                   address & TARGET_PAGE_MASK,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region matched */

            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
                int i, snd;
                uint32_t srdis_mask;

                rsize -= 3; /* sub region size (power of 2) */
                /* snd = which of the 8 subregions the address falls in. */
                snd = ((address - base) >> rsize) & 0x7;
                /* DRSR bits [15:8] are the subregion-disable bits. */
                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);

                srdis_mask = srdis ? 0x3 : 0x0;
                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
                    /*
                     * This will check in groups of 2, 4 and then 8, whether
                     * the subregion bits are consistent. rsize is incremented
                     * back up to give the region size, considering consistent
                     * adjacent subregions as one region. Stop testing if rsize
                     * is already big enough for an entire QEMU page.
                     */
                    int snd_rounded = snd & ~(i - 1);
                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
                                                     snd_rounded + 8, i);
                    if (srdis_mask ^ srdis_multi) {
                        break;
                    }
                    srdis_mask = (srdis_mask << i) | srdis_mask;
                    rsize++;
                }
            }
            if (srdis) {
                /* Matching subregion is disabled: keep searching. */
                continue;
            }
            if (rsize < TARGET_PAGE_BITS) {
                /* Region smaller than a QEMU page: report the true size. */
                result->f.lg_page_size = rsize;
            }
            break;
        }

        if (n == -1) { /* no hits */
            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
                /* background fault */
                fi->type = ARMFault_Background;
                return true;
            }
            get_phys_addr_pmsav7_default(env, mmu_idx, address,
                                         &result->f.prot);
        } else { /* a MPU hit! */
            /* DRACR[10:8] = AP bits, DRACR[12] = XN bit. */
            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);

            if (m_is_system_region(env, address)) {
                /* System space is always execute never */
                xn = 1;
            }

            if (is_user) { /* User mode AP bit decoding */
                switch (ap) {
                case 0:
                case 1:
                case 5:
                    break; /* no access */
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 2:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            } else { /* Priv. mode AP bits decoding */
                switch (ap) {
                case 0:
                    break; /* no access */
                case 1:
                case 2:
                case 3:
                    result->f.prot |= PAGE_WRITE;
                    /* fall through */
                case 5:
                case 6:
                    result->f.prot |= PAGE_READ | PAGE_EXEC;
                    break;
                case 7:
                    /* for v7M, same as 6; for R profile a reserved value */
                    if (arm_feature(env, ARM_FEATURE_M)) {
                        result->f.prot |= PAGE_READ | PAGE_EXEC;
                        break;
                    }
                    /* fall through */
                default:
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "DRACR[%d]: Bad value for AP bits: 0x%"
                                  PRIx32 "\n", n, ap);
                }
            }

            /* execute never */
            if (xn) {
                result->f.prot &= ~PAGE_EXEC;
            }
        }
    }

    /* Fault iff the requested access type is not permitted. */
    fi->type = ARMFault_Permission;
    fi->level = 1;
    return !(result->f.prot & (1 << access_type));
}
25671f2e87e5SRichard Henderson 
2568fca45e34STobias Röhmel static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
2569fca45e34STobias Röhmel                              uint32_t secure)
2570fca45e34STobias Röhmel {
2571fca45e34STobias Röhmel     if (regime_el(env, mmu_idx) == 2) {
2572fca45e34STobias Röhmel         return env->pmsav8.hprbar;
2573fca45e34STobias Röhmel     } else {
2574fca45e34STobias Röhmel         return env->pmsav8.rbar[secure];
2575fca45e34STobias Röhmel     }
2576fca45e34STobias Röhmel }
2577fca45e34STobias Röhmel 
2578fca45e34STobias Röhmel static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
2579fca45e34STobias Röhmel                              uint32_t secure)
2580fca45e34STobias Röhmel {
2581fca45e34STobias Röhmel     if (regime_el(env, mmu_idx) == 2) {
2582fca45e34STobias Röhmel         return env->pmsav8.hprlar;
2583fca45e34STobias Röhmel     } else {
2584fca45e34STobias Röhmel         return env->pmsav8.rlar[secure];
2585fca45e34STobias Röhmel     }
2586fca45e34STobias Röhmel }
2587fca45e34STobias Röhmel 
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * If the region hit doesn't cover the entire TARGET_PAGE the address
     * is within, then we set the result page_size to 1 to force the
     * memory system to use a subpage.
     *
     * Returns true on a fault (with *fi describing it), false on success
     * (with result->f.prot / phys_addr / cacheattrs filled in).
     * PMSA is an identity mapping, so phys_addr is always == address.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    /* Bounds of the TARGET_PAGE containing the address, for subpage checks */
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
    int region_counter;

    /* The EL2 (hypervisor) regime has its own, separately-sized region set */
    if (regime_el(env, mmu_idx) == 2) {
        region_counter = cpu->pmsav8r_hdregion;
    } else {
        region_counter = cpu->pmsav7_dregion;
    }

    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.phys_addr = address;
    result->f.prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        fi->stage2 = true;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
        /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        /* The M-profile PPB region always uses the default map */
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
            hit = true;
        }

        /* Region base/limit alignment differs between M and R profiles */
        uint32_t bitmask;
        if (arm_feature(env, ARM_FEATURE_M)) {
            bitmask = 0x1f;
        } else {
            bitmask = 0x3f;
            fi->level = 0;
        }

        for (n = region_counter - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:x] from the register
             * with bits [x-1:0] all zeroes, but the limit address is bits
             * [31:x] from the register with bits [x:0] all ones. Where x is
             * 5 for Cortex-M and 6 for Cortex-R
             */
            uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
            uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;

            if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region covers the address but not the whole page: use subpage */
            if (base > addr_page_base || limit < addr_page_limit) {
                result->f.lg_page_size = 0;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                if (arm_feature(env, ARM_FEATURE_M)) {
                    fi->level = 1;
                }
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* M profile reports a distinct "background" fault type here */
        if (arm_feature(env, ARM_FEATURE_M)) {
            fi->type = ARMFault_Background;
        } else {
            fi->type = ARMFault_Permission;
        }
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else {
        /* Decode AP/XN (and, where present, PXN) from the matched region */
        uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
        uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
        uint32_t ap = extract32(matched_rbar, 1, 2);
        uint32_t xn = extract32(matched_rbar, 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            pxn = extract32(matched_rlar, 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        if (regime_el(env, mmu_idx) == 2) {
            result->f.prot = simple_ap_to_rw_prot_is_user(ap,
                                            mmu_idx != ARMMMUIdx_E2);
        } else {
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        }

        if (!arm_feature(env, ARM_FEATURE_M)) {
            /* R profile: apply WXN/UWXN overrides and fetch MAIR attrs */
            uint8_t attrindx = extract32(matched_rlar, 1, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            uint8_t sh = extract32(matched_rlar, 3, 2);

            if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
                result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
                xn = 0x1;
            }

            if ((regime_el(env, mmu_idx) == 1) &&
                regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
                pxn = 0x1;
            }

            result->cacheattrs.is_s2_format = false;
            result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
            result->cacheattrs.shareability = sh;
        }

        if (result->f.prot && !xn && !(pxn && !is_user)) {
            result->f.prot |= PAGE_EXEC;
        }

        if (mregion) {
            *mregion = matchregion;
        }
    }

    /*
     * Permission check: fault iff the requested access type's PAGE_* bit
     * is not set. NOTE(review): this relies on the MMUAccessType values
     * lining up with the PAGE_READ/WRITE/EXEC bit positions -- confirm
     * against their definitions if either enum changes.
     */
    fi->type = ARMFault_Permission;
    if (arm_feature(env, ARM_FEATURE_M)) {
        fi->level = 1;
    }
    return !(result->f.prot & (1 << access_type));
}
2780fedbaa05SRichard Henderson 
27812c1f429dSRichard Henderson static bool v8m_is_sau_exempt(CPUARMState *env,
27822c1f429dSRichard Henderson                               uint32_t address, MMUAccessType access_type)
27832c1f429dSRichard Henderson {
27842c1f429dSRichard Henderson     /*
27852c1f429dSRichard Henderson      * The architecture specifies that certain address ranges are
27862c1f429dSRichard Henderson      * exempt from v8M SAU/IDAU checks.
27872c1f429dSRichard Henderson      */
27882c1f429dSRichard Henderson     return
27892c1f429dSRichard Henderson         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
27902c1f429dSRichard Henderson         (address >= 0xe0000000 && address <= 0xe0002fff) ||
27912c1f429dSRichard Henderson         (address >= 0xe000e000 && address <= 0xe000efff) ||
27922c1f429dSRichard Henderson         (address >= 0xe002e000 && address <= 0xe002efff) ||
27932c1f429dSRichard Henderson         (address >= 0xe0040000 && address <= 0xe0041fff) ||
27942c1f429dSRichard Henderson         (address >= 0xe00ff000 && address <= 0xe00fffff);
27952c1f429dSRichard Henderson }
27962c1f429dSRichard Henderson 
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool is_secure, V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     * Results are returned via *sattrs: ns/nsc give the attribute,
     * srvalid/sregion and irvalid/iregion report which SAU/IDAU region
     * (if any) matched, and subpage is set when the matched region does
     * not cover the whole TARGET_PAGE containing the address.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    if (cpu->idau) {
        /* Consult the board-provided IDAU, if one is wired up */
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        /* Exempt addresses take the security state of the current access */
        sattrs->ns = !is_secure;
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    /* SAU_CTRL bits [1:0] are ALLNS:ENABLE */
    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                /* Region enabled: base/limit are 32-byte aligned */
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        /* RLAR bit 1 is the NSC (NonSecure-Callable) flag */
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
29052c1f429dSRichard Henderson 
static bool get_phys_addr_pmsav8(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    /*
     * Full PMSAv8 translation: perform the v8M SAU security check (when
     * the CPU has the Security Extension) and then the MPU lookup.
     * Returns true on a fault (with *fi filled in), false on success.
     */
    V8M_SAttributes sattrs = {};
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool secure = arm_space_is_secure(ptw->in_space);
    bool ret;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        v8m_security_lookup(env, address, access_type, mmu_idx,
                            secure, &sattrs);
        if (access_type == MMU_INST_FETCH) {
            /*
             * Instruction fetches always use the MMU bank and the
             * transaction attribute determined by the fetch address,
             * regardless of CPU state. This is painful for QEMU
             * to handle, because it would mean we need to encode
             * into the mmu_idx not just the (user, negpri) information
             * for the current security state but also that for the
             * other security state, which would balloon the number
             * of mmu_idx values needed alarmingly.
             * Fortunately we can avoid this because it's not actually
             * possible to arbitrarily execute code from memory with
             * the wrong security attribute: it will always generate
             * an exception of some kind or another, apart from the
             * special case of an NS CPU executing an SG instruction
             * in S&NSC memory. So we always just fail the translation
             * here and sort things out in the exception handler
             * (including possibly emulating an SG instruction).
             */
            if (sattrs.ns != !secure) {
                /* Fetch crosses security states: synthesize a fault */
                if (sattrs.nsc) {
                    fi->type = ARMFault_QEMU_NSCExec;
                } else {
                    fi->type = ARMFault_QEMU_SFault;
                }
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        } else {
            /*
             * For data accesses we always use the MMU bank indicated
             * by the current CPU state, but the security attributes
             * might downgrade a secure access to nonsecure.
             */
            if (sattrs.ns) {
                result->f.attrs.secure = false;
                result->f.attrs.space = ARMSS_NonSecure;
            } else if (!secure) {
                /*
                 * NS access to S memory must fault.
                 * Architecturally we should first check whether the
                 * MPU information for this address indicates that we
                 * are doing an unaligned access to Device memory, which
                 * should generate a UsageFault instead. QEMU does not
                 * currently check for that kind of unaligned access though.
                 * If we added it we would need to do so as a special case
                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                 */
                fi->type = ARMFault_QEMU_SFault;
                result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
                result->f.phys_addr = address;
                result->f.prot = 0;
                return true;
            }
        }
    }

    /* Security check passed (or not applicable): do the MPU lookup proper */
    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
                            result, fi, NULL);
    if (sattrs.subpage) {
        /* The SAU region was smaller than the page: force a subpage TLB entry */
        result->f.lg_page_size = 0;
    }
    return ret;
}
2987730d5c31SRichard Henderson 
2988966f4bb7SRichard Henderson /*
2989966f4bb7SRichard Henderson  * Translate from the 4-bit stage 2 representation of
2990966f4bb7SRichard Henderson  * memory attributes (without cache-allocation hints) to
2991966f4bb7SRichard Henderson  * the 8-bit representation of the stage 1 MAIR registers
2992966f4bb7SRichard Henderson  * (which includes allocation hints).
2993966f4bb7SRichard Henderson  *
2994966f4bb7SRichard Henderson  * ref: shared/translation/attrs/S2AttrDecode()
2995966f4bb7SRichard Henderson  *      .../S2ConvertAttrsHints()
2996966f4bb7SRichard Henderson  */
2997ac76c2e5SRichard Henderson static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
2998966f4bb7SRichard Henderson {
2999966f4bb7SRichard Henderson     uint8_t hiattr = extract32(s2attrs, 2, 2);
3000966f4bb7SRichard Henderson     uint8_t loattr = extract32(s2attrs, 0, 2);
3001966f4bb7SRichard Henderson     uint8_t hihint = 0, lohint = 0;
3002966f4bb7SRichard Henderson 
3003966f4bb7SRichard Henderson     if (hiattr != 0) { /* normal memory */
3004ac76c2e5SRichard Henderson         if (hcr & HCR_CD) { /* cache disabled */
3005966f4bb7SRichard Henderson             hiattr = loattr = 1; /* non-cacheable */
3006966f4bb7SRichard Henderson         } else {
3007966f4bb7SRichard Henderson             if (hiattr != 1) { /* Write-through or write-back */
3008966f4bb7SRichard Henderson                 hihint = 3; /* RW allocate */
3009966f4bb7SRichard Henderson             }
3010966f4bb7SRichard Henderson             if (loattr != 1) { /* Write-through or write-back */
3011966f4bb7SRichard Henderson                 lohint = 3; /* RW allocate */
3012966f4bb7SRichard Henderson             }
3013966f4bb7SRichard Henderson         }
3014966f4bb7SRichard Henderson     }
3015966f4bb7SRichard Henderson 
3016966f4bb7SRichard Henderson     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
3017966f4bb7SRichard Henderson }
3018966f4bb7SRichard Henderson 
3019966f4bb7SRichard Henderson /*
3020966f4bb7SRichard Henderson  * Combine either inner or outer cacheability attributes for normal
3021966f4bb7SRichard Henderson  * memory, according to table D4-42 and pseudocode procedure
3022966f4bb7SRichard Henderson  * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
3023966f4bb7SRichard Henderson  *
3024966f4bb7SRichard Henderson  * NB: only stage 1 includes allocation hints (RW bits), leading to
3025966f4bb7SRichard Henderson  * some asymmetry.
3026966f4bb7SRichard Henderson  */
3027966f4bb7SRichard Henderson static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
3028966f4bb7SRichard Henderson {
3029966f4bb7SRichard Henderson     if (s1 == 4 || s2 == 4) {
3030966f4bb7SRichard Henderson         /* non-cacheable has precedence */
3031966f4bb7SRichard Henderson         return 4;
3032966f4bb7SRichard Henderson     } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
3033966f4bb7SRichard Henderson         /* stage 1 write-through takes precedence */
3034966f4bb7SRichard Henderson         return s1;
3035966f4bb7SRichard Henderson     } else if (extract32(s2, 2, 2) == 2) {
3036966f4bb7SRichard Henderson         /* stage 2 write-through takes precedence, but the allocation hint
3037966f4bb7SRichard Henderson          * is still taken from stage 1
3038966f4bb7SRichard Henderson          */
3039966f4bb7SRichard Henderson         return (2 << 2) | extract32(s1, 0, 2);
3040966f4bb7SRichard Henderson     } else { /* write-back */
3041966f4bb7SRichard Henderson         return s1;
3042966f4bb7SRichard Henderson     }
3043966f4bb7SRichard Henderson }
3044966f4bb7SRichard Henderson 
3045966f4bb7SRichard Henderson /*
3046966f4bb7SRichard Henderson  * Combine the memory type and cacheability attributes of
3047966f4bb7SRichard Henderson  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
3048966f4bb7SRichard Henderson  * combined attributes in MAIR_EL1 format.
3049966f4bb7SRichard Henderson  */
3050ac76c2e5SRichard Henderson static uint8_t combined_attrs_nofwb(uint64_t hcr,
3051966f4bb7SRichard Henderson                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
3052966f4bb7SRichard Henderson {
3053966f4bb7SRichard Henderson     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
3054966f4bb7SRichard Henderson 
3055faa1451eSTobias Röhmel     if (s2.is_s2_format) {
3056ac76c2e5SRichard Henderson         s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
3057faa1451eSTobias Röhmel     } else {
3058faa1451eSTobias Röhmel         s2_mair_attrs = s2.attrs;
3059faa1451eSTobias Röhmel     }
3060966f4bb7SRichard Henderson 
3061966f4bb7SRichard Henderson     s1lo = extract32(s1.attrs, 0, 4);
3062966f4bb7SRichard Henderson     s2lo = extract32(s2_mair_attrs, 0, 4);
3063966f4bb7SRichard Henderson     s1hi = extract32(s1.attrs, 4, 4);
3064966f4bb7SRichard Henderson     s2hi = extract32(s2_mair_attrs, 4, 4);
3065966f4bb7SRichard Henderson 
3066966f4bb7SRichard Henderson     /* Combine memory type and cacheability attributes */
3067966f4bb7SRichard Henderson     if (s1hi == 0 || s2hi == 0) {
3068966f4bb7SRichard Henderson         /* Device has precedence over normal */
3069966f4bb7SRichard Henderson         if (s1lo == 0 || s2lo == 0) {
3070966f4bb7SRichard Henderson             /* nGnRnE has precedence over anything */
3071966f4bb7SRichard Henderson             ret_attrs = 0;
3072966f4bb7SRichard Henderson         } else if (s1lo == 4 || s2lo == 4) {
3073966f4bb7SRichard Henderson             /* non-Reordering has precedence over Reordering */
3074966f4bb7SRichard Henderson             ret_attrs = 4;  /* nGnRE */
3075966f4bb7SRichard Henderson         } else if (s1lo == 8 || s2lo == 8) {
3076966f4bb7SRichard Henderson             /* non-Gathering has precedence over Gathering */
3077966f4bb7SRichard Henderson             ret_attrs = 8;  /* nGRE */
3078966f4bb7SRichard Henderson         } else {
3079966f4bb7SRichard Henderson             ret_attrs = 0xc; /* GRE */
3080966f4bb7SRichard Henderson         }
3081966f4bb7SRichard Henderson     } else { /* Normal memory */
3082966f4bb7SRichard Henderson         /* Outer/inner cacheability combine independently */
3083966f4bb7SRichard Henderson         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
3084966f4bb7SRichard Henderson                   | combine_cacheattr_nibble(s1lo, s2lo);
3085966f4bb7SRichard Henderson     }
3086966f4bb7SRichard Henderson     return ret_attrs;
3087966f4bb7SRichard Henderson }
3088966f4bb7SRichard Henderson 
3089966f4bb7SRichard Henderson static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
3090966f4bb7SRichard Henderson {
3091966f4bb7SRichard Henderson     /*
3092966f4bb7SRichard Henderson      * Given the 4 bits specifying the outer or inner cacheability
3093966f4bb7SRichard Henderson      * in MAIR format, return a value specifying Normal Write-Back,
3094966f4bb7SRichard Henderson      * with the allocation and transient hints taken from the input
3095966f4bb7SRichard Henderson      * if the input specified some kind of cacheable attribute.
3096966f4bb7SRichard Henderson      */
3097966f4bb7SRichard Henderson     if (attr == 0 || attr == 4) {
3098966f4bb7SRichard Henderson         /*
3099966f4bb7SRichard Henderson          * 0 == an UNPREDICTABLE encoding
3100966f4bb7SRichard Henderson          * 4 == Non-cacheable
3101966f4bb7SRichard Henderson          * Either way, force Write-Back RW allocate non-transient
3102966f4bb7SRichard Henderson          */
3103966f4bb7SRichard Henderson         return 0xf;
3104966f4bb7SRichard Henderson     }
3105966f4bb7SRichard Henderson     /* Change WriteThrough to WriteBack, keep allocation and transient hints */
3106966f4bb7SRichard Henderson     return attr | 4;
3107966f4bb7SRichard Henderson }
3108966f4bb7SRichard Henderson 
3109966f4bb7SRichard Henderson /*
3110966f4bb7SRichard Henderson  * Combine the memory type and cacheability attributes of
3111966f4bb7SRichard Henderson  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
3112966f4bb7SRichard Henderson  * combined attributes in MAIR_EL1 format.
3113966f4bb7SRichard Henderson  */
311472cef09cSRichard Henderson static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
3115966f4bb7SRichard Henderson {
3116faa1451eSTobias Röhmel     assert(s2.is_s2_format && !s1.is_s2_format);
3117faa1451eSTobias Röhmel 
3118966f4bb7SRichard Henderson     switch (s2.attrs) {
3119966f4bb7SRichard Henderson     case 7:
3120966f4bb7SRichard Henderson         /* Use stage 1 attributes */
3121966f4bb7SRichard Henderson         return s1.attrs;
3122966f4bb7SRichard Henderson     case 6:
3123966f4bb7SRichard Henderson         /*
3124966f4bb7SRichard Henderson          * Force Normal Write-Back. Note that if S1 is Normal cacheable
3125966f4bb7SRichard Henderson          * then we take the allocation hints from it; otherwise it is
3126966f4bb7SRichard Henderson          * RW allocate, non-transient.
3127966f4bb7SRichard Henderson          */
3128966f4bb7SRichard Henderson         if ((s1.attrs & 0xf0) == 0) {
3129966f4bb7SRichard Henderson             /* S1 is Device */
3130966f4bb7SRichard Henderson             return 0xff;
3131966f4bb7SRichard Henderson         }
3132966f4bb7SRichard Henderson         /* Need to check the Inner and Outer nibbles separately */
3133966f4bb7SRichard Henderson         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
3134966f4bb7SRichard Henderson             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
3135966f4bb7SRichard Henderson     case 5:
3136966f4bb7SRichard Henderson         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
3137966f4bb7SRichard Henderson         if ((s1.attrs & 0xf0) == 0) {
3138966f4bb7SRichard Henderson             return s1.attrs;
3139966f4bb7SRichard Henderson         }
3140966f4bb7SRichard Henderson         return 0x44;
3141966f4bb7SRichard Henderson     case 0 ... 3:
3142966f4bb7SRichard Henderson         /* Force Device, of subtype specified by S2 */
3143966f4bb7SRichard Henderson         return s2.attrs << 2;
3144966f4bb7SRichard Henderson     default:
3145966f4bb7SRichard Henderson         /*
3146966f4bb7SRichard Henderson          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
3147966f4bb7SRichard Henderson          * arbitrarily force Device.
3148966f4bb7SRichard Henderson          */
3149966f4bb7SRichard Henderson         return 0;
3150966f4bb7SRichard Henderson     }
3151966f4bb7SRichard Henderson }
3152966f4bb7SRichard Henderson 
3153966f4bb7SRichard Henderson /*
3154966f4bb7SRichard Henderson  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
3155966f4bb7SRichard Henderson  * and CombineS1S2Desc()
3156966f4bb7SRichard Henderson  *
3157966f4bb7SRichard Henderson  * @env:     CPUARMState
3158966f4bb7SRichard Henderson  * @s1:      Attributes from stage 1 walk
3159966f4bb7SRichard Henderson  * @s2:      Attributes from stage 2 walk
3160966f4bb7SRichard Henderson  */
3161ac76c2e5SRichard Henderson static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
3162966f4bb7SRichard Henderson                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
3163966f4bb7SRichard Henderson {
3164966f4bb7SRichard Henderson     ARMCacheAttrs ret;
3165966f4bb7SRichard Henderson     bool tagged = false;
3166966f4bb7SRichard Henderson 
3167faa1451eSTobias Röhmel     assert(!s1.is_s2_format);
3168966f4bb7SRichard Henderson     ret.is_s2_format = false;
3169966f4bb7SRichard Henderson 
3170966f4bb7SRichard Henderson     if (s1.attrs == 0xf0) {
3171966f4bb7SRichard Henderson         tagged = true;
3172966f4bb7SRichard Henderson         s1.attrs = 0xff;
3173966f4bb7SRichard Henderson     }
3174966f4bb7SRichard Henderson 
3175966f4bb7SRichard Henderson     /* Combine shareability attributes (table D4-43) */
3176966f4bb7SRichard Henderson     if (s1.shareability == 2 || s2.shareability == 2) {
3177966f4bb7SRichard Henderson         /* if either are outer-shareable, the result is outer-shareable */
3178966f4bb7SRichard Henderson         ret.shareability = 2;
3179966f4bb7SRichard Henderson     } else if (s1.shareability == 3 || s2.shareability == 3) {
3180966f4bb7SRichard Henderson         /* if either are inner-shareable, the result is inner-shareable */
3181966f4bb7SRichard Henderson         ret.shareability = 3;
3182966f4bb7SRichard Henderson     } else {
3183966f4bb7SRichard Henderson         /* both non-shareable */
3184966f4bb7SRichard Henderson         ret.shareability = 0;
3185966f4bb7SRichard Henderson     }
3186966f4bb7SRichard Henderson 
3187966f4bb7SRichard Henderson     /* Combine memory type and cacheability attributes */
3188ac76c2e5SRichard Henderson     if (hcr & HCR_FWB) {
318972cef09cSRichard Henderson         ret.attrs = combined_attrs_fwb(s1, s2);
3190966f4bb7SRichard Henderson     } else {
3191ac76c2e5SRichard Henderson         ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
3192966f4bb7SRichard Henderson     }
3193966f4bb7SRichard Henderson 
3194966f4bb7SRichard Henderson     /*
3195966f4bb7SRichard Henderson      * Any location for which the resultant memory type is any
3196966f4bb7SRichard Henderson      * type of Device memory is always treated as Outer Shareable.
3197966f4bb7SRichard Henderson      * Any location for which the resultant memory type is Normal
3198966f4bb7SRichard Henderson      * Inner Non-cacheable, Outer Non-cacheable is always treated
3199966f4bb7SRichard Henderson      * as Outer Shareable.
3200966f4bb7SRichard Henderson      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
3201966f4bb7SRichard Henderson      */
3202966f4bb7SRichard Henderson     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
3203966f4bb7SRichard Henderson         ret.shareability = 2;
3204966f4bb7SRichard Henderson     }
3205966f4bb7SRichard Henderson 
3206966f4bb7SRichard Henderson     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
3207966f4bb7SRichard Henderson     if (tagged && ret.attrs == 0xff) {
3208966f4bb7SRichard Henderson         ret.attrs = 0xf0;
3209966f4bb7SRichard Henderson     }
3210966f4bb7SRichard Henderson 
3211966f4bb7SRichard Henderson     return ret;
3212966f4bb7SRichard Henderson }
3213966f4bb7SRichard Henderson 
3214448e42fdSRichard Henderson /*
3215448e42fdSRichard Henderson  * MMU disabled.  S1 addresses within aa64 translation regimes are
3216448e42fdSRichard Henderson  * still checked for bounds -- see AArch64.S1DisabledOutput().
3217448e42fdSRichard Henderson  */
3218a5637becSPeter Maydell static bool get_phys_addr_disabled(CPUARMState *env,
3219a5637becSPeter Maydell                                    S1Translate *ptw,
3220*67d762e7SArd Biesheuvel                                    vaddr address,
3221448e42fdSRichard Henderson                                    MMUAccessType access_type,
3222448e42fdSRichard Henderson                                    GetPhysAddrResult *result,
3223448e42fdSRichard Henderson                                    ARMMMUFaultInfo *fi)
3224448e42fdSRichard Henderson {
3225a5637becSPeter Maydell     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
32265b74f9b4SRichard Henderson     uint8_t memattr = 0x00;    /* Device nGnRnE */
322746f38c97SRichard Henderson     uint8_t shareability = 0;  /* non-shareable */
3228a1ce3084SRichard Henderson     int r_el;
3229448e42fdSRichard Henderson 
3230a1ce3084SRichard Henderson     switch (mmu_idx) {
3231a1ce3084SRichard Henderson     case ARMMMUIdx_Stage2:
3232a1ce3084SRichard Henderson     case ARMMMUIdx_Stage2_S:
3233a1ce3084SRichard Henderson     case ARMMMUIdx_Phys_S:
3234bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_NS:
3235bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_Root:
3236bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_Realm:
3237a1ce3084SRichard Henderson         break;
32385b74f9b4SRichard Henderson 
3239a1ce3084SRichard Henderson     default:
3240a1ce3084SRichard Henderson         r_el = regime_el(env, mmu_idx);
3241448e42fdSRichard Henderson         if (arm_el_is_aa64(env, r_el)) {
3242448e42fdSRichard Henderson             int pamax = arm_pamax(env_archcpu(env));
3243448e42fdSRichard Henderson             uint64_t tcr = env->cp15.tcr_el[r_el];
3244448e42fdSRichard Henderson             int addrtop, tbi;
3245448e42fdSRichard Henderson 
3246448e42fdSRichard Henderson             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
3247448e42fdSRichard Henderson             if (access_type == MMU_INST_FETCH) {
3248448e42fdSRichard Henderson                 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
3249448e42fdSRichard Henderson             }
3250448e42fdSRichard Henderson             tbi = (tbi >> extract64(address, 55, 1)) & 1;
3251448e42fdSRichard Henderson             addrtop = (tbi ? 55 : 63);
3252448e42fdSRichard Henderson 
3253448e42fdSRichard Henderson             if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
3254448e42fdSRichard Henderson                 fi->type = ARMFault_AddressSize;
3255448e42fdSRichard Henderson                 fi->level = 0;
3256448e42fdSRichard Henderson                 fi->stage2 = false;
3257448e42fdSRichard Henderson                 return 1;
3258448e42fdSRichard Henderson             }
3259448e42fdSRichard Henderson 
3260448e42fdSRichard Henderson             /*
3261448e42fdSRichard Henderson              * When TBI is disabled, we've just validated that all of the
3262448e42fdSRichard Henderson              * bits above PAMax are zero, so logically we only need to
3263448e42fdSRichard Henderson              * clear the top byte for TBI.  But it's clearer to follow
3264448e42fdSRichard Henderson              * the pseudocode set of addrdesc.paddress.
3265448e42fdSRichard Henderson              */
3266448e42fdSRichard Henderson             address = extract64(address, 0, 52);
3267448e42fdSRichard Henderson         }
3268448e42fdSRichard Henderson 
3269448e42fdSRichard Henderson         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
32705b74f9b4SRichard Henderson         if (r_el == 1) {
32712d12bb96SPeter Maydell             uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
3272448e42fdSRichard Henderson             if (hcr & HCR_DC) {
3273448e42fdSRichard Henderson                 if (hcr & HCR_DCT) {
3274448e42fdSRichard Henderson                     memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
3275448e42fdSRichard Henderson                 } else {
3276448e42fdSRichard Henderson                     memattr = 0xff;  /* Normal, WB, RWA */
3277448e42fdSRichard Henderson                 }
32785b74f9b4SRichard Henderson             }
32795b74f9b4SRichard Henderson         }
32803d9ca962SPeter Maydell         if (memattr == 0) {
32813d9ca962SPeter Maydell             if (access_type == MMU_INST_FETCH) {
3282448e42fdSRichard Henderson                 if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
3283448e42fdSRichard Henderson                     memattr = 0xee;  /* Normal, WT, RA, NT */
3284448e42fdSRichard Henderson                 } else {
3285448e42fdSRichard Henderson                     memattr = 0x44;  /* Normal, NC, No */
3286448e42fdSRichard Henderson                 }
32873d9ca962SPeter Maydell             }
328846f38c97SRichard Henderson             shareability = 2; /* outer shareable */
3289448e42fdSRichard Henderson         }
32905b74f9b4SRichard Henderson         result->cacheattrs.is_s2_format = false;
3291a1ce3084SRichard Henderson         break;
32925b74f9b4SRichard Henderson     }
32935b74f9b4SRichard Henderson 
32947fa7ea8fSRichard Henderson     result->f.phys_addr = address;
32957fa7ea8fSRichard Henderson     result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
32967fa7ea8fSRichard Henderson     result->f.lg_page_size = TARGET_PAGE_BITS;
32975b74f9b4SRichard Henderson     result->cacheattrs.shareability = shareability;
3298448e42fdSRichard Henderson     result->cacheattrs.attrs = memattr;
32996b72c542SRichard Henderson     return false;
3300448e42fdSRichard Henderson }
3301448e42fdSRichard Henderson 
33023f5a74c5SRichard Henderson static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
3303*67d762e7SArd Biesheuvel                                    vaddr address,
33044a358556SRichard Henderson                                    MMUAccessType access_type,
33054a358556SRichard Henderson                                    GetPhysAddrResult *result,
3306def8aa5bSRichard Henderson                                    ARMMMUFaultInfo *fi)
33078ae08860SRichard Henderson {
33088ae08860SRichard Henderson     hwaddr ipa;
3309c8d6c286SRichard Henderson     int s1_prot, s1_lgpgsz;
3310eeb9578cSPeter Maydell     ARMSecuritySpace in_space = ptw->in_space;
33114c09abeaSPeter Maydell     bool ret, ipa_secure, s1_guarded;
3312de05a709SRichard Henderson     ARMCacheAttrs cacheattrs1;
331390c66293SRichard Henderson     ARMSecuritySpace ipa_space;
3314ac76c2e5SRichard Henderson     uint64_t hcr;
33158ae08860SRichard Henderson 
331646f38c97SRichard Henderson     ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
33178ae08860SRichard Henderson 
331826ba00cfSPeter Maydell     /* If S1 fails, return early.  */
331926ba00cfSPeter Maydell     if (ret) {
33208ae08860SRichard Henderson         return ret;
33218ae08860SRichard Henderson     }
33228ae08860SRichard Henderson 
33237fa7ea8fSRichard Henderson     ipa = result->f.phys_addr;
33247fa7ea8fSRichard Henderson     ipa_secure = result->f.attrs.secure;
332590c66293SRichard Henderson     ipa_space = result->f.attrs.space;
33268ae08860SRichard Henderson 
33277c19b2d6SRichard Henderson     ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
3328fcc0b041SPeter Maydell     ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
332990c66293SRichard Henderson     ptw->in_space = ipa_space;
3330fcc0b041SPeter Maydell     ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
33318ae08860SRichard Henderson 
3332de05a709SRichard Henderson     /*
3333de05a709SRichard Henderson      * S1 is done, now do S2 translation.
33343f5a74c5SRichard Henderson      * Save the stage1 results so that we may merge prot and cacheattrs later.
3335de05a709SRichard Henderson      */
33367fa7ea8fSRichard Henderson     s1_prot = result->f.prot;
3337c8d6c286SRichard Henderson     s1_lgpgsz = result->f.lg_page_size;
33384c09abeaSPeter Maydell     s1_guarded = result->f.extra.arm.guarded;
3339de05a709SRichard Henderson     cacheattrs1 = result->cacheattrs;
3340de05a709SRichard Henderson     memset(result, 0, sizeof(*result));
3341de05a709SRichard Henderson 
334246f38c97SRichard Henderson     ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
33438ae08860SRichard Henderson     fi->s2addr = ipa;
3344de05a709SRichard Henderson 
33458ae08860SRichard Henderson     /* Combine the S1 and S2 perms.  */
33467fa7ea8fSRichard Henderson     result->f.prot &= s1_prot;
33478ae08860SRichard Henderson 
33488ae08860SRichard Henderson     /* If S2 fails, return early.  */
33498ae08860SRichard Henderson     if (ret) {
33508ae08860SRichard Henderson         return ret;
33518ae08860SRichard Henderson     }
33528ae08860SRichard Henderson 
3353c8d6c286SRichard Henderson     /*
33549e65f4e6SPeter Maydell      * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
33559e65f4e6SPeter Maydell      * this means "don't put this in the TLB"; in this case, return a
33569e65f4e6SPeter Maydell      * result with lg_page_size == 0 to achieve that. Otherwise,
33579e65f4e6SPeter Maydell      * use the maximum of the S1 & S2 page size, so that invalidation
33589e65f4e6SPeter Maydell      * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
33599e65f4e6SPeter Maydell      * we know the combined result permissions etc only cover the minimum
33609e65f4e6SPeter Maydell      * of the S1 and S2 page size, because we know that the common TLB code
33619e65f4e6SPeter Maydell      * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
33629e65f4e6SPeter Maydell      * and passing a larger page size value only affects invalidations.)
3363c8d6c286SRichard Henderson      */
33649e65f4e6SPeter Maydell     if (result->f.lg_page_size < TARGET_PAGE_BITS ||
33659e65f4e6SPeter Maydell         s1_lgpgsz < TARGET_PAGE_BITS) {
33669e65f4e6SPeter Maydell         result->f.lg_page_size = 0;
33679e65f4e6SPeter Maydell     } else if (result->f.lg_page_size < s1_lgpgsz) {
3368c8d6c286SRichard Henderson         result->f.lg_page_size = s1_lgpgsz;
3369c8d6c286SRichard Henderson     }
3370c8d6c286SRichard Henderson 
33718ae08860SRichard Henderson     /* Combine the S1 and S2 cache attributes. */
33722d12bb96SPeter Maydell     hcr = arm_hcr_el2_eff_secstate(env, in_space);
3373ac76c2e5SRichard Henderson     if (hcr & HCR_DC) {
33748ae08860SRichard Henderson         /*
33758ae08860SRichard Henderson          * HCR.DC forces the first stage attributes to
33768ae08860SRichard Henderson          *  Normal Non-Shareable,
33778ae08860SRichard Henderson          *  Inner Write-Back Read-Allocate Write-Allocate,
33788ae08860SRichard Henderson          *  Outer Write-Back Read-Allocate Write-Allocate.
33798ae08860SRichard Henderson          * Do not overwrite Tagged within attrs.
33808ae08860SRichard Henderson          */
3381de05a709SRichard Henderson         if (cacheattrs1.attrs != 0xf0) {
3382de05a709SRichard Henderson             cacheattrs1.attrs = 0xff;
33838ae08860SRichard Henderson         }
3384de05a709SRichard Henderson         cacheattrs1.shareability = 0;
33858ae08860SRichard Henderson     }
3386ac76c2e5SRichard Henderson     result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
3387de05a709SRichard Henderson                                             result->cacheattrs);
33888ae08860SRichard Henderson 
33894c09abeaSPeter Maydell     /* No BTI GP information in stage 2, we just use the S1 value */
33904c09abeaSPeter Maydell     result->f.extra.arm.guarded = s1_guarded;
33914c09abeaSPeter Maydell 
33929b5ba97aSRichard Henderson     /*
33939b5ba97aSRichard Henderson      * Check if IPA translates to secure or non-secure PA space.
33949b5ba97aSRichard Henderson      * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
33959b5ba97aSRichard Henderson      */
3396eeb9578cSPeter Maydell     if (in_space == ARMSS_Secure) {
33977fa7ea8fSRichard Henderson         result->f.attrs.secure =
3398eeb9578cSPeter Maydell             !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
33999b5ba97aSRichard Henderson             && (ipa_secure
3400eeb9578cSPeter Maydell                 || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
3401eeb9578cSPeter Maydell         result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
3402eeb9578cSPeter Maydell     }
34039b5ba97aSRichard Henderson 
34046b72c542SRichard Henderson     return false;
34053f5a74c5SRichard Henderson }
34063f5a74c5SRichard Henderson 
340746f38c97SRichard Henderson static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
3408*67d762e7SArd Biesheuvel                                       vaddr address,
34093f5a74c5SRichard Henderson                                       MMUAccessType access_type,
34103f5a74c5SRichard Henderson                                       GetPhysAddrResult *result,
34113f5a74c5SRichard Henderson                                       ARMMMUFaultInfo *fi)
34123f5a74c5SRichard Henderson {
34133f5a74c5SRichard Henderson     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
341448da29e4SRichard Henderson     ARMMMUIdx s1_mmu_idx;
34153f5a74c5SRichard Henderson 
3416cead7fa4SRichard Henderson     /*
341790c66293SRichard Henderson      * The page table entries may downgrade Secure to NonSecure, but
341890c66293SRichard Henderson      * cannot upgrade a NonSecure translation regime's attributes
341990c66293SRichard Henderson      * to Secure or Realm.
3420cead7fa4SRichard Henderson      */
342190c66293SRichard Henderson     result->f.attrs.space = ptw->in_space;
3422cdbae5e7SPeter Maydell     result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
3423cead7fa4SRichard Henderson 
342448da29e4SRichard Henderson     switch (mmu_idx) {
342548da29e4SRichard Henderson     case ARMMMUIdx_Phys_S:
342648da29e4SRichard Henderson     case ARMMMUIdx_Phys_NS:
3427bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_Root:
3428bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_Realm:
342948da29e4SRichard Henderson         /* Checking Phys early avoids special casing later vs regime_el. */
3430a5637becSPeter Maydell         return get_phys_addr_disabled(env, ptw, address, access_type,
3431a5637becSPeter Maydell                                       result, fi);
343248da29e4SRichard Henderson 
343348da29e4SRichard Henderson     case ARMMMUIdx_Stage1_E0:
343448da29e4SRichard Henderson     case ARMMMUIdx_Stage1_E1:
343548da29e4SRichard Henderson     case ARMMMUIdx_Stage1_E1_PAN:
3436cdbae5e7SPeter Maydell         /*
3437cdbae5e7SPeter Maydell          * First stage lookup uses second stage for ptw; only
3438cdbae5e7SPeter Maydell          * Secure has both S and NS IPA and starts with Stage2_S.
3439cdbae5e7SPeter Maydell          */
3440cdbae5e7SPeter Maydell         ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
3441cdbae5e7SPeter Maydell             ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
344248da29e4SRichard Henderson         break;
344348da29e4SRichard Henderson 
3444fcc0b041SPeter Maydell     case ARMMMUIdx_Stage2:
3445fcc0b041SPeter Maydell     case ARMMMUIdx_Stage2_S:
3446fcc0b041SPeter Maydell         /*
3447fcc0b041SPeter Maydell          * Second stage lookup uses physical for ptw; whether this is S or
3448fcc0b041SPeter Maydell          * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
3449fcc0b041SPeter Maydell          * the Secure EL2&0 regime.
3450fcc0b041SPeter Maydell          */
3451fcc0b041SPeter Maydell         ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
3452fcc0b041SPeter Maydell         break;
3453fcc0b041SPeter Maydell 
345448da29e4SRichard Henderson     case ARMMMUIdx_E10_0:
345548da29e4SRichard Henderson         s1_mmu_idx = ARMMMUIdx_Stage1_E0;
345648da29e4SRichard Henderson         goto do_twostage;
345748da29e4SRichard Henderson     case ARMMMUIdx_E10_1:
345848da29e4SRichard Henderson         s1_mmu_idx = ARMMMUIdx_Stage1_E1;
345948da29e4SRichard Henderson         goto do_twostage;
346048da29e4SRichard Henderson     case ARMMMUIdx_E10_1_PAN:
346148da29e4SRichard Henderson         s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
346248da29e4SRichard Henderson     do_twostage:
34638ae08860SRichard Henderson         /*
34643f5a74c5SRichard Henderson          * Call ourselves recursively to do the stage 1 and then stage 2
34653f5a74c5SRichard Henderson          * translations if mmu_idx is a two-stage regime, and EL2 present.
34663f5a74c5SRichard Henderson          * Otherwise, a stage1+stage2 translation is just stage 1.
34678ae08860SRichard Henderson          */
34683f5a74c5SRichard Henderson         ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
346926ba00cfSPeter Maydell         if (arm_feature(env, ARM_FEATURE_EL2) &&
3470d1289140SPeter Maydell             !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
34713f5a74c5SRichard Henderson             return get_phys_addr_twostage(env, ptw, address, access_type,
34723f5a74c5SRichard Henderson                                           result, fi);
34738ae08860SRichard Henderson         }
347448da29e4SRichard Henderson         /* fall through */
347548da29e4SRichard Henderson 
347648da29e4SRichard Henderson     default:
3477fcc0b041SPeter Maydell         /* Single stage uses physical for ptw. */
347890c66293SRichard Henderson         ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
347948da29e4SRichard Henderson         break;
34808ae08860SRichard Henderson     }
34818ae08860SRichard Henderson 
34827fa7ea8fSRichard Henderson     result->f.attrs.user = regime_is_user(env, mmu_idx);
34838ae08860SRichard Henderson 
34848ae08860SRichard Henderson     /*
34858ae08860SRichard Henderson      * Fast Context Switch Extension. This doesn't exist at all in v8.
34868ae08860SRichard Henderson      * In v7 and earlier it affects all stage 1 translations.
34878ae08860SRichard Henderson      */
34888ae08860SRichard Henderson     if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
34898ae08860SRichard Henderson         && !arm_feature(env, ARM_FEATURE_V8)) {
34908ae08860SRichard Henderson         if (regime_el(env, mmu_idx) == 3) {
34918ae08860SRichard Henderson             address += env->cp15.fcseidr_s;
34928ae08860SRichard Henderson         } else {
34938ae08860SRichard Henderson             address += env->cp15.fcseidr_ns;
34948ae08860SRichard Henderson         }
34958ae08860SRichard Henderson     }
34968ae08860SRichard Henderson 
34978ae08860SRichard Henderson     if (arm_feature(env, ARM_FEATURE_PMSA)) {
34988ae08860SRichard Henderson         bool ret;
34997fa7ea8fSRichard Henderson         result->f.lg_page_size = TARGET_PAGE_BITS;
35008ae08860SRichard Henderson 
35018ae08860SRichard Henderson         if (arm_feature(env, ARM_FEATURE_V8)) {
35028ae08860SRichard Henderson             /* PMSAv8 */
3503a5637becSPeter Maydell             ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
3504a5637becSPeter Maydell                                        result, fi);
35058ae08860SRichard Henderson         } else if (arm_feature(env, ARM_FEATURE_V7)) {
35068ae08860SRichard Henderson             /* PMSAv7 */
3507a5637becSPeter Maydell             ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
3508a5637becSPeter Maydell                                        result, fi);
35098ae08860SRichard Henderson         } else {
35108ae08860SRichard Henderson             /* Pre-v7 MPU */
3511a5637becSPeter Maydell             ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
3512a5637becSPeter Maydell                                        result, fi);
35138ae08860SRichard Henderson         }
35148ae08860SRichard Henderson         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
35158ae08860SRichard Henderson                       " mmu_idx %u -> %s (prot %c%c%c)\n",
35168ae08860SRichard Henderson                       access_type == MMU_DATA_LOAD ? "reading" :
35178ae08860SRichard Henderson                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
35188ae08860SRichard Henderson                       (uint32_t)address, mmu_idx,
35198ae08860SRichard Henderson                       ret ? "Miss" : "Hit",
35207fa7ea8fSRichard Henderson                       result->f.prot & PAGE_READ ? 'r' : '-',
35217fa7ea8fSRichard Henderson                       result->f.prot & PAGE_WRITE ? 'w' : '-',
35227fa7ea8fSRichard Henderson                       result->f.prot & PAGE_EXEC ? 'x' : '-');
35238ae08860SRichard Henderson 
35248ae08860SRichard Henderson         return ret;
35258ae08860SRichard Henderson     }
35268ae08860SRichard Henderson 
35278ae08860SRichard Henderson     /* Definitely a real MMU, not an MPU */
35288ae08860SRichard Henderson 
3529d1289140SPeter Maydell     if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
3530a5637becSPeter Maydell         return get_phys_addr_disabled(env, ptw, address, access_type,
3531a5637becSPeter Maydell                                       result, fi);
35328ae08860SRichard Henderson     }
35336d2654ffSRichard Henderson 
35348ae08860SRichard Henderson     if (regime_using_lpae_format(env, mmu_idx)) {
35357c19b2d6SRichard Henderson         return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
35366f2d9d74STimofey Kutergin     } else if (arm_feature(env, ARM_FEATURE_V7) ||
35376f2d9d74STimofey Kutergin                regime_sctlr(env, mmu_idx) & SCTLR_XP) {
35384a358556SRichard Henderson         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
35398ae08860SRichard Henderson     } else {
35404a358556SRichard Henderson         return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
35418ae08860SRichard Henderson     }
35428ae08860SRichard Henderson }
354323971205SRichard Henderson 
354446f38c97SRichard Henderson static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
3545*67d762e7SArd Biesheuvel                               vaddr address,
354646f38c97SRichard Henderson                               MMUAccessType access_type,
354746f38c97SRichard Henderson                               GetPhysAddrResult *result,
354846f38c97SRichard Henderson                               ARMMMUFaultInfo *fi)
354946f38c97SRichard Henderson {
355046f38c97SRichard Henderson     if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
355146f38c97SRichard Henderson         return true;
355246f38c97SRichard Henderson     }
355346f38c97SRichard Henderson     if (!granule_protection_check(env, result->f.phys_addr,
355446f38c97SRichard Henderson                                   result->f.attrs.space, fi)) {
355546f38c97SRichard Henderson         fi->type = ARMFault_GPCFOnOutput;
355646f38c97SRichard Henderson         return true;
355746f38c97SRichard Henderson     }
355846f38c97SRichard Henderson     return false;
355946f38c97SRichard Henderson }
356046f38c97SRichard Henderson 
3561*67d762e7SArd Biesheuvel bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
3562f1269a98SJean-Philippe Brucker                                     MMUAccessType access_type,
3563e1ee56ecSJean-Philippe Brucker                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
3564f1269a98SJean-Philippe Brucker                                     GetPhysAddrResult *result,
35654a358556SRichard Henderson                                     ARMMMUFaultInfo *fi)
35664a358556SRichard Henderson {
35674a358556SRichard Henderson     S1Translate ptw = {
35684a358556SRichard Henderson         .in_mmu_idx = mmu_idx,
3569e1ee56ecSJean-Philippe Brucker         .in_space = space,
35704a358556SRichard Henderson     };
3571f1269a98SJean-Philippe Brucker     return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
35724a358556SRichard Henderson }
35734a358556SRichard Henderson 
/*
 * Main entry point for a full (stage 1 + stage 2) address translation:
 * derive the architectural security space from @mmu_idx and the current
 * CPU state, then perform the walk with a Granule Protection Check on
 * the output address.  Returns true on fault, with @fi filled in.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    S1Translate ptw = {
        .in_mmu_idx = mmu_idx,
    };
    ARMSecuritySpace ss;

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
        /*
         * Regimes below EL3: normally the space is whatever is current
         * below EL3, but AArch32 Secure PL1&0 must be treated as Secure.
         */
        if (arm_aa32_secure_pl1_0(env)) {
            ss = ARMSS_Secure;
        } else {
            ss = arm_security_space_below_el3(env);
        }
        break;
    case ARMMMUIdx_Stage2:
        /*
         * For Secure EL2, we need this index to be NonSecure;
         * otherwise this will already be NonSecure or Realm.
         */
        ss = arm_security_space_below_el3(env);
        if (ss == ARMSS_Secure) {
            ss = ARMSS_NonSecure;
        }
        break;
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        /* Non-secure physical and M-profile non-secure indexes. */
        ss = ARMSS_NonSecure;
        break;
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        /* Secure stage 2, secure physical, and M-profile secure indexes. */
        ss = ARMSS_Secure;
        break;
    case ARMMMUIdx_E3:
        /* With FEAT_RME, EL3 runs in the Root space; otherwise Secure. */
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            cpu_isar_feature(aa64_rme, env_archcpu(env))) {
            ss = ARMSS_Root;
        } else {
            ss = ARMSS_Secure;
        }
        break;
    case ARMMMUIdx_Phys_Root:
        ss = ARMSS_Root;
        break;
    case ARMMMUIdx_Phys_Realm:
        ss = ARMSS_Realm;
        break;
    default:
        /* No other mmu_idx values should reach the full-walk entry point. */
        g_assert_not_reached();
    }

    ptw.in_space = ss;
    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
}
3646def8aa5bSRichard Henderson 
364723971205SRichard Henderson hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
364823971205SRichard Henderson                                          MemTxAttrs *attrs)
364923971205SRichard Henderson {
365023971205SRichard Henderson     ARMCPU *cpu = ARM_CPU(cs);
365123971205SRichard Henderson     CPUARMState *env = &cpu->env;
365290c66293SRichard Henderson     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
365390c66293SRichard Henderson     ARMSecuritySpace ss = arm_security_space(env);
36544a358556SRichard Henderson     S1Translate ptw = {
365590c66293SRichard Henderson         .in_mmu_idx = mmu_idx,
365690c66293SRichard Henderson         .in_space = ss,
36574a358556SRichard Henderson         .in_debug = true,
36584a358556SRichard Henderson     };
3659de05a709SRichard Henderson     GetPhysAddrResult res = {};
366023971205SRichard Henderson     ARMMMUFaultInfo fi = {};
3661de05a709SRichard Henderson     bool ret;
366223971205SRichard Henderson 
366346f38c97SRichard Henderson     ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
36647fa7ea8fSRichard Henderson     *attrs = res.f.attrs;
366523971205SRichard Henderson 
366623971205SRichard Henderson     if (ret) {
366723971205SRichard Henderson         return -1;
366823971205SRichard Henderson     }
36697fa7ea8fSRichard Henderson     return res.f.phys_addr;
367023971205SRichard Henderson }
3671