xref: /openbmc/qemu/target/arm/ptw.c (revision e6459afb1ff4d86b361b14f4a2fc43f0d2b4d679)
18ae08860SRichard Henderson /*
28ae08860SRichard Henderson  * ARM page table walking.
38ae08860SRichard Henderson  *
48ae08860SRichard Henderson  * This code is licensed under the GNU GPL v2 or later.
58ae08860SRichard Henderson  *
68ae08860SRichard Henderson  * SPDX-License-Identifier: GPL-2.0-or-later
78ae08860SRichard Henderson  */
88ae08860SRichard Henderson 
98ae08860SRichard Henderson #include "qemu/osdep.h"
108ae08860SRichard Henderson #include "qemu/log.h"
111f2e87e5SRichard Henderson #include "qemu/range.h"
1271943a1eSRichard Henderson #include "qemu/main-loop.h"
13f3639a64SRichard Henderson #include "exec/exec-all.h"
1474781c08SPhilippe Mathieu-Daudé #include "exec/page-protection.h"
158ae08860SRichard Henderson #include "cpu.h"
168ae08860SRichard Henderson #include "internals.h"
175a534314SPeter Maydell #include "cpu-features.h"
182c1f429dSRichard Henderson #include "idau.h"
19007cd176SRichard Henderson #ifdef CONFIG_TCG
2070f168f8SRichard Henderson # include "tcg/oversized-guest.h"
21007cd176SRichard Henderson #endif
228ae08860SRichard Henderson 
typedef struct S1Translate {
    /*
     * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk.
     * Together with in_space, specifies the architectural translation regime.
     */
    ARMMMUIdx in_mmu_idx;
    /*
     * in_ptw_idx: specifies which mmuidx to use for the actual
     * page table descriptor load operations. This will be one of the
     * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     */
    ARMMMUIdx in_ptw_idx;
    /*
     * in_space: the security space for this walk. This plus
     * the in_mmu_idx specify the architectural translation regime.
     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
     * this field is updated accordingly.
     *
     * Note that the security space for the in_ptw_idx may be different
     * from that for the in_mmu_idx. We do not need to explicitly track
     * the in_ptw_idx security space because:
     *  - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx
     *    itself specifies the security space
     *  - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security
     *    space used for ptw reads is the same as that of the security
     *    space of the stage 1 translation for all cases except where
     *    stage 1 is Secure; in that case the only possibilities for
     *    the ptw read are Secure and NonSecure, and the in_ptw_idx
     *    value being Stage2 vs Stage2_S distinguishes those.
     */
    ARMSecuritySpace in_space;
    /*
     * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
     * accesses will not update the guest page table access flags
     * and will not change the state of the softmmu TLBs.
     */
    bool in_debug;
    /*
     * If this is stage 2 of a stage 1+2 page table walk, then this must
     * be true if stage 1 is an EL0 access; otherwise this is ignored.
     * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
     */
    bool in_s1_is_el0;
    /*
     * The out_* fields are filled in by the descriptor-load step of the
     * walk and consumed by the walker proper.
     * NOTE(review): the precise semantics are set by code outside this
     * view; the notes below are inferred from the names and should be
     * confirmed against the ptw load implementation.
     */
    /* out_rw: presumably whether the load mapping is writable (for HW
     * update of access/dirty flags) -- TODO confirm */
    bool out_rw;
    /* out_be: whether descriptor loads are big-endian -- TODO confirm */
    bool out_be;
    /* out_space: security space of the loaded descriptor's location */
    ARMSecuritySpace out_space;
    /* out_virt: virtual address used for the descriptor load */
    hwaddr out_virt;
    /* out_phys: physical address of the descriptor */
    hwaddr out_phys;
    /* out_host: host pointer for the descriptor page, when available */
    void *out_host;
} S1Translate;
756d2654ffSRichard Henderson 
/*
 * Forward declarations: these translation entry points are defined later
 * in this file but are needed before their definitions.
 */
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                vaddr address,
                                MMUAccessType access_type, MemOp memop,
                                GetPhysAddrResult *result,
                                ARMMMUFaultInfo *fi);

static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
                              vaddr address,
                              MMUAccessType access_type, MemOp memop,
                              GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi);

static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int user_rw, int prot_rw, int xn, int pxn,
                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa);
/*
 * This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS:
 * indexed by the 3-bit encoding, it yields the physical address size
 * in bits.  The table is sorted in ascending order of PA size.
 */
static const uint8_t pamax_map[] = {
    [0] = 32,
    [1] = 36,
    [2] = 40,
    [3] = 42,
    [4] = 44,
    [5] = 48,
    [6] = 52,
};
1021c73d848SRichard Henderson 
/*
 * Return the largest PARANGE encoding whose physical address size does
 * not exceed @bit_size.  Asserts if @bit_size is below the minimum
 * supported PA size (32 bits).
 */
uint8_t round_down_to_parange_index(uint8_t bit_size)
{
    int idx = ARRAY_SIZE(pamax_map);

    /* Scan downward; pamax_map is sorted ascending by PA size. */
    while (idx-- > 0) {
        if (pamax_map[idx] <= bit_size) {
            return idx;
        }
    }
    g_assert_not_reached();
}
112d54ffa54SDanny Canter 
/* Round @bit_size down to the nearest supported PA size, in bits. */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size)
{
    uint8_t index = round_down_to_parange_index(bit_size);

    return pamax_map[index];
}
117d54ffa54SDanny Canter 
11871e269fbSPeter Maydell /*
11971e269fbSPeter Maydell  * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
12071e269fbSPeter Maydell  * Note that machvirt_init calls this on a CPU that is inited but not realized!
12171e269fbSPeter Maydell  */
unsigned int arm_pamax(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        unsigned int parange =
            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

        /*
         * id_aa64mmfr0 is a read-only register so values outside of the
         * supported mappings can be considered an implementation error.
         */
        assert(parange < ARRAY_SIZE(pamax_map));
        return pamax_map[parange];
    }

    /* v7 or v8 with LPAE gets 40 bits of PA; anything else gets 32. */
    return arm_feature(env, ARM_FEATURE_LPAE) ? 40 : 32;
}
1431c73d848SRichard Henderson 
1441d261255SRichard Henderson /*
1451d261255SRichard Henderson  * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index
1461d261255SRichard Henderson  */
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    /* Only the combined stage 1+2 EL1&0 indexes need remapping. */
    if (mmu_idx == ARMMMUIdx_E10_0) {
        return ARMMMUIdx_Stage1_E0;
    }
    if (mmu_idx == ARMMMUIdx_E10_1) {
        return ARMMMUIdx_Stage1_E1;
    }
    if (mmu_idx == ARMMMUIdx_E10_1_PAN) {
        return ARMMMUIdx_Stage1_E1_PAN;
    }
    /* Everything else is already a stage 1 (or non-stage-1+2) index. */
    return mmu_idx;
}
1601d261255SRichard Henderson 
/* Return the stage 1 MMU index for the CPU's current translation regime. */
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
1651d261255SRichard Henderson 
166fcc0b041SPeter Maydell /*
167fcc0b041SPeter Maydell  * Return where we should do ptw loads from for a stage 2 walk.
168fcc0b041SPeter Maydell  * This depends on whether the address we are looking up is a
169fcc0b041SPeter Maydell  * Secure IPA or a NonSecure IPA, which we know from whether this is
170fcc0b041SPeter Maydell  * Stage2 or Stage2_S.
171fcc0b041SPeter Maydell  * If this is the Secure EL1&0 regime we need to check the NSW and SW bits.
172fcc0b041SPeter Maydell  */
static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
{
    /*
     * We're OK to check the current state of the CPU here because
     * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
     * changes.
     * (2) there's no way to do a lookup that cares about Stage 2 for a
     * different security state to the current one for AArch64, and AArch32
     * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
     * an NS stage 1+2 lookup while the NS bit is 0.)
     */
    if (!arm_el_is_aa64(env, 3)) {
        return ARMMMUIdx_Phys_NS;
    }

    switch (arm_security_space_below_el3(env)) {
    case ARMSS_NonSecure:
        return ARMMMUIdx_Phys_NS;
    case ARMSS_Realm:
        return ARMMMUIdx_Phys_Realm;
    case ARMSS_Secure:
        /*
         * A Secure IPA walk is forced NonSecure by VSTCR_EL2.SW;
         * a NonSecure IPA walk is forced NonSecure by VTCR_EL2.NSW.
         */
        if (stage2idx == ARMMMUIdx_Stage2_S
            ? (env->cp15.vstcr_el2 & VSTCR_SW)
            : (env->cp15.vtcr_el2 & VTCR_NSW)) {
            return ARMMMUIdx_Phys_NS;
        }
        return ARMMMUIdx_Phys_S;
    default:
        g_assert_not_reached();
    }
}
206fcc0b041SPeter Maydell 
/*
 * Return true if page table descriptor loads for this translation
 * regime are big-endian (SCTLR.EE set for the regime).
 */
static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
}
21111552bb0SRichard Henderson 
2123b318aaeSRichard Henderson /* Return the TTBR associated with this translation regime */
regime_ttbr(CPUARMState * env,ARMMMUIdx mmu_idx,int ttbrn)2133b318aaeSRichard Henderson static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
2143b318aaeSRichard Henderson {
2153b318aaeSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2) {
2163b318aaeSRichard Henderson         return env->cp15.vttbr_el2;
2173b318aaeSRichard Henderson     }
2183b318aaeSRichard Henderson     if (mmu_idx == ARMMMUIdx_Stage2_S) {
2193b318aaeSRichard Henderson         return env->cp15.vsttbr_el2;
2203b318aaeSRichard Henderson     }
2213b318aaeSRichard Henderson     if (ttbrn == 0) {
2223b318aaeSRichard Henderson         return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
2233b318aaeSRichard Henderson     } else {
2243b318aaeSRichard Henderson         return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
2253b318aaeSRichard Henderson     }
2263b318aaeSRichard Henderson }
2273b318aaeSRichard Henderson 
/* Return true if the specified stage of address translation is disabled */
static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
                                        ARMSecuritySpace space)
{
    uint64_t hcr_el2;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M-profile: per-security-state MPU_CTRL governs the MPU. */
        bool is_secure = arm_space_is_secure(space);
        switch (env->v7m.mpu_ctrl[is_secure] &
                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
        case R_V7M_MPU_CTRL_ENABLE_MASK:
            /* Enabled, but not for HardFault and NMI */
            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
            /* Enabled for all cases */
            return false;
        case 0:
        default:
            /*
             * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
             * we warned about that in armv7m_nvic.c when the guest set it.
             */
            return true;
        }
    }

    switch (mmu_idx) {
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /* HCR.DC means HCR.VM behaves as 1 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_TGE) {
            return true;
        }
        break;

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
        if (hcr_el2 & HCR_DC) {
            return true;
        }
        break;

    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        /* These regimes have no EL2 override; fall through to SCTLR.M. */
        break;

    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* No translation for physical address spaces. */
        return true;

    default:
        g_assert_not_reached();
    }

    /* Finally the regime's own SCTLR.M decides. */
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
3048db1a3a0SRichard Henderson 
/*
 * Perform a granule protection check on physical address @paddress for
 * an access from security space @pspace: walk the Granule Protection
 * Table rooted at GPTBR_EL3 and return true if the access is permitted.
 * On failure, fill in @fi (gpcf, level, paddr, paddr_space) and return
 * false.  If the GPC is not enabled (GPCCR_EL3.GPC clear), all accesses
 * pass.
 */
static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
                                     ARMSecuritySpace pspace,
                                     ARMMMUFaultInfo *fi)
{
    /* GPT reads are always made from the Root security space. */
    MemTxAttrs attrs = {
        .secure = true,
        .space = ARMSS_Root,
    };
    ARMCPU *cpu = env_archcpu(env);
    uint64_t gpccr = env->cp15.gpccr_el3;
    unsigned pps, pgs, l0gptsz, level = 0;
    uint64_t tableaddr, pps_mask, align, entry, index;
    AddressSpace *as;
    MemTxResult result;
    int gpi;

    if (!FIELD_EX64(gpccr, GPCCR, GPC)) {
        return true;
    }

    /*
     * GPC Priority 1 (R_GMGRR):
     * R_JWCSM: If the configuration of GPCCR_EL3 is invalid,
     * the access fails as GPT walk fault at level 0.
     */

    /*
     * Configuration of PPS to a value exceeding the implemented
     * physical address size is invalid.
     */
    pps = FIELD_EX64(gpccr, GPCCR, PPS);
    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
        goto fault_walk;
    }
    pps = pamax_map[pps];
    pps_mask = MAKE_64BIT_MASK(0, pps);

    switch (FIELD_EX64(gpccr, GPCCR, SH)) {
    case 0b10: /* outer shareable */
        break;
    case 0b00: /* non-shareable */
    case 0b11: /* inner shareable */
        /* Inner and Outer non-cacheable requires Outer shareable. */
        if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 &&
            FIELD_EX64(gpccr, GPCCR, IRGN) == 0) {
            goto fault_walk;
        }
        break;
    default:   /* reserved */
        goto fault_walk;
    }

    switch (FIELD_EX64(gpccr, GPCCR, PGS)) {
    case 0b00: /* 4KB */
        pgs = 12;
        break;
    case 0b01: /* 64KB */
        pgs = 16;
        break;
    case 0b10: /* 16KB */
        pgs = 14;
        break;
    default: /* reserved */
        goto fault_walk;
    }

    /* Note this field is read-only and fixed at reset. */
    l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);

    /*
     * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
     * R_CPDSB: A NonSecure physical address input exceeding PPS
     * does not experience any fault.
     */
    if (paddress & ~pps_mask) {
        if (pspace == ARMSS_NonSecure) {
            return true;
        }
        goto fault_size;
    }

    /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
    tableaddr = env->cp15.gptbr_el3 << 12;
    if (tableaddr & ~pps_mask) {
        goto fault_size;
    }

    /*
     * BADDR is aligned per a function of PPS and L0GPTSZ.
     * These bits of GPTBR_EL3 are RES0, but are not a configuration error,
     * unlike the RES0 bits of the GPT entries (R_XNKFZ).
     */
    align = MAX(pps - l0gptsz + 3, 12);
    align = MAKE_64BIT_MASK(0, align);
    tableaddr &= ~align;

    as = arm_addressspace(env_cpu(env), attrs);

    /* Level 0 lookup. */
    index = extract64(paddress, l0gptsz, pps - l0gptsz);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* block descriptor */
        if (entry >> 8) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        gpi = extract32(entry, 4, 4);
        goto found;
    case 3: /* table descriptor */
        tableaddr = entry & ~0xf;
        align = MAX(l0gptsz - pgs - 1, 12);
        align = MAKE_64BIT_MASK(0, align);
        if (tableaddr & (~pps_mask | align)) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        break;
    default: /* invalid */
        goto fault_walk;
    }

    /* Level 1 lookup */
    level = 1;
    index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4);
    tableaddr += index * 8;
    entry = address_space_ldq_le(as, tableaddr, attrs, &result);
    if (result != MEMTX_OK) {
        goto fault_eabt;
    }

    switch (extract32(entry, 0, 4)) {
    case 1: /* contiguous descriptor */
        if (entry >> 10) {
            goto fault_walk; /* RES0 bits not 0 */
        }
        /*
         * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE,
         * and because we cannot invalidate by pa, and thus will always
         * flush entire tlbs, we don't actually care about the range here
         * and can simply extract the GPI as the result.
         */
        if (extract32(entry, 8, 2) == 0) {
            goto fault_walk; /* reserved contig */
        }
        gpi = extract32(entry, 4, 4);
        break;
    default:
        /* Granules descriptor: one 4-bit GPI per granule in the entry. */
        index = extract64(paddress, pgs, 4);
        gpi = extract64(entry, index * 4, 4);
        break;
    }

 found:
    switch (gpi) {
    case 0b0000: /* no access */
        break;
    case 0b1111: /* all access */
        return true;
    case 0b1000:
    case 0b1001:
    case 0b1010:
    case 0b1011:
        /* Access permitted only from the single space encoded in gpi[1:0]. */
        if (pspace == (gpi & 3)) {
            return true;
        }
        break;
    default:
        goto fault_walk; /* reserved */
    }

    /* goto-ladder: set the fault class, then fill the common fields. */
    fi->gpcf = GPCF_Fail;
    goto fault_common;
 fault_eabt:
    fi->gpcf = GPCF_EABT;
    goto fault_common;
 fault_size:
    fi->gpcf = GPCF_AddressSize;
    goto fault_common;
 fault_walk:
    fi->gpcf = GPCF_Walk;
 fault_common:
    fi->level = level;
    fi->paddr = paddress;
    fi->paddr_space = pspace;
    return false;
}
49546f38c97SRichard Henderson 
static bool S1_attrs_are_device(uint8_t attrs)
{
    /*
     * This slightly under-decodes the MAIR_ELx field:
     * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE;
     * 0b0000dd1x is UNPREDICTABLE.
     * Device attributes are exactly those with a zero high nibble.
     */
    return (attrs >> 4) == 0;
}
505728b923fSRichard Henderson 
S2_attrs_are_device(uint64_t hcr,uint8_t attrs)506f3639a64SRichard Henderson static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs)
50711552bb0SRichard Henderson {
50811552bb0SRichard Henderson     /*
50911552bb0SRichard Henderson      * For an S1 page table walk, the stage 1 attributes are always
51011552bb0SRichard Henderson      * some form of "this is Normal memory". The combined S1+S2
51111552bb0SRichard Henderson      * attributes are therefore only Device if stage 2 specifies Device.
51211552bb0SRichard Henderson      * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
51311552bb0SRichard Henderson      * ie when cacheattrs.attrs bits [3:2] are 0b00.
51411552bb0SRichard Henderson      * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
51511552bb0SRichard Henderson      * when cacheattrs.attrs bit [2] is 0.
51611552bb0SRichard Henderson      */
517ac76c2e5SRichard Henderson     if (hcr & HCR_FWB) {
518f3639a64SRichard Henderson         return (attrs & 0x4) == 0;
51911552bb0SRichard Henderson     } else {
520f3639a64SRichard Henderson         return (attrs & 0xc) == 0;
52111552bb0SRichard Henderson     }
52211552bb0SRichard Henderson }
52311552bb0SRichard Henderson 
/*
 * Return the security space to use for stage 2 when doing
 * the S1 page table descriptor load.
 */
static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
                                          ARMMMUIdx s2_mmu_idx)
{
    if (!regime_is_stage2(s2_mmu_idx)) {
        /* ptw loads are from phys: the mmu idx itself says which space */
        return arm_phys_to_space(s2_mmu_idx);
    }

    /*
     * The security space for ptw reads is almost always the same
     * as that of the security space of the stage 1 translation.
     * The only exception is when stage 1 is Secure; in that case
     * the ptw read might be to the Secure or the NonSecure space
     * (but never Realm or Root), and the s2_mmu_idx tells us which.
     * Root translations are always single-stage.
     */
    if (s1_space != ARMSS_Secure) {
        assert(s2_mmu_idx != ARMMMUIdx_Stage2_S);
        assert(s1_space != ARMSS_Root);
        return s1_space;
    }
    return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S);
}
5523f74da44SPeter Maydell 
static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
{
    /*
     * For stage 2 faults in Secure EL2, S1NS indicates whether the
     * faulting IPA is in the Secure or NonSecure IPA space.
     * For all other kinds of fault, it is false.
     */
    if (space != ARMSS_Secure || !regime_is_stage2(s2_mmu_idx)) {
        return false;
    }
    return s2_mmu_idx == ARMMMUIdx_Stage2_S;
}
5634f51edd3SPeter Maydell 
/*
 * Translate a S1 pagetable walk through S2 if needed.
 *
 * On success, returns true with ptw->out_* describing how to access the
 * descriptor: out_phys (and possibly out_host), out_rw, out_be, out_space.
 * On failure, returns false with *fi filled in.
 */
static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                             hwaddr addr, ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
    uint8_t pte_attrs;

    ptw->out_virt = addr;

    if (unlikely(ptw->in_debug)) {
        /*
         * From gdbstub, do not use softmmu so that we don't modify the
         * state of the cpu at all, including softmmu tlb contents.
         */
        ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
        S1Translate s2ptw = {
            .in_mmu_idx = s2_mmu_idx,
            .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
            .in_space = s2_space,
            .in_debug = true,
        };
        GetPhysAddrResult s2 = { };

        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) {
            goto fail;
        }

        ptw->out_phys = s2.f.phys_addr;
        pte_attrs = s2.cacheattrs.attrs;
        /* Debug walks never go via a host pointer, so MMIO path is used. */
        ptw->out_host = NULL;
        ptw->out_rw = false;
        ptw->out_space = s2.f.attrs.space;
    } else {
#ifdef CONFIG_TCG
        CPUTLBEntryFull *full;
        int flags;

        /* Make fi visible to the tlb fill code for the duration of the probe. */
        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD,
                                      arm_to_core_mmu_idx(s2_mmu_idx),
                                      &ptw->out_host, &full);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            goto fail;
        }
        ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
        ptw->out_rw = full->prot & PAGE_WRITE;
        pte_attrs = full->extra.arm.pte_attrs;
        ptw->out_space = full->attrs.space;
#else
        g_assert_not_reached();
#endif
    }

    if (regime_is_stage2(s2_mmu_idx)) {
        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);

        if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
            /*
             * PTW set and S1 walk touched S2 Device memory:
             * generate Permission fault.
             */
            fi->type = ARMFault_Permission;
            fi->s2addr = addr;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
            return false;
        }
    }

    ptw->out_be = regime_translation_big_endian(env, mmu_idx);
    return true;

 fail:
    assert(fi->type != ARMFault_None);
    /* A GPC fault on the output of this walk is reported as a fault on the walk. */
    if (fi->type == ARMFault_GPCFOnOutput) {
        fi->type = ARMFault_GPCFOnWalk;
    }
    fi->s2addr = addr;
    fi->stage2 = regime_is_stage2(s2_mmu_idx);
    fi->s1ptw = fi->stage2;
    fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
    return false;
}
65111552bb0SRichard Henderson 
65211552bb0SRichard Henderson /* All loads done in the course of a page table walk go through here. */
arm_ldl_ptw(CPUARMState * env,S1Translate * ptw,ARMMMUFaultInfo * fi)65393e5b3a6SRichard Henderson static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
6546d2654ffSRichard Henderson                             ARMMMUFaultInfo *fi)
65511552bb0SRichard Henderson {
6565e79887bSRichard Henderson     CPUState *cs = env_cpu(env);
65771943a1eSRichard Henderson     void *host = ptw->out_host;
65811552bb0SRichard Henderson     uint32_t data;
65911552bb0SRichard Henderson 
66071943a1eSRichard Henderson     if (likely(host)) {
661f3639a64SRichard Henderson         /* Page tables are in RAM, and we have the host address. */
66271943a1eSRichard Henderson         data = qatomic_read((uint32_t *)host);
6634e7a2c98SRichard Henderson         if (ptw->out_be) {
66471943a1eSRichard Henderson             data = be32_to_cpu(data);
66511552bb0SRichard Henderson         } else {
66671943a1eSRichard Henderson             data = le32_to_cpu(data);
66711552bb0SRichard Henderson         }
668f3639a64SRichard Henderson     } else {
669f3639a64SRichard Henderson         /* Page tables are in MMIO. */
67090c66293SRichard Henderson         MemTxAttrs attrs = {
67190c66293SRichard Henderson             .space = ptw->out_space,
672b02f5e06SPeter Maydell             .secure = arm_space_is_secure(ptw->out_space),
67390c66293SRichard Henderson         };
674f3639a64SRichard Henderson         AddressSpace *as = arm_addressspace(cs, attrs);
675f3639a64SRichard Henderson         MemTxResult result = MEMTX_OK;
676f3639a64SRichard Henderson 
677f3639a64SRichard Henderson         if (ptw->out_be) {
678f3639a64SRichard Henderson             data = address_space_ldl_be(as, ptw->out_phys, attrs, &result);
679f3639a64SRichard Henderson         } else {
680f3639a64SRichard Henderson             data = address_space_ldl_le(as, ptw->out_phys, attrs, &result);
68111552bb0SRichard Henderson         }
682f3639a64SRichard Henderson         if (unlikely(result != MEMTX_OK)) {
68311552bb0SRichard Henderson             fi->type = ARMFault_SyncExternalOnWalk;
68411552bb0SRichard Henderson             fi->ea = arm_extabort_type(result);
68511552bb0SRichard Henderson             return 0;
68611552bb0SRichard Henderson         }
687f3639a64SRichard Henderson     }
688f3639a64SRichard Henderson     return data;
689f3639a64SRichard Henderson }
69011552bb0SRichard Henderson 
static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
                            ARMMMUFaultInfo *fi)
{
    void *host = ptw->out_host;
    uint64_t data;

    if (likely(host)) {
        /* Page tables are in RAM, and we have the host address. */
#ifdef CONFIG_ATOMIC64
        data = qatomic_read__nocheck((uint64_t *)host);
        data = ptw->out_be ? be64_to_cpu(data) : le64_to_cpu(data);
#else
        /* No 64-bit host atomics: fall back to a plain load. */
        data = ptw->out_be ? ldq_be_p(host) : ldq_le_p(host);
#endif
    } else {
        /* Page tables are in MMIO. */
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(env_cpu(env), attrs);
        MemTxResult result = MEMTX_OK;

        if (ptw->out_be) {
            data = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
        } else {
            data = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
        }
        if (unlikely(result != MEMTX_OK)) {
            /* External abort during the descriptor load. */
            fi->type = ARMFault_SyncExternalOnWalk;
            fi->ea = arm_extabort_type(result);
            return 0;
        }
    }
    return data;
}
73611552bb0SRichard Henderson 
/*
 * Atomically compare-and-swap the 64-bit descriptor already resolved in
 * *ptw from @old_val to @new_val (used for hardware update of the
 * access/dirty bits -- see the FEAT_HAFDBS note below).
 *
 * Returns the value observed at the descriptor (@new_val if the swap
 * happened).  On a stage 2 permission failure, fills in *fi and returns 0;
 * on an external abort in the MMIO path, fills in *fi and returns @old_val.
 */
static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                             uint64_t new_val, S1Translate *ptw,
                             ARMMMUFaultInfo *fi)
{
#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
    uint64_t cur_val;
    void *host = ptw->out_host;

    if (unlikely(!host)) {
        /* Page table in MMIO Memory Region */
        CPUState *cs = env_cpu(env);
        MemTxAttrs attrs = {
            .space = ptw->out_space,
            .secure = arm_space_is_secure(ptw->out_space),
        };
        AddressSpace *as = arm_addressspace(cs, attrs);
        MemTxResult result = MEMTX_OK;
        /*
         * MMIO accesses are not atomic; take the BQL so the load/compare/
         * store sequence below cannot race, releasing it on every exit path.
         */
        bool need_lock = !bql_locked();

        if (need_lock) {
            bql_lock();
        }
        if (ptw->out_be) {
            cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result);
            if (unlikely(result != MEMTX_OK)) {
                fi->type = ARMFault_SyncExternalOnWalk;
                fi->ea = arm_extabort_type(result);
                if (need_lock) {
                    bql_unlock();
                }
                return old_val;
            }
            if (cur_val == old_val) {
                address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result);
                if (unlikely(result != MEMTX_OK)) {
                    fi->type = ARMFault_SyncExternalOnWalk;
                    fi->ea = arm_extabort_type(result);
                    if (need_lock) {
                        bql_unlock();
                    }
                    return old_val;
                }
                cur_val = new_val;
            }
        } else {
            /* Little-endian mirror of the big-endian sequence above. */
            cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result);
            if (unlikely(result != MEMTX_OK)) {
                fi->type = ARMFault_SyncExternalOnWalk;
                fi->ea = arm_extabort_type(result);
                if (need_lock) {
                    bql_unlock();
                }
                return old_val;
            }
            if (cur_val == old_val) {
                address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result);
                if (unlikely(result != MEMTX_OK)) {
                    fi->type = ARMFault_SyncExternalOnWalk;
                    fi->ea = arm_extabort_type(result);
                    if (need_lock) {
                        bql_unlock();
                    }
                    return old_val;
                }
                cur_val = new_val;
            }
        }
        if (need_lock) {
            bql_unlock();
        }
        return cur_val;
    }

    /*
     * Raising a stage2 Protection fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;

        /* Make fi visible to the tlb fill code for the duration of the probe. */
        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, ptw->out_virt, 0,
                                      MMU_DATA_STORE,
                                      arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                      NULL, NULL);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            /*
             * We know this must be a stage 2 fault because the granule
             * protection table does not separately track read and write
             * permission, so all GPC faults are caught in S1_ptw_translate():
             * we only get here for "readable but not writeable".
             */
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

#ifdef CONFIG_ATOMIC64
    /* Byte-swap operands to memory order, cmpxchg, swap result back. */
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    /* Serialise against dma i/o with the BQL; no cpu-cpu race is possible. */
    bool locked = bql_locked();
    if (!locked) {
        bql_lock();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        bql_unlock();
    }
#endif

    return cur_val;
#else
    /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
    g_assert_not_reached();
#endif
}
89171943a1eSRichard Henderson 
/*
 * Compute the level-1 descriptor address for @address into *table.
 * Returns false if the relevant table walk is disabled (TTBCR.PD0/PD1),
 * in which case *table is not written.
 */
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                     uint32_t *table, uint32_t address)
{
    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
    uint64_t tcr = regime_tcr(env, mmu_idx);
    int maskshift = extract32(tcr, 0, 3);  /* TTBCR.N */
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base;

    if (address & mask) {
        /* Address falls in the TTBR1 region. */
        if (tcr & TTBCR_PD1) {
            /* Translation table walk disabled for TTBR1 */
            return false;
        }
        base = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
    } else {
        /* Address falls in the TTBR0 region, whose base width depends on N. */
        if (tcr & TTBCR_PD0) {
            /* Translation table walk disabled for TTBR0 */
            return false;
        }
        base = regime_ttbr(env, mmu_idx, 0) & ~((uint32_t)0x3fffu >> maskshift);
    }
    *table = base | ((address >> 18) & 0x3ffc);
    return true;
}
9184c74ab15SRichard Henderson 
9194845d3beSRichard Henderson /*
9204845d3beSRichard Henderson  * Translate section/page access permissions to page R/W protection flags
9214845d3beSRichard Henderson  * @env:         CPUARMState
9224845d3beSRichard Henderson  * @mmu_idx:     MMU index indicating required translation regime
9234845d3beSRichard Henderson  * @ap:          The 3-bit access permissions (AP[2:0])
9244845d3beSRichard Henderson  * @domain_prot: The 2-bit domain access permissions
9256f2d9d74STimofey Kutergin  * @is_user: TRUE if accessing from PL0
9264845d3beSRichard Henderson  */
ap_to_rw_prot_is_user(CPUARMState * env,ARMMMUIdx mmu_idx,int ap,int domain_prot,bool is_user)9276f2d9d74STimofey Kutergin static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx,
9286f2d9d74STimofey Kutergin                          int ap, int domain_prot, bool is_user)
9294845d3beSRichard Henderson {
9304845d3beSRichard Henderson     if (domain_prot == 3) {
9314845d3beSRichard Henderson         return PAGE_READ | PAGE_WRITE;
9324845d3beSRichard Henderson     }
9334845d3beSRichard Henderson 
9344845d3beSRichard Henderson     switch (ap) {
9354845d3beSRichard Henderson     case 0:
9364845d3beSRichard Henderson         if (arm_feature(env, ARM_FEATURE_V7)) {
9374845d3beSRichard Henderson             return 0;
9384845d3beSRichard Henderson         }
9394845d3beSRichard Henderson         switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
9404845d3beSRichard Henderson         case SCTLR_S:
9414845d3beSRichard Henderson             return is_user ? 0 : PAGE_READ;
9424845d3beSRichard Henderson         case SCTLR_R:
9434845d3beSRichard Henderson             return PAGE_READ;
9444845d3beSRichard Henderson         default:
9454845d3beSRichard Henderson             return 0;
9464845d3beSRichard Henderson         }
9474845d3beSRichard Henderson     case 1:
9484845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9494845d3beSRichard Henderson     case 2:
9504845d3beSRichard Henderson         if (is_user) {
9514845d3beSRichard Henderson             return PAGE_READ;
9524845d3beSRichard Henderson         } else {
9534845d3beSRichard Henderson             return PAGE_READ | PAGE_WRITE;
9544845d3beSRichard Henderson         }
9554845d3beSRichard Henderson     case 3:
9564845d3beSRichard Henderson         return PAGE_READ | PAGE_WRITE;
9574845d3beSRichard Henderson     case 4: /* Reserved.  */
9584845d3beSRichard Henderson         return 0;
9594845d3beSRichard Henderson     case 5:
9604845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ;
9614845d3beSRichard Henderson     case 6:
9624845d3beSRichard Henderson         return PAGE_READ;
9634845d3beSRichard Henderson     case 7:
9644845d3beSRichard Henderson         if (!arm_feature(env, ARM_FEATURE_V6K)) {
9654845d3beSRichard Henderson             return 0;
9664845d3beSRichard Henderson         }
9674845d3beSRichard Henderson         return PAGE_READ;
9684845d3beSRichard Henderson     default:
9694845d3beSRichard Henderson         g_assert_not_reached();
9704845d3beSRichard Henderson     }
9714845d3beSRichard Henderson }
9724845d3beSRichard Henderson 
9734845d3beSRichard Henderson /*
9746f2d9d74STimofey Kutergin  * Translate section/page access permissions to page R/W protection flags
9756f2d9d74STimofey Kutergin  * @env:         CPUARMState
9766f2d9d74STimofey Kutergin  * @mmu_idx:     MMU index indicating required translation regime
9776f2d9d74STimofey Kutergin  * @ap:          The 3-bit access permissions (AP[2:0])
9786f2d9d74STimofey Kutergin  * @domain_prot: The 2-bit domain access permissions
9796f2d9d74STimofey Kutergin  */
ap_to_rw_prot(CPUARMState * env,ARMMMUIdx mmu_idx,int ap,int domain_prot)9806f2d9d74STimofey Kutergin static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
9816f2d9d74STimofey Kutergin                          int ap, int domain_prot)
9826f2d9d74STimofey Kutergin {
9836f2d9d74STimofey Kutergin    return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
9846f2d9d74STimofey Kutergin                                 regime_is_user(env, mmu_idx));
9856f2d9d74STimofey Kutergin }
9866f2d9d74STimofey Kutergin 
9876f2d9d74STimofey Kutergin /*
9884845d3beSRichard Henderson  * Translate section/page access permissions to page R/W protection flags.
9894845d3beSRichard Henderson  * @ap:      The 2-bit simple AP (AP[2:1])
9904845d3beSRichard Henderson  * @is_user: TRUE if accessing from PL0
9914845d3beSRichard Henderson  */
simple_ap_to_rw_prot_is_user(int ap,bool is_user)9924845d3beSRichard Henderson static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
9934845d3beSRichard Henderson {
9944845d3beSRichard Henderson     switch (ap) {
9954845d3beSRichard Henderson     case 0:
9964845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ | PAGE_WRITE;
9974845d3beSRichard Henderson     case 1:
9984845d3beSRichard Henderson         return PAGE_READ | PAGE_WRITE;
9994845d3beSRichard Henderson     case 2:
10004845d3beSRichard Henderson         return is_user ? 0 : PAGE_READ;
10014845d3beSRichard Henderson     case 3:
10024845d3beSRichard Henderson         return PAGE_READ;
10034845d3beSRichard Henderson     default:
10044845d3beSRichard Henderson         g_assert_not_reached();
10054845d3beSRichard Henderson     }
10064845d3beSRichard Henderson }
10074845d3beSRichard Henderson 
/* As simple_ap_to_rw_prot_is_user(), with is_user derived from the regime. */
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
    bool is_user = regime_is_user(env, mmu_idx);

    return simple_ap_to_rw_prot_is_user(ap, is_user);
}
10124845d3beSRichard Henderson 
/*
 * get_phys_addr_v5: short-descriptor table walk, pre-ARMv6 format
 * (sections, coarse/fine second-level tables, no XN bits).
 *
 * @env: CPUARMState
 * @ptw: walk state; ptw->in_mmu_idx selects the translation regime
 * @address: 32-bit virtual address to translate
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @result: on success, filled with physical address, page size and prot
 * @fi: on failure, filled with the fault type; domain and level are
 *      always recorded on the fault path
 *
 * Returns true on a fault (with @fi valid), false on success.
 */
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    int level = 1;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    /* Translate the address of the descriptor itself before loading it. */
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    /* Descriptor bits [1:0] give the L1 entry type; [8:5] the domain. */
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    if (regime_el(env, ptw->in_mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if (type != 2) {
        /* Anything other than a section requires a level-2 lookup. */
        level = 2;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        /* Domain "no access" (0) or reserved (2): domain fault. */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        result->f.lg_page_size = 20; /* 1MB */
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            /* Address bits [15:14] select one of four 2-bit subpage APs. */
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            result->f.lg_page_size = 16;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            /* Address bits [11:10] select one of four 2-bit subpage APs. */
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            result->f.lg_page_size = 12;
            break;
        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
            if (type == 1) {
                /* ARMv6/XScale extended small page format */
                if (arm_feature(env, ARM_FEATURE_XSCALE)
                    || arm_feature(env, ARM_FEATURE_V6)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                    result->f.lg_page_size = 12;
                } else {
                    /*
                     * UNPREDICTABLE in ARMv5; we choose to take a
                     * page translation fault.
                     */
                    fi->type = ARMFault_Translation;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                result->f.lg_page_size = 10;
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
    /* This format has no XN bit: any accessible page is also executable. */
    result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
    if (!(result->f.prot & (1 << access_type))) {
        /* Access permission fault.  */
        fi->type = ARMFault_Permission;
        goto do_fault;
    }
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
1136f2d2f5ceSRichard Henderson 
/*
 * get_phys_addr_v6: short-descriptor table walk, ARMv6+ format
 * (supersections, sections, large/small pages, with XN/PXN/NS bits).
 *
 * @env: CPUARMState
 * @ptw: walk state; ptw->in_mmu_idx selects the translation regime and
 *       ptw->in_space the input security space
 * @address: 32-bit virtual address to translate
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
 * @result: on success, filled with physical address, page size, prot
 *          and output memory attributes (space/secure)
 * @fi: on failure, filled with the fault type; domain and level are
 *      always recorded on the fault path
 *
 * Returns true on a fault (with @fi valid), false on success.
 */
static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                             uint32_t address, MMUAccessType access_type,
                             GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
{
    ARMCPU *cpu = env_archcpu(env);
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    int level = 1;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;
    uint32_t dacr;
    bool ns;
    ARMSecuritySpace out_space;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
        /* Section translation fault if page walk is disabled by PD0 or PD1 */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    /* Translate the address of the descriptor itself before loading it. */
    if (!S1_ptw_translate(env, ptw, table, fi)) {
        goto do_fault;
    }
    desc = arm_ldl_ptw(env, ptw, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;
    }
    type = (desc & 3);
    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        fi->type = ARMFault_Translation;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    if (regime_el(env, mmu_idx) == 1) {
        dacr = env->cp15.dacr_ns;
    } else {
        dacr = env->cp15.dacr_s;
    }
    if (type == 1) {
        level = 2;
    }
    domain_prot = (dacr >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        /* Section or Page domain fault */
        fi->type = ARMFault_Domain;
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            /* Extended (>32-bit) base address bits from desc[23:20], [8:5]. */
            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
            result->f.lg_page_size = 24;  /* 16MB */
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            result->f.lg_page_size = 20;  /* 1MB */
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        ns = extract32(desc, 19, 1);
    } else {
        if (cpu_isar_feature(aa32_pxn, cpu)) {
            pxn = (desc >> 2) & 1;
        }
        ns = extract32(desc, 3, 1);
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        if (!S1_ptw_translate(env, ptw, table, fi)) {
            goto do_fault;
        }
        desc = arm_ldl_ptw(env, ptw, fi);
        if (fi->type != ARMFault_None) {
            goto do_fault;
        }
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            fi->type = ARMFault_Translation;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            result->f.lg_page_size = 16;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            result->f.lg_page_size = 12;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            g_assert_not_reached();
        }
    }
    out_space = ptw->in_space;
    if (ns) {
        /*
         * The NS bit will (as required by the architecture) have no effect if
         * the CPU doesn't support TZ or this is a non-secure translation
         * regime, because the output space will already be non-secure.
         */
        out_space = ARMSS_NonSecure;
    }
    if (domain_prot == 3) {
        /* Domain "manager": access checks are not performed. */
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        int user_rw, prot_rw;

        if (arm_feature(env, ARM_FEATURE_V6K) &&
                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
            /* The simplified model uses AP[0] as an access control bit.  */
            if ((ap & 1) == 0) {
                /* Access flag fault.  */
                fi->type = ARMFault_AccessFlag;
                goto do_fault;
            }
            prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
            user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
        } else {
            prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
            user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
        }

        /* Combine R/W perms with XN/PXN and security-space rules. */
        result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw,
                                    xn, pxn, result->f.attrs.space, out_space);
        if (!(result->f.prot & (1 << access_type))) {
            /* Access permission fault.  */
            fi->type = ARMFault_Permission;
            goto do_fault;
        }
    }
    result->f.attrs.space = out_space;
    result->f.attrs.secure = arm_space_is_secure(out_space);
    result->f.phys_addr = phys_addr;
    return false;
do_fault:
    fi->domain = domain;
    fi->level = level;
    return true;
}
129353c038efSRichard Henderson 
1294f8526edcSRichard Henderson /*
1295f8526edcSRichard Henderson  * Translate S2 section/page access permissions to protection flags
1296f8526edcSRichard Henderson  * @env:     CPUARMState
1297f8526edcSRichard Henderson  * @s2ap:    The 2-bit stage2 access permissions (S2AP)
1298f8526edcSRichard Henderson  * @xn:      XN (execute-never) bits
1299f8526edcSRichard Henderson  * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
1300f8526edcSRichard Henderson  */
get_S2prot_noexecute(int s2ap)13014a7d7702SRichard Henderson static int get_S2prot_noexecute(int s2ap)
1302f8526edcSRichard Henderson {
1303f8526edcSRichard Henderson     int prot = 0;
1304f8526edcSRichard Henderson 
1305f8526edcSRichard Henderson     if (s2ap & 1) {
1306f8526edcSRichard Henderson         prot |= PAGE_READ;
1307f8526edcSRichard Henderson     }
1308f8526edcSRichard Henderson     if (s2ap & 2) {
1309f8526edcSRichard Henderson         prot |= PAGE_WRITE;
1310f8526edcSRichard Henderson     }
13114a7d7702SRichard Henderson     return prot;
13124a7d7702SRichard Henderson }
13134a7d7702SRichard Henderson 
/*
 * Combine the stage-2 S2AP read/write permissions with the XN bits.
 * With FEAT_TTS2UXN present, XN is a 2-bit field distinguishing EL0 from
 * EL1 execute permission; otherwise only XN[1] is meaningful.
 */
static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
    int prot = get_S2prot_noexecute(s2ap);

    if (!cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
        /*
         * Without TTS2UXN, XN[1] clear means executable; for AArch32 EL2
         * the page must additionally be readable.
         */
        if (!extract32(xn, 1, 1) &&
            (arm_el_is_aa64(env, 2) || (prot & PAGE_READ))) {
            prot |= PAGE_EXEC;
        }
        return prot;
    }

    switch (xn) {
    case 0:
        /* Executable at both EL0 and EL1. */
        return prot | PAGE_EXEC;
    case 1:
        /* Executable at EL0 only. */
        return s1_is_el0 ? prot | PAGE_EXEC : prot;
    case 2:
        /* Not executable at any level. */
        return prot;
    case 3:
        /* Executable at EL1 only. */
        return s1_is_el0 ? prot : prot | PAGE_EXEC;
    default:
        g_assert_not_reached();
    }
}
1347f8526edcSRichard Henderson 
1348f8526edcSRichard Henderson /*
1349f8526edcSRichard Henderson  * Translate section/page access permissions to protection flags
1350f8526edcSRichard Henderson  * @env:     CPUARMState
1351f8526edcSRichard Henderson  * @mmu_idx: MMU index indicating required translation regime
1352f8526edcSRichard Henderson  * @is_aa64: TRUE if AArch64
13530231bdc8SPavel Skripkin  * @user_rw: Translated AP for user access
13540231bdc8SPavel Skripkin  * @prot_rw: Translated AP for privileged access
1355f8526edcSRichard Henderson  * @xn:      XN (execute-never) bit
1356f8526edcSRichard Henderson  * @pxn:     PXN (privileged execute-never) bit
13572f1ff4e7SRichard Henderson  * @in_pa:   The original input pa space
13582f1ff4e7SRichard Henderson  * @out_pa:  The output pa space, modified by NSTable, NS, and NSE
1359f8526edcSRichard Henderson  */
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
                      int user_rw, int prot_rw, int xn, int pxn,
                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    bool have_wxn;
    int wxn = 0;

    /* Stage-2 permissions are handled by get_S2prot(), not here. */
    assert(!regime_is_stage2(mmu_idx));

    if (is_user) {
        prot_rw = user_rw;
    } else {
        /*
         * PAN controls can forbid data accesses but don't affect insn fetch.
         * Plain PAN forbids data accesses if EL0 has data permissions;
         * PAN3 forbids data accesses if EL0 has either data or exec perms.
         * Note that for AArch64 the 'user can exec' case is exactly !xn.
         * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
         * do not affect EPAN.
         */
        if (user_rw && regime_is_pan(env, mmu_idx)) {
            prot_rw = 0;
        } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
                   regime_is_pan(env, mmu_idx) &&
                   (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
            prot_rw = 0;
        }
    }

    /*
     * When the output space differs from the input space, some
     * configurations forbid instruction fetch entirely: in those cases
     * we return the R/W permissions without ever adding PAGE_EXEC.
     */
    if (in_pa != out_pa) {
        switch (in_pa) {
        case ARMSS_Root:
            /*
             * R_ZWRVD: permission fault for insn fetched from non-Root,
             * I_WWBFB: SIF has no effect in EL3.
             */
            return prot_rw;
        case ARMSS_Realm:
            /*
             * R_PKTDS: permission fault for insn fetched from non-Realm,
             * for Realm EL2 or EL2&0.  The corresponding fault for EL1&0
             * happens during any stage2 translation.
             */
            switch (mmu_idx) {
            case ARMMMUIdx_E2:
            case ARMMMUIdx_E20_0:
            case ARMMMUIdx_E20_2:
            case ARMMMUIdx_E20_2_PAN:
                return prot_rw;
            default:
                break;
            }
            break;
        case ARMSS_Secure:
            if (env->cp15.scr_el3 & SCR_SIF) {
                return prot_rw;
            }
            break;
        default:
            /* Input NonSecure must have output NonSecure. */
            g_assert_not_reached();
        }
    }

    /* TODO have_wxn should be replaced with
     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
     * compatible processors have EL2, which is required for [U]WXN.
     */
    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);

    if (have_wxn) {
        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
    }

    if (is_aa64) {
        if (regime_has_2_ranges(mmu_idx) && !is_user) {
            /* Privileged exec is denied by PXN or by writable user pages. */
            xn = pxn || (user_rw & PAGE_WRITE);
        }
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        switch (regime_el(env, mmu_idx)) {
        case 1:
        case 3:
            if (is_user) {
                xn = xn || !(user_rw & PAGE_READ);
            } else {
                /* SCTLR.UWXN: user-writable pages are privileged-XN. */
                int uwxn = 0;
                if (have_wxn) {
                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
                }
                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
                     (uwxn && (user_rw & PAGE_WRITE));
            }
            break;
        case 2:
            break;
        }
    } else {
        /* Pre-v7 regimes have neither XN nor WXN controls. */
        xn = wxn = 0;
    }

    /* WXN: a writable page is never executable. */
    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
        return prot_rw;
    }
    return prot_rw | PAGE_EXEC;
}
1468f8526edcSRichard Henderson 
/*
 * aa32_va_parameters: decode the AArch32 translation controls (TTBCR /
 * HTCR / VTCR) for @mmu_idx into an ARMVAParameters struct: the T[01]SZ
 * size field, which TTBR range @va selects, and the EPD and HPD bits.
 * Stage 2 secure (ARMMMUIdx_Stage2_S) is not supported here.
 */
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
                                          ARMMMUIdx mmu_idx)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    uint32_t el = regime_el(env, mmu_idx);
    int select, tsz;
    bool epd, hpd;

    assert(mmu_idx != ARMMMUIdx_Stage2_S);

    if (mmu_idx == ARMMMUIdx_Stage2) {
        /* VTCR */
        bool sext = extract32(tcr, 4, 1);
        bool sign = extract32(tcr, 3, 1);

        /*
         * If the sign-extend bit is not the same as t0sz[3], the result
         * is unpredictable. Flag this as a guest error.
         */
        if (sign != sext) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
        }
        /* VTCR.T0SZ is a signed 4-bit field, biased by 8. */
        tsz = sextract32(tcr, 0, 4) + 8;
        select = 0;
        hpd = false;
        epd = false;
    } else if (el == 2) {
        /* HTCR */
        tsz = extract32(tcr, 0, 3);
        select = 0;
        hpd = extract64(tcr, 24, 1);
        epd = false;
    } else {
        /* TTBCR: two address ranges, TTBR0 (low) and TTBR1 (high). */
        int t0sz = extract32(tcr, 0, 3);
        int t1sz = extract32(tcr, 16, 3);

        if (t1sz == 0) {
            select = va > (0xffffffffu >> t0sz);
        } else {
            /* Note that we will detect errors later.  */
            select = va >= ~(0xffffffffu >> t1sz);
        }
        if (!select) {
            tsz = t0sz;
            epd = extract32(tcr, 7, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
            tsz = t1sz;
            epd = extract32(tcr, 23, 1);
            hpd = extract64(tcr, 42, 1);
        }
        /* For aarch32, hpd0 is not enabled without t2e as well.  */
        hpd &= extract32(tcr, 6, 1);
    }

    return (ARMVAParameters) {
        .tsz = tsz,
        .select = select,
        .epd = epd,
        .hpd = hpd,
    };
}
15322f0ec92eSRichard Henderson 
1533c5168785SRichard Henderson /*
1534c5168785SRichard Henderson  * check_s2_mmu_setup
1535c5168785SRichard Henderson  * @cpu:        ARMCPU
1536c5168785SRichard Henderson  * @is_aa64:    True if the translation regime is in AArch64 state
15370ffe5b7bSRichard Henderson  * @tcr:        VTCR_EL2 or VSTCR_EL2
15380ffe5b7bSRichard Henderson  * @ds:         Effective value of TCR.DS.
15390ffe5b7bSRichard Henderson  * @iasize:     Bitsize of IPAs
1540c5168785SRichard Henderson  * @stride:     Page-table stride (See the ARM ARM)
1541c5168785SRichard Henderson  *
15420ffe5b7bSRichard Henderson  * Decode the starting level of the S2 lookup, returning INT_MIN if
15430ffe5b7bSRichard Henderson  * the configuration is invalid.
1544c5168785SRichard Henderson  */
check_s2_mmu_setup(ARMCPU * cpu,bool is_aa64,uint64_t tcr,bool ds,int iasize,int stride)15450ffe5b7bSRichard Henderson static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
15460ffe5b7bSRichard Henderson                               bool ds, int iasize, int stride)
1547c5168785SRichard Henderson {
15480ffe5b7bSRichard Henderson     int sl0, sl2, startlevel, granulebits, levels;
15490ffe5b7bSRichard Henderson     int s1_min_iasize, s1_max_iasize;
15500ffe5b7bSRichard Henderson 
15510ffe5b7bSRichard Henderson     sl0 = extract32(tcr, 6, 2);
15520ffe5b7bSRichard Henderson     if (is_aa64) {
15530ffe5b7bSRichard Henderson         /*
15540ffe5b7bSRichard Henderson          * AArch64.S2InvalidSL: Interpretation of SL depends on the page size,
15550ffe5b7bSRichard Henderson          * so interleave AArch64.S2StartLevel.
1556c5168785SRichard Henderson          */
1557c5168785SRichard Henderson         switch (stride) {
15580ffe5b7bSRichard Henderson         case 9: /* 4KB */
15590ffe5b7bSRichard Henderson             /* SL2 is RES0 unless DS=1 & 4KB granule. */
15600ffe5b7bSRichard Henderson             sl2 = extract64(tcr, 33, 1);
15610ffe5b7bSRichard Henderson             if (ds && sl2) {
15620ffe5b7bSRichard Henderson                 if (sl0 != 0) {
15630ffe5b7bSRichard Henderson                     goto fail;
15640ffe5b7bSRichard Henderson                 }
15650ffe5b7bSRichard Henderson                 startlevel = -1;
15660ffe5b7bSRichard Henderson             } else {
15670ffe5b7bSRichard Henderson                 startlevel = 2 - sl0;
15680ffe5b7bSRichard Henderson                 switch (sl0) {
15690ffe5b7bSRichard Henderson                 case 2:
15700ffe5b7bSRichard Henderson                     if (arm_pamax(cpu) < 44) {
15710ffe5b7bSRichard Henderson                         goto fail;
1572c5168785SRichard Henderson                     }
1573c5168785SRichard Henderson                     break;
15740ffe5b7bSRichard Henderson                 case 3:
15750ffe5b7bSRichard Henderson                     if (!cpu_isar_feature(aa64_st, cpu)) {
15760ffe5b7bSRichard Henderson                         goto fail;
15770ffe5b7bSRichard Henderson                     }
15780ffe5b7bSRichard Henderson                     startlevel = 3;
15790ffe5b7bSRichard Henderson                     break;
15800ffe5b7bSRichard Henderson                 }
1581c5168785SRichard Henderson             }
1582c5168785SRichard Henderson             break;
15830ffe5b7bSRichard Henderson         case 11: /* 16KB */
15840ffe5b7bSRichard Henderson             switch (sl0) {
15850ffe5b7bSRichard Henderson             case 2:
15860ffe5b7bSRichard Henderson                 if (arm_pamax(cpu) < 42) {
15870ffe5b7bSRichard Henderson                     goto fail;
1588c5168785SRichard Henderson                 }
1589c5168785SRichard Henderson                 break;
15900ffe5b7bSRichard Henderson             case 3:
15910ffe5b7bSRichard Henderson                 if (!ds) {
15920ffe5b7bSRichard Henderson                     goto fail;
15930ffe5b7bSRichard Henderson                 }
15940ffe5b7bSRichard Henderson                 break;
15950ffe5b7bSRichard Henderson             }
15960ffe5b7bSRichard Henderson             startlevel = 3 - sl0;
15970ffe5b7bSRichard Henderson             break;
15980ffe5b7bSRichard Henderson         case 13: /* 64KB */
15990ffe5b7bSRichard Henderson             switch (sl0) {
16000ffe5b7bSRichard Henderson             case 2:
16010ffe5b7bSRichard Henderson                 if (arm_pamax(cpu) < 44) {
16020ffe5b7bSRichard Henderson                     goto fail;
16030ffe5b7bSRichard Henderson                 }
16040ffe5b7bSRichard Henderson                 break;
16050ffe5b7bSRichard Henderson             case 3:
16060ffe5b7bSRichard Henderson                 goto fail;
16070ffe5b7bSRichard Henderson             }
16080ffe5b7bSRichard Henderson             startlevel = 3 - sl0;
16090ffe5b7bSRichard Henderson             break;
1610c5168785SRichard Henderson         default:
1611c5168785SRichard Henderson             g_assert_not_reached();
1612c5168785SRichard Henderson         }
1613c5168785SRichard Henderson     } else {
16140ffe5b7bSRichard Henderson         /*
16150ffe5b7bSRichard Henderson          * Things are simpler for AArch32 EL2, with only 4k pages.
16160ffe5b7bSRichard Henderson          * There is no separate S2InvalidSL function, but AArch32.S2Walk
16170ffe5b7bSRichard Henderson          * begins with walkparms.sl0 in {'1x'}.
16180ffe5b7bSRichard Henderson          */
1619c5168785SRichard Henderson         assert(stride == 9);
16200ffe5b7bSRichard Henderson         if (sl0 >= 2) {
16210ffe5b7bSRichard Henderson             goto fail;
16220ffe5b7bSRichard Henderson         }
16230ffe5b7bSRichard Henderson         startlevel = 2 - sl0;
16240ffe5b7bSRichard Henderson     }
1625c5168785SRichard Henderson 
16260ffe5b7bSRichard Henderson     /* AArch{64,32}.S2InconsistentSL are functionally equivalent.  */
16270ffe5b7bSRichard Henderson     levels = 3 - startlevel;
16280ffe5b7bSRichard Henderson     granulebits = stride + 3;
16290ffe5b7bSRichard Henderson 
16300ffe5b7bSRichard Henderson     s1_min_iasize = levels * stride + granulebits + 1;
16310ffe5b7bSRichard Henderson     s1_max_iasize = s1_min_iasize + (stride - 1) + 4;
16320ffe5b7bSRichard Henderson 
16330ffe5b7bSRichard Henderson     if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
16340ffe5b7bSRichard Henderson         return startlevel;
1635c5168785SRichard Henderson     }
16360ffe5b7bSRichard Henderson 
16370ffe5b7bSRichard Henderson  fail:
16380ffe5b7bSRichard Henderson     return INT_MIN;
1639c5168785SRichard Henderson }
1640c5168785SRichard Henderson 
static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
                                  ARMGranuleSize gran, int level)
{
    /*
     * See pseudocode AArch64.BlockDescSupported(): block descriptors
     * are not valid at all levels, depending on the page size.
     */
    switch (gran) {
    case Gran4K:
        /* Level 0 blocks require FEAT_LPA2 (effective TCR.DS == 1). */
        if (level == 0) {
            return ds;
        }
        return level == 1 || level == 2;
    case Gran16K:
        /* Level 1 blocks likewise require FEAT_LPA2. */
        if (level == 1) {
            return ds;
        }
        return level == 2;
    case Gran64K:
        /* Level 1 blocks require a full 52-bit PA space (FEAT_LPA). */
        if (level == 1) {
            return arm_pamax(cpu) == 52;
        }
        return level == 2;
    default:
        g_assert_not_reached();
    }
}
1659d53e2507SPeter Maydell 
/* True if both HCR_EL2.NV and HCR_EL2.NV1 are set for this walk's space. */
static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
{
    const uint64_t mask = HCR_NV | HCR_NV1;
    return (arm_hcr_el2_eff_secstate(env, ptw->in_space) & mask) == mask;
}
1665dea9104aSPeter Maydell 
16663283222aSRichard Henderson /**
16673283222aSRichard Henderson  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
16683283222aSRichard Henderson  *
16693283222aSRichard Henderson  * Returns false if the translation was successful. Otherwise, phys_ptr,
16703283222aSRichard Henderson  * attrs, prot and page_size may not be filled in, and the populated fsr
16713283222aSRichard Henderson  * value provides information on why the translation aborted, in the format
16723283222aSRichard Henderson  * of a long-format DFSR/IFSR fault register, with the following caveat:
16733283222aSRichard Henderson  * the WnR bit is never set (the caller must do this).
16743283222aSRichard Henderson  *
16753283222aSRichard Henderson  * @env: CPUARMState
16766d2654ffSRichard Henderson  * @ptw: Current and next stage parameters for the walk.
16773283222aSRichard Henderson  * @address: virtual address to get physical address for
16783283222aSRichard Henderson  * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
1679c053f40bSRichard Henderson  * @memop: memory operation feeding this access, or 0 for none
168003ee9bbeSRichard Henderson  * @result: set on translation success,
16813283222aSRichard Henderson  * @fi: set to fault info if the translation fails
16823283222aSRichard Henderson  */
get_phys_addr_lpae(CPUARMState * env,S1Translate * ptw,uint64_t address,MMUAccessType access_type,MemOp memop,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)16836d2654ffSRichard Henderson static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
16846d2654ffSRichard Henderson                                uint64_t address,
1685c053f40bSRichard Henderson                                MMUAccessType access_type, MemOp memop,
1686c23f08a5SRichard Henderson                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
16873283222aSRichard Henderson {
16883283222aSRichard Henderson     ARMCPU *cpu = env_archcpu(env);
16896d2654ffSRichard Henderson     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
169015f8f467SArd Biesheuvel     int32_t level;
16913283222aSRichard Henderson     ARMVAParameters param;
16923283222aSRichard Henderson     uint64_t ttbr;
16933283222aSRichard Henderson     hwaddr descaddr, indexmask, indexmask_grainsize;
16943283222aSRichard Henderson     uint32_t tableattrs;
16953283222aSRichard Henderson     target_ulong page_size;
169645666091SRichard Henderson     uint64_t attrs;
16973283222aSRichard Henderson     int32_t stride;
16983283222aSRichard Henderson     int addrsize, inputsize, outputsize;
1699c1547bbaSPeter Maydell     uint64_t tcr = regime_tcr(env, mmu_idx);
17002f1ff4e7SRichard Henderson     int ap, xn, pxn;
17013283222aSRichard Henderson     uint32_t el = regime_el(env, mmu_idx);
17023283222aSRichard Henderson     uint64_t descaddrmask;
17033283222aSRichard Henderson     bool aarch64 = arm_el_is_aa64(env, el);
170471943a1eSRichard Henderson     uint64_t descriptor, new_descriptor;
17052f1ff4e7SRichard Henderson     ARMSecuritySpace out_space;
1706728b923fSRichard Henderson     bool device;
17073283222aSRichard Henderson 
17083283222aSRichard Henderson     /* TODO: This code does not support shareability levels. */
17093283222aSRichard Henderson     if (aarch64) {
17103283222aSRichard Henderson         int ps;
17113283222aSRichard Henderson 
17123283222aSRichard Henderson         param = aa64_va_parameters(env, address, mmu_idx,
1713478dccbbSPeter Maydell                                    access_type != MMU_INST_FETCH,
1714478dccbbSPeter Maydell                                    !arm_el_is_aa64(env, 1));
17153283222aSRichard Henderson         level = 0;
17163283222aSRichard Henderson 
17173283222aSRichard Henderson         /*
17183283222aSRichard Henderson          * If TxSZ is programmed to a value larger than the maximum,
17193283222aSRichard Henderson          * or smaller than the effective minimum, it is IMPLEMENTATION
17203283222aSRichard Henderson          * DEFINED whether we behave as if the field were programmed
17213283222aSRichard Henderson          * within bounds, or if a level 0 Translation fault is generated.
17223283222aSRichard Henderson          *
17233283222aSRichard Henderson          * With FEAT_LVA, fault on less than minimum becomes required,
17243283222aSRichard Henderson          * so our choice is to always raise the fault.
17253283222aSRichard Henderson          */
17263283222aSRichard Henderson         if (param.tsz_oob) {
172727c1b81dSRichard Henderson             goto do_translation_fault;
17283283222aSRichard Henderson         }
17293283222aSRichard Henderson 
17303283222aSRichard Henderson         addrsize = 64 - 8 * param.tbi;
17313283222aSRichard Henderson         inputsize = 64 - param.tsz;
17323283222aSRichard Henderson 
17333283222aSRichard Henderson         /*
17343283222aSRichard Henderson          * Bound PS by PARANGE to find the effective output address size.
17353283222aSRichard Henderson          * ID_AA64MMFR0 is a read-only register so values outside of the
17363283222aSRichard Henderson          * supported mappings can be considered an implementation error.
17373283222aSRichard Henderson          */
17383283222aSRichard Henderson         ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
17393283222aSRichard Henderson         ps = MIN(ps, param.ps);
17403283222aSRichard Henderson         assert(ps < ARRAY_SIZE(pamax_map));
17413283222aSRichard Henderson         outputsize = pamax_map[ps];
1742312b71abSArd Biesheuvel 
1743312b71abSArd Biesheuvel         /*
1744312b71abSArd Biesheuvel          * With LPA2, the effective output address (OA) size is at most 48 bits
1745312b71abSArd Biesheuvel          * unless TCR.DS == 1
1746312b71abSArd Biesheuvel          */
1747312b71abSArd Biesheuvel         if (!param.ds && param.gran != Gran64K) {
1748312b71abSArd Biesheuvel             outputsize = MIN(outputsize, 48);
1749312b71abSArd Biesheuvel         }
17503283222aSRichard Henderson     } else {
17513283222aSRichard Henderson         param = aa32_va_parameters(env, address, mmu_idx);
17523283222aSRichard Henderson         level = 1;
17533283222aSRichard Henderson         addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
17543283222aSRichard Henderson         inputsize = addrsize - param.tsz;
17553283222aSRichard Henderson         outputsize = 40;
17563283222aSRichard Henderson     }
17573283222aSRichard Henderson 
17583283222aSRichard Henderson     /*
17593283222aSRichard Henderson      * We determined the region when collecting the parameters, but we
17603283222aSRichard Henderson      * have not yet validated that the address is valid for the region.
17613283222aSRichard Henderson      * Extract the top bits and verify that they all match select.
17623283222aSRichard Henderson      *
17633283222aSRichard Henderson      * For aa32, if inputsize == addrsize, then we have selected the
17643283222aSRichard Henderson      * region by exclusion in aa32_va_parameters and there is no more
17653283222aSRichard Henderson      * validation to do here.
17663283222aSRichard Henderson      */
17673283222aSRichard Henderson     if (inputsize < addrsize) {
17683283222aSRichard Henderson         target_ulong top_bits = sextract64(address, inputsize,
17693283222aSRichard Henderson                                            addrsize - inputsize);
17703283222aSRichard Henderson         if (-top_bits != param.select) {
17713283222aSRichard Henderson             /* The gap between the two regions is a Translation fault */
177227c1b81dSRichard Henderson             goto do_translation_fault;
17733283222aSRichard Henderson         }
17743283222aSRichard Henderson     }
17753283222aSRichard Henderson 
17763c003f70SPeter Maydell     stride = arm_granule_bits(param.gran) - 3;
17773283222aSRichard Henderson 
17783283222aSRichard Henderson     /*
17793283222aSRichard Henderson      * Note that QEMU ignores shareability and cacheability attributes,
17803283222aSRichard Henderson      * so we don't need to do anything with the SH, ORGN, IRGN fields
17813283222aSRichard Henderson      * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
17823283222aSRichard Henderson      * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
17833283222aSRichard Henderson      * implement any ASID-like capability so we can ignore it (instead
17843283222aSRichard Henderson      * we will always flush the TLB any time the ASID is changed).
17853283222aSRichard Henderson      */
17863283222aSRichard Henderson     ttbr = regime_ttbr(env, mmu_idx, param.select);
17873283222aSRichard Henderson 
17883283222aSRichard Henderson     /*
17893283222aSRichard Henderson      * Here we should have set up all the parameters for the translation:
17903283222aSRichard Henderson      * inputsize, ttbr, epd, stride, tbi
17913283222aSRichard Henderson      */
17923283222aSRichard Henderson 
17933283222aSRichard Henderson     if (param.epd) {
17943283222aSRichard Henderson         /*
17953283222aSRichard Henderson          * Translation table walk disabled => Translation fault on TLB miss
17963283222aSRichard Henderson          * Note: This is always 0 on 64-bit EL2 and EL3.
17973283222aSRichard Henderson          */
179827c1b81dSRichard Henderson         goto do_translation_fault;
17993283222aSRichard Henderson     }
18003283222aSRichard Henderson 
1801edc05dd4SRichard Henderson     if (!regime_is_stage2(mmu_idx)) {
18023283222aSRichard Henderson         /*
18033283222aSRichard Henderson          * The starting level depends on the virtual address size (which can
18043283222aSRichard Henderson          * be up to 48 bits) and the translation granule size. It indicates
18053283222aSRichard Henderson          * the number of strides (stride bits at a time) needed to
18063283222aSRichard Henderson          * consume the bits of the input address. In the pseudocode this is:
18073283222aSRichard Henderson          *  level = 4 - RoundUp((inputsize - grainsize) / stride)
18083283222aSRichard Henderson          * where their 'inputsize' is our 'inputsize', 'grainsize' is
18093283222aSRichard Henderson          * our 'stride + 3' and 'stride' is our 'stride'.
18103283222aSRichard Henderson          * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
18113283222aSRichard Henderson          * = 4 - (inputsize - stride - 3 + stride - 1) / stride
18123283222aSRichard Henderson          * = 4 - (inputsize - 4) / stride;
18133283222aSRichard Henderson          */
18143283222aSRichard Henderson         level = 4 - (inputsize - 4) / stride;
18153283222aSRichard Henderson     } else {
18160ffe5b7bSRichard Henderson         int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds,
18170ffe5b7bSRichard Henderson                                             inputsize, stride);
18180ffe5b7bSRichard Henderson         if (startlevel == INT_MIN) {
18193283222aSRichard Henderson             level = 0;
182027c1b81dSRichard Henderson             goto do_translation_fault;
18213283222aSRichard Henderson         }
18223283222aSRichard Henderson         level = startlevel;
18233283222aSRichard Henderson     }
18243283222aSRichard Henderson 
18253283222aSRichard Henderson     indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3);
18263283222aSRichard Henderson     indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level)));
18273283222aSRichard Henderson 
18283283222aSRichard Henderson     /* Now we can extract the actual base address from the TTBR */
18293283222aSRichard Henderson     descaddr = extract64(ttbr, 0, 48);
18303283222aSRichard Henderson 
18313283222aSRichard Henderson     /*
18323283222aSRichard Henderson      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR.
18333283222aSRichard Henderson      *
18343283222aSRichard Henderson      * Otherwise, if the base address is out of range, raise AddressSizeFault.
18353283222aSRichard Henderson      * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
18363283222aSRichard Henderson      * but we've just cleared the bits above 47, so simplify the test.
18373283222aSRichard Henderson      */
18383283222aSRichard Henderson     if (outputsize > 48) {
18393283222aSRichard Henderson         descaddr |= extract64(ttbr, 2, 4) << 48;
18403283222aSRichard Henderson     } else if (descaddr >> outputsize) {
18413283222aSRichard Henderson         level = 0;
184227c1b81dSRichard Henderson         fi->type = ARMFault_AddressSize;
18433283222aSRichard Henderson         goto do_fault;
18443283222aSRichard Henderson     }
18453283222aSRichard Henderson 
18463283222aSRichard Henderson     /*
18473283222aSRichard Henderson      * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
18483283222aSRichard Henderson      * and also to mask out CnP (bit 0) which could validly be non-zero.
18493283222aSRichard Henderson      */
18503283222aSRichard Henderson     descaddr &= ~indexmask;
18513283222aSRichard Henderson 
18523283222aSRichard Henderson     /*
18533283222aSRichard Henderson      * For AArch32, the address field in the descriptor goes up to bit 39
18543283222aSRichard Henderson      * for both v7 and v8.  However, for v8 the SBZ bits [47:40] must be 0
18553283222aSRichard Henderson      * or an AddressSize fault is raised.  So for v8 we extract those SBZ
18563283222aSRichard Henderson      * bits as part of the address, which will be checked via outputsize.
18573283222aSRichard Henderson      * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2;
18583283222aSRichard Henderson      * the highest bits of a 52-bit output are placed elsewhere.
18593283222aSRichard Henderson      */
18603283222aSRichard Henderson     if (param.ds) {
18613283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 50);
18623283222aSRichard Henderson     } else if (arm_feature(env, ARM_FEATURE_V8)) {
18633283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 48);
18643283222aSRichard Henderson     } else {
18653283222aSRichard Henderson         descaddrmask = MAKE_64BIT_MASK(0, 40);
18663283222aSRichard Henderson     }
18673283222aSRichard Henderson     descaddrmask &= ~indexmask_grainsize;
186826d19945SRichard Henderson     tableattrs = 0;
18693283222aSRichard Henderson 
1870fe4ddc15SRichard Henderson  next_level:
18713283222aSRichard Henderson     descaddr |= (address >> (stride * (4 - level))) & indexmask;
18723283222aSRichard Henderson     descaddr &= ~7ULL;
187326d19945SRichard Henderson 
187426d19945SRichard Henderson     /*
187526d19945SRichard Henderson      * Process the NSTable bit from the previous level.  This changes
187626d19945SRichard Henderson      * the table address space and the output space from Secure to
187726d19945SRichard Henderson      * NonSecure.  With RME, the EL3 translation regime does not change
187826d19945SRichard Henderson      * from Root to NonSecure.
187926d19945SRichard Henderson      */
188026d19945SRichard Henderson     if (ptw->in_space == ARMSS_Secure
188126d19945SRichard Henderson         && !regime_is_stage2(mmu_idx)
188226d19945SRichard Henderson         && extract32(tableattrs, 4, 1)) {
188348da29e4SRichard Henderson         /*
188448da29e4SRichard Henderson          * Stage2_S -> Stage2 or Phys_S -> Phys_NS
1885d38fa967SRichard Henderson          * Assert the relative order of the secure/non-secure indexes.
188648da29e4SRichard Henderson          */
1887d38fa967SRichard Henderson         QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
1888d38fa967SRichard Henderson         QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
1889d38fa967SRichard Henderson         ptw->in_ptw_idx += 1;
189026d19945SRichard Henderson         ptw->in_space = ARMSS_NonSecure;
189148da29e4SRichard Henderson     }
189226d19945SRichard Henderson 
189393e5b3a6SRichard Henderson     if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
189493e5b3a6SRichard Henderson         goto do_fault;
189593e5b3a6SRichard Henderson     }
189693e5b3a6SRichard Henderson     descriptor = arm_ldq_ptw(env, ptw, fi);
18973283222aSRichard Henderson     if (fi->type != ARMFault_None) {
18983283222aSRichard Henderson         goto do_fault;
18993283222aSRichard Henderson     }
190071943a1eSRichard Henderson     new_descriptor = descriptor;
19013283222aSRichard Henderson 
190271943a1eSRichard Henderson  restart_atomic_update:
1903d53e2507SPeter Maydell     if (!(descriptor & 1) ||
1904d53e2507SPeter Maydell         (!(descriptor & 2) &&
1905d53e2507SPeter Maydell          !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
1906d53e2507SPeter Maydell         /* Invalid, or a block descriptor at an invalid level */
190727c1b81dSRichard Henderson         goto do_translation_fault;
19083283222aSRichard Henderson     }
19093283222aSRichard Henderson 
19103283222aSRichard Henderson     descaddr = descriptor & descaddrmask;
19113283222aSRichard Henderson 
19123283222aSRichard Henderson     /*
19133283222aSRichard Henderson      * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12]
19143283222aSRichard Henderson      * of descriptor.  For FEAT_LPA2 and effective DS, bits [51:50] of
19153283222aSRichard Henderson      * descaddr are in [9:8].  Otherwise, if descaddr is out of range,
19163283222aSRichard Henderson      * raise AddressSizeFault.
19173283222aSRichard Henderson      */
19183283222aSRichard Henderson     if (outputsize > 48) {
19193283222aSRichard Henderson         if (param.ds) {
19203283222aSRichard Henderson             descaddr |= extract64(descriptor, 8, 2) << 50;
19213283222aSRichard Henderson         } else {
19223283222aSRichard Henderson             descaddr |= extract64(descriptor, 12, 4) << 48;
19233283222aSRichard Henderson         }
19243283222aSRichard Henderson     } else if (descaddr >> outputsize) {
192527c1b81dSRichard Henderson         fi->type = ARMFault_AddressSize;
19263283222aSRichard Henderson         goto do_fault;
19273283222aSRichard Henderson     }
19283283222aSRichard Henderson 
19293283222aSRichard Henderson     if ((descriptor & 2) && (level < 3)) {
19303283222aSRichard Henderson         /*
19313283222aSRichard Henderson          * Table entry. The top five bits are attributes which may
19323283222aSRichard Henderson          * propagate down through lower levels of the table (and
19333283222aSRichard Henderson          * which are all arranged so that 0 means "no effect", so
19343283222aSRichard Henderson          * we can gather them up by ORing in the bits at each level).
19353283222aSRichard Henderson          */
19363283222aSRichard Henderson         tableattrs |= extract64(descriptor, 59, 5);
19373283222aSRichard Henderson         level++;
19383283222aSRichard Henderson         indexmask = indexmask_grainsize;
1939fe4ddc15SRichard Henderson         goto next_level;
19403283222aSRichard Henderson     }
1941fe4ddc15SRichard Henderson 
19423283222aSRichard Henderson     /*
19433283222aSRichard Henderson      * Block entry at level 1 or 2, or page entry at level 3.
19443283222aSRichard Henderson      * These are basically the same thing, although the number
19453283222aSRichard Henderson      * of bits we pull in from the vaddr varies. Note that although
19463283222aSRichard Henderson      * descaddrmask masks enough of the low bits of the descriptor
19473283222aSRichard Henderson      * to give a correct page or table address, the address field
19483283222aSRichard Henderson      * in a block descriptor is smaller; so we need to explicitly
19493283222aSRichard Henderson      * clear the lower bits here before ORing in the low vaddr bits.
195071943a1eSRichard Henderson      *
195171943a1eSRichard Henderson      * Afterward, descaddr is the final physical address.
19523283222aSRichard Henderson      */
19533283222aSRichard Henderson     page_size = (1ULL << ((stride * (4 - level)) + 3));
1954c2360eaaSPeter Maydell     descaddr &= ~(hwaddr)(page_size - 1);
19553283222aSRichard Henderson     descaddr |= (address & (page_size - 1));
19563283222aSRichard Henderson 
195771943a1eSRichard Henderson     if (likely(!ptw->in_debug)) {
195834a57faeSRichard Henderson         /*
195971943a1eSRichard Henderson          * Access flag.
196071943a1eSRichard Henderson          * If HA is enabled, prepare to update the descriptor below.
196171943a1eSRichard Henderson          * Otherwise, pass the access fault on to software.
196234a57faeSRichard Henderson          */
196371943a1eSRichard Henderson         if (!(descriptor & (1 << 10))) {
196471943a1eSRichard Henderson             if (param.ha) {
196571943a1eSRichard Henderson                 new_descriptor |= 1 << 10; /* AF */
196671943a1eSRichard Henderson             } else {
196771943a1eSRichard Henderson                 fi->type = ARMFault_AccessFlag;
196871943a1eSRichard Henderson                 goto do_fault;
196971943a1eSRichard Henderson             }
197071943a1eSRichard Henderson         }
197165c123fdSRichard Henderson 
197265c123fdSRichard Henderson         /*
197365c123fdSRichard Henderson          * Dirty Bit.
197465c123fdSRichard Henderson          * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP
197565c123fdSRichard Henderson          * bit for writeback. The actual write protection test may still be
197665c123fdSRichard Henderson          * overridden by tableattrs, to be merged below.
197765c123fdSRichard Henderson          */
197865c123fdSRichard Henderson         if (param.hd
197965c123fdSRichard Henderson             && extract64(descriptor, 51, 1)  /* DBM */
198065c123fdSRichard Henderson             && access_type == MMU_DATA_STORE) {
198165c123fdSRichard Henderson             if (regime_is_stage2(mmu_idx)) {
198265c123fdSRichard Henderson                 new_descriptor |= 1ull << 7;    /* set S2AP[1] */
198365c123fdSRichard Henderson             } else {
198465c123fdSRichard Henderson                 new_descriptor &= ~(1ull << 7); /* clear AP[2] */
198565c123fdSRichard Henderson             }
198665c123fdSRichard Henderson         }
198771943a1eSRichard Henderson     }
198871943a1eSRichard Henderson 
198971943a1eSRichard Henderson     /*
199071943a1eSRichard Henderson      * Extract attributes from the (modified) descriptor, and apply
199171943a1eSRichard Henderson      * table descriptors. Stage 2 table descriptors do not include
199271943a1eSRichard Henderson      * any attribute fields. HPD disables all the table attributes
1993b9c139dcSPeter Maydell      * except NSTable (which we have already handled).
199471943a1eSRichard Henderson      */
199571943a1eSRichard Henderson     attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
199634a57faeSRichard Henderson     if (!regime_is_stage2(mmu_idx)) {
199734a57faeSRichard Henderson         if (!param.hpd) {
199845666091SRichard Henderson             attrs |= extract64(tableattrs, 0, 2) << 53;     /* XN, PXN */
19993283222aSRichard Henderson             /*
20003283222aSRichard Henderson              * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
20013283222aSRichard Henderson              * means "force PL1 access only", which means forcing AP[1] to 0.
20023283222aSRichard Henderson              */
200345666091SRichard Henderson             attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
200445666091SRichard Henderson             attrs |= extract32(tableattrs, 3, 1) << 7;    /* APT[1] => AP[2] */
200534a57faeSRichard Henderson         }
200634a57faeSRichard Henderson     }
2007fe4ddc15SRichard Henderson 
200845666091SRichard Henderson     ap = extract32(attrs, 6, 2);
20092f1ff4e7SRichard Henderson     out_space = ptw->in_space;
2010edc05dd4SRichard Henderson     if (regime_is_stage2(mmu_idx)) {
20112f1ff4e7SRichard Henderson         /*
20122f1ff4e7SRichard Henderson          * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
20132f1ff4e7SRichard Henderson          * The bit remains ignored for other security states.
20144a7d7702SRichard Henderson          * R_YMCSL: Executing an insn fetched from non-Realm causes
20154a7d7702SRichard Henderson          * a stage2 permission fault.
20162f1ff4e7SRichard Henderson          */
20172f1ff4e7SRichard Henderson         if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
20182f1ff4e7SRichard Henderson             out_space = ARMSS_NonSecure;
20194a7d7702SRichard Henderson             result->f.prot = get_S2prot_noexecute(ap);
20204a7d7702SRichard Henderson         } else {
202145666091SRichard Henderson             xn = extract64(attrs, 53, 2);
20227c19b2d6SRichard Henderson             result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
20234a7d7702SRichard Henderson         }
202464bda510SRichard Henderson 
202564bda510SRichard Henderson         result->cacheattrs.is_s2_format = true;
202664bda510SRichard Henderson         result->cacheattrs.attrs = extract32(attrs, 2, 4);
202764bda510SRichard Henderson         /*
202864bda510SRichard Henderson          * Security state does not really affect HCR_EL2.FWB;
202964bda510SRichard Henderson          * we only need to filter FWB for aa32 or other FEAT.
203064bda510SRichard Henderson          */
203164bda510SRichard Henderson         device = S2_attrs_are_device(arm_hcr_el2_eff(env),
203264bda510SRichard Henderson                                      result->cacheattrs.attrs);
20333283222aSRichard Henderson     } else {
20342f1ff4e7SRichard Henderson         int nse, ns = extract32(attrs, 5, 1);
203564bda510SRichard Henderson         uint8_t attrindx;
203664bda510SRichard Henderson         uint64_t mair;
20370231bdc8SPavel Skripkin         int user_rw, prot_rw;
203864bda510SRichard Henderson 
20392f1ff4e7SRichard Henderson         switch (out_space) {
20402f1ff4e7SRichard Henderson         case ARMSS_Root:
20412f1ff4e7SRichard Henderson             /*
20422f1ff4e7SRichard Henderson              * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
20432f1ff4e7SRichard Henderson              * R_XTYPW: NSE and NS together select the output pa space.
20442f1ff4e7SRichard Henderson              */
20452f1ff4e7SRichard Henderson             nse = extract32(attrs, 11, 1);
20462f1ff4e7SRichard Henderson             out_space = (nse << 1) | ns;
20472f1ff4e7SRichard Henderson             if (out_space == ARMSS_Secure &&
20482f1ff4e7SRichard Henderson                 !cpu_isar_feature(aa64_sel2, cpu)) {
20492f1ff4e7SRichard Henderson                 out_space = ARMSS_NonSecure;
20502f1ff4e7SRichard Henderson             }
20512f1ff4e7SRichard Henderson             break;
20522f1ff4e7SRichard Henderson         case ARMSS_Secure:
20532f1ff4e7SRichard Henderson             if (ns) {
20542f1ff4e7SRichard Henderson                 out_space = ARMSS_NonSecure;
20552f1ff4e7SRichard Henderson             }
20562f1ff4e7SRichard Henderson             break;
20572f1ff4e7SRichard Henderson         case ARMSS_Realm:
20582f1ff4e7SRichard Henderson             switch (mmu_idx) {
20592f1ff4e7SRichard Henderson             case ARMMMUIdx_Stage1_E0:
20602f1ff4e7SRichard Henderson             case ARMMMUIdx_Stage1_E1:
20612f1ff4e7SRichard Henderson             case ARMMMUIdx_Stage1_E1_PAN:
20622f1ff4e7SRichard Henderson                 /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
20632f1ff4e7SRichard Henderson                 break;
20642f1ff4e7SRichard Henderson             case ARMMMUIdx_E2:
20652f1ff4e7SRichard Henderson             case ARMMMUIdx_E20_0:
20662f1ff4e7SRichard Henderson             case ARMMMUIdx_E20_2:
20672f1ff4e7SRichard Henderson             case ARMMMUIdx_E20_2_PAN:
20682f1ff4e7SRichard Henderson                 /*
20692f1ff4e7SRichard Henderson                  * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
20702f1ff4e7SRichard Henderson                  * NS changes the output to non-secure space.
20712f1ff4e7SRichard Henderson                  */
20722f1ff4e7SRichard Henderson                 if (ns) {
20732f1ff4e7SRichard Henderson                     out_space = ARMSS_NonSecure;
20742f1ff4e7SRichard Henderson                 }
20752f1ff4e7SRichard Henderson                 break;
20762f1ff4e7SRichard Henderson             default:
20772f1ff4e7SRichard Henderson                 g_assert_not_reached();
20782f1ff4e7SRichard Henderson             }
20792f1ff4e7SRichard Henderson             break;
20802f1ff4e7SRichard Henderson         case ARMSS_NonSecure:
20812f1ff4e7SRichard Henderson             /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
20822f1ff4e7SRichard Henderson             break;
20832f1ff4e7SRichard Henderson         default:
20842f1ff4e7SRichard Henderson             g_assert_not_reached();
20852f1ff4e7SRichard Henderson         }
208645666091SRichard Henderson         xn = extract64(attrs, 54, 1);
208745666091SRichard Henderson         pxn = extract64(attrs, 53, 1);
20882f1ff4e7SRichard Henderson 
2089dea9104aSPeter Maydell         if (el == 1 && nv_nv1_enabled(env, ptw)) {
2090dea9104aSPeter Maydell             /*
2091dea9104aSPeter Maydell              * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
2092dea9104aSPeter Maydell              * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
2093dea9104aSPeter Maydell              * of UXN is 0. Similarly for bits 59 and 60 in table descriptors
2094dea9104aSPeter Maydell              * (which we have already folded into bits 53 and 54 of attrs).
2095dea9104aSPeter Maydell              * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
2096dea9104aSPeter Maydell              * Similarly, APTable[0] from the table descriptor is treated as 0;
2097dea9104aSPeter Maydell              * we already folded this into AP[1] and squashing that to 0 does
2098dea9104aSPeter Maydell              * the right thing.
2099dea9104aSPeter Maydell              */
2100dea9104aSPeter Maydell             pxn = xn;
2101dea9104aSPeter Maydell             xn = 0;
2102dea9104aSPeter Maydell             ap &= ~1;
2103dea9104aSPeter Maydell         }
21040231bdc8SPavel Skripkin 
21050231bdc8SPavel Skripkin         user_rw = simple_ap_to_rw_prot_is_user(ap, true);
21060231bdc8SPavel Skripkin         prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
21072f1ff4e7SRichard Henderson         /*
21082f1ff4e7SRichard Henderson          * Note that we modified ptw->in_space earlier for NSTable, but
21092f1ff4e7SRichard Henderson          * result->f.attrs retains a copy of the original security space.
21102f1ff4e7SRichard Henderson          */
21110231bdc8SPavel Skripkin         result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
21120231bdc8SPavel Skripkin                                     xn, pxn, result->f.attrs.space, out_space);
211364bda510SRichard Henderson 
211464bda510SRichard Henderson         /* Index into MAIR registers for cache attributes */
211564bda510SRichard Henderson         attrindx = extract32(attrs, 2, 3);
211664bda510SRichard Henderson         mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
211764bda510SRichard Henderson         assert(attrindx <= 7);
211864bda510SRichard Henderson         result->cacheattrs.is_s2_format = false;
211964bda510SRichard Henderson         result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
212064bda510SRichard Henderson 
212164bda510SRichard Henderson         /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
212264bda510SRichard Henderson         if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
212364bda510SRichard Henderson             result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
212464bda510SRichard Henderson         }
212564bda510SRichard Henderson         device = S1_attrs_are_device(result->cacheattrs.attrs);
21263283222aSRichard Henderson     }
21273283222aSRichard Henderson 
2128e530581eSRichard Henderson     /*
2129e530581eSRichard Henderson      * Enable alignment checks on Device memory.
2130e530581eSRichard Henderson      *
2131e530581eSRichard Henderson      * Per R_XCHFJ, the correct ordering for alignment, permission,
2132e530581eSRichard Henderson      * and stage 2 faults is:
2133e530581eSRichard Henderson      *    - Alignment fault caused by the memory type
2134e530581eSRichard Henderson      *    - Permission fault
2135e530581eSRichard Henderson      *    - A stage 2 fault on the memory access
2136e530581eSRichard Henderson      * Perform the alignment check now, so that we recognize it in
2137e530581eSRichard Henderson      * the correct order.  Set TLB_CHECK_ALIGNED so that any subsequent
2138e530581eSRichard Henderson      * softmmu tlb hit will also check the alignment; clear along the
2139e530581eSRichard Henderson      * non-device path so that tlb_fill_flags is consistent in the
2140e530581eSRichard Henderson      * event of restart_atomic_update.
2141e530581eSRichard Henderson      *
2142e530581eSRichard Henderson      * In v7, for a CPU without the Virtualization Extensions this
2143e530581eSRichard Henderson      * access is UNPREDICTABLE; we choose to make it take the alignment
2144e530581eSRichard Henderson      * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
2145e530581eSRichard Henderson      * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
2146e530581eSRichard Henderson      */
2147e530581eSRichard Henderson     if (device) {
2148e530581eSRichard Henderson         unsigned a_bits = memop_atomicity_bits(memop);
2149e530581eSRichard Henderson         if (address & ((1 << a_bits) - 1)) {
2150e530581eSRichard Henderson             fi->type = ARMFault_Alignment;
2151e530581eSRichard Henderson             goto do_fault;
2152e530581eSRichard Henderson         }
2153e530581eSRichard Henderson         result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
2154e530581eSRichard Henderson     } else {
2155e530581eSRichard Henderson         result->f.tlb_fill_flags = 0;
2156e530581eSRichard Henderson     }
2157e530581eSRichard Henderson 
21587fa7ea8fSRichard Henderson     if (!(result->f.prot & (1 << access_type))) {
215927c1b81dSRichard Henderson         fi->type = ARMFault_Permission;
21603283222aSRichard Henderson         goto do_fault;
21613283222aSRichard Henderson     }
21623283222aSRichard Henderson 
216371943a1eSRichard Henderson     /* If FEAT_HAFDBS has made changes, update the PTE. */
216471943a1eSRichard Henderson     if (new_descriptor != descriptor) {
216571943a1eSRichard Henderson         new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
216671943a1eSRichard Henderson         if (fi->type != ARMFault_None) {
216771943a1eSRichard Henderson             goto do_fault;
216871943a1eSRichard Henderson         }
216971943a1eSRichard Henderson         /*
217071943a1eSRichard Henderson          * I_YZSVV says that if the in-memory descriptor has changed,
217171943a1eSRichard Henderson          * then we must use the information in that new value
217271943a1eSRichard Henderson          * (which might include a different output address, different
217371943a1eSRichard Henderson          * attributes, or generate a fault).
217471943a1eSRichard Henderson          * Restart the handling of the descriptor value from scratch.
217571943a1eSRichard Henderson          */
217671943a1eSRichard Henderson         if (new_descriptor != descriptor) {
217771943a1eSRichard Henderson             descriptor = new_descriptor;
217871943a1eSRichard Henderson             goto restart_atomic_update;
217971943a1eSRichard Henderson         }
218071943a1eSRichard Henderson     }
218171943a1eSRichard Henderson 
21822f1ff4e7SRichard Henderson     result->f.attrs.space = out_space;
21832f1ff4e7SRichard Henderson     result->f.attrs.secure = arm_space_is_secure(out_space);
2184937f2245SRichard Henderson 
2185728b923fSRichard Henderson     /*
21863283222aSRichard Henderson      * For FEAT_LPA2 and effective DS, the SH field in the attributes
21873283222aSRichard Henderson      * was re-purposed for output address bits.  The SH attribute in
21883283222aSRichard Henderson      * that case comes from TCR_ELx, which we extracted earlier.
21893283222aSRichard Henderson      */
21903283222aSRichard Henderson     if (param.ds) {
219103ee9bbeSRichard Henderson         result->cacheattrs.shareability = param.sh;
21923283222aSRichard Henderson     } else {
219345666091SRichard Henderson         result->cacheattrs.shareability = extract32(attrs, 8, 2);
21943283222aSRichard Henderson     }
21953283222aSRichard Henderson 
21967fa7ea8fSRichard Henderson     result->f.phys_addr = descaddr;
21977fa7ea8fSRichard Henderson     result->f.lg_page_size = ctz64(page_size);
21983283222aSRichard Henderson     return false;
21993283222aSRichard Henderson 
220027c1b81dSRichard Henderson  do_translation_fault:
220127c1b81dSRichard Henderson     fi->type = ARMFault_Translation;
22023283222aSRichard Henderson  do_fault:
2203a729d636SPeter Maydell     if (fi->s1ptw) {
2204a729d636SPeter Maydell         /* Retain the existing stage 2 fi->level */
2205a729d636SPeter Maydell         assert(fi->stage2);
2206a729d636SPeter Maydell     } else {
22073283222aSRichard Henderson         fi->level = level;
2208a729d636SPeter Maydell         fi->stage2 = regime_is_stage2(mmu_idx);
2209a729d636SPeter Maydell     }
22104f51edd3SPeter Maydell     fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
22113283222aSRichard Henderson     return true;
22123283222aSRichard Henderson }
22133283222aSRichard Henderson 
static bool get_phys_addr_pmsav5(CPUARMState *env,
                                 S1Translate *ptw,
                                 uint32_t address,
                                 MMUAccessType access_type,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    bool is_user = regime_is_user(env, mmu_idx);
    uint32_t ap_reg;
    int region;

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
        /* MPU disabled: flat mapping with full access.  */
        result->f.phys_addr = address;
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    }

    /* PMSAv5 never remaps the address; only permissions are checked. */
    result->f.phys_addr = address;

    /* Search regions from highest priority (7) down to lowest (0). */
    for (region = 7; region >= 0; region--) {
        uint32_t base = env->cp15.c6_region[region];
        uint32_t size_mask;

        if (!(base & 1)) {
            /* Region disabled. */
            continue;
        }
        size_mask = 1 << ((base >> 1) & 0x1f);
        /*
         * Keep this shift separate from the one above to avoid
         * an (undefined) << 32.
         */
        size_mask = (size_mask << 1) - 1;
        if (((base ^ address) & ~size_mask) == 0) {
            break;
        }
    }
    if (region < 0) {
        /* No region matched: background fault. */
        fi->type = ARMFault_Background;
        return true;
    }

    /* Pick the insn or data access-permission register as appropriate. */
    if (access_type == MMU_INST_FETCH) {
        ap_reg = env->cp15.pmsav5_insn_ap;
    } else {
        ap_reg = env->cp15.pmsav5_data_ap;
    }

    /* Decode the 4-bit AP field for the matching region. */
    switch ((ap_reg >> (region * 4)) & 0xf) {
    case 1:
        /* Privileged read/write; no user access. */
        if (is_user) {
            break;
        }
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    case 3:
        /* Full read/write access. */
        result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return false;
    case 2:
        /* Read for everyone; write only when privileged. */
        result->f.prot = PAGE_READ | PAGE_EXEC;
        if (!is_user) {
            result->f.prot |= PAGE_WRITE;
        }
        return false;
    case 5:
        /* Privileged read-only; no user access. */
        if (is_user) {
            break;
        }
        result->f.prot = PAGE_READ | PAGE_EXEC;
        return false;
    case 6:
        /* Read-only for everyone. */
        result->f.prot = PAGE_READ | PAGE_EXEC;
        return false;
    default:
        /* 0 (no access) and reserved encodings fault. */
        break;
    }

    fi->type = ARMFault_Permission;
    fi->level = 1;
    return true;
}
23019a12fb36SRichard Henderson 
static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
                                         int32_t address, uint8_t *prot)
{
    uint32_t addr = (uint32_t)address;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /*
         * Default system address map for M profile cores.
         * The architecture specifies which regions are execute-never;
         * at the MPU level no other checks are defined.
         *
         * ROM   0x00000000..0x1fffffff  RWX
         * SRAM  0x20000000..0x3fffffff  RWX
         * Periph 0x40000000..0x5fffffff RW (XN)
         * RAM   0x60000000..0x9fffffff  RWX
         * Device 0xa0000000..0xdfffffff RW (XN)
         * System 0xe0000000..0xffffffff RW (XN)
         */
        if (addr < 0x40000000u ||
            (addr >= 0x60000000u && addr < 0xa0000000u)) {
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        } else {
            *prot = PAGE_READ | PAGE_WRITE;
        }
        return;
    }

    /* A/R profile: readable and writable everywhere by default. */
    *prot = PAGE_READ | PAGE_WRITE;
    if (addr >= 0xf0000000u) {
        /* hivecs execing is ok */
        if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
            *prot |= PAGE_EXEC;
        }
    } else if (addr <= 0x7fffffffu) {
        *prot |= PAGE_EXEC;
    }
}
23417d2e08c9SRichard Henderson 
static bool m_is_ppb_region(CPUARMState *env, uint32_t address)
{
    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
    if (!arm_feature(env, ARM_FEATURE_M)) {
        return false;
    }
    return (address >> 20) == 0xe00;
}
234847ff5ba9SRichard Henderson 
static bool m_is_system_region(CPUARMState *env, uint32_t address)
{
    /*
     * True if address is in the M profile system region
     * 0xe0000000 - 0xffffffff
     */
    if (!arm_feature(env, ARM_FEATURE_M)) {
        return false;
    }
    return (address >> 29) == 0x7;
}
235747ff5ba9SRichard Henderson 
static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
                                         bool is_secure, bool is_user)
{
    /*
     * Return true if we should use the default memory map as a
     * "background" region if there are no hits against any MPU regions.
     */
    CPUARMState *env = &cpu->env;

    /* Unprivileged accesses never fall back to the default map. */
    if (is_user) {
        return false;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile: controlled per security bank by MPU_CTRL.PRIVDEFENA. */
        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
    }

    /* R profile: no background region for stage 2; otherwise SCTLR.BR. */
    return mmu_idx != ARMMMUIdx_Stage2 &&
           (regime_sctlr(env, mmu_idx) & SCTLR_BR);
}
2381c8e436c9SRichard Henderson 
get_phys_addr_pmsav7(CPUARMState * env,S1Translate * ptw,uint32_t address,MMUAccessType access_type,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)2382a5637becSPeter Maydell static bool get_phys_addr_pmsav7(CPUARMState *env,
2383a5637becSPeter Maydell                                  S1Translate *ptw,
2384a5637becSPeter Maydell                                  uint32_t address,
2385a5637becSPeter Maydell                                  MMUAccessType access_type,
2386a5637becSPeter Maydell                                  GetPhysAddrResult *result,
23871f2e87e5SRichard Henderson                                  ARMMMUFaultInfo *fi)
23881f2e87e5SRichard Henderson {
23891f2e87e5SRichard Henderson     ARMCPU *cpu = env_archcpu(env);
23901f2e87e5SRichard Henderson     int n;
2391a5637becSPeter Maydell     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
23921f2e87e5SRichard Henderson     bool is_user = regime_is_user(env, mmu_idx);
2393a5637becSPeter Maydell     bool secure = arm_space_is_secure(ptw->in_space);
23941f2e87e5SRichard Henderson 
23957fa7ea8fSRichard Henderson     result->f.phys_addr = address;
23967fa7ea8fSRichard Henderson     result->f.lg_page_size = TARGET_PAGE_BITS;
23977fa7ea8fSRichard Henderson     result->f.prot = 0;
23981f2e87e5SRichard Henderson 
2399d1289140SPeter Maydell     if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
24001f2e87e5SRichard Henderson         m_is_ppb_region(env, address)) {
24011f2e87e5SRichard Henderson         /*
24021f2e87e5SRichard Henderson          * MPU disabled or M profile PPB access: use default memory map.
24031f2e87e5SRichard Henderson          * The other case which uses the default memory map in the
24041f2e87e5SRichard Henderson          * v7M ARM ARM pseudocode is exception vector reads from the vector
24051f2e87e5SRichard Henderson          * table. In QEMU those accesses are done in arm_v7m_load_vector(),
24061f2e87e5SRichard Henderson          * which always does a direct read using address_space_ldl(), rather
24071f2e87e5SRichard Henderson          * than going via this function, so we don't need to check that here.
24081f2e87e5SRichard Henderson          */
24097fa7ea8fSRichard Henderson         get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
24101f2e87e5SRichard Henderson     } else { /* MPU enabled */
24111f2e87e5SRichard Henderson         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
24121f2e87e5SRichard Henderson             /* region search */
24131f2e87e5SRichard Henderson             uint32_t base = env->pmsav7.drbar[n];
24141f2e87e5SRichard Henderson             uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
24151f2e87e5SRichard Henderson             uint32_t rmask;
24161f2e87e5SRichard Henderson             bool srdis = false;
24171f2e87e5SRichard Henderson 
24181f2e87e5SRichard Henderson             if (!(env->pmsav7.drsr[n] & 0x1)) {
24191f2e87e5SRichard Henderson                 continue;
24201f2e87e5SRichard Henderson             }
24211f2e87e5SRichard Henderson 
24221f2e87e5SRichard Henderson             if (!rsize) {
24231f2e87e5SRichard Henderson                 qemu_log_mask(LOG_GUEST_ERROR,
24241f2e87e5SRichard Henderson                               "DRSR[%d]: Rsize field cannot be 0\n", n);
24251f2e87e5SRichard Henderson                 continue;
24261f2e87e5SRichard Henderson             }
24271f2e87e5SRichard Henderson             rsize++;
24281f2e87e5SRichard Henderson             rmask = (1ull << rsize) - 1;
24291f2e87e5SRichard Henderson 
24301f2e87e5SRichard Henderson             if (base & rmask) {
24311f2e87e5SRichard Henderson                 qemu_log_mask(LOG_GUEST_ERROR,
24321f2e87e5SRichard Henderson                               "DRBAR[%d]: 0x%" PRIx32 " misaligned "
24331f2e87e5SRichard Henderson                               "to DRSR region size, mask = 0x%" PRIx32 "\n",
24341f2e87e5SRichard Henderson                               n, base, rmask);
24351f2e87e5SRichard Henderson                 continue;
24361f2e87e5SRichard Henderson             }
24371f2e87e5SRichard Henderson 
24381f2e87e5SRichard Henderson             if (address < base || address > base + rmask) {
24391f2e87e5SRichard Henderson                 /*
24401f2e87e5SRichard Henderson                  * Address not in this region. We must check whether the
24411f2e87e5SRichard Henderson                  * region covers addresses in the same page as our address.
24421f2e87e5SRichard Henderson                  * In that case we must not report a size that covers the
24431f2e87e5SRichard Henderson                  * whole page for a subsequent hit against a different MPU
24441f2e87e5SRichard Henderson                  * region or the background region, because it would result in
24451f2e87e5SRichard Henderson                  * incorrect TLB hits for subsequent accesses to addresses that
24461f2e87e5SRichard Henderson                  * are in this MPU region.
24471f2e87e5SRichard Henderson                  */
24481f2e87e5SRichard Henderson                 if (ranges_overlap(base, rmask,
24491f2e87e5SRichard Henderson                                    address & TARGET_PAGE_MASK,
24501f2e87e5SRichard Henderson                                    TARGET_PAGE_SIZE)) {
24517fa7ea8fSRichard Henderson                     result->f.lg_page_size = 0;
24521f2e87e5SRichard Henderson                 }
24531f2e87e5SRichard Henderson                 continue;
24541f2e87e5SRichard Henderson             }
24551f2e87e5SRichard Henderson 
24561f2e87e5SRichard Henderson             /* Region matched */
24571f2e87e5SRichard Henderson 
24581f2e87e5SRichard Henderson             if (rsize >= 8) { /* no subregions for regions < 256 bytes */
24591f2e87e5SRichard Henderson                 int i, snd;
24601f2e87e5SRichard Henderson                 uint32_t srdis_mask;
24611f2e87e5SRichard Henderson 
24621f2e87e5SRichard Henderson                 rsize -= 3; /* sub region size (power of 2) */
24631f2e87e5SRichard Henderson                 snd = ((address - base) >> rsize) & 0x7;
24641f2e87e5SRichard Henderson                 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
24651f2e87e5SRichard Henderson 
24661f2e87e5SRichard Henderson                 srdis_mask = srdis ? 0x3 : 0x0;
24671f2e87e5SRichard Henderson                 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
24681f2e87e5SRichard Henderson                     /*
24691f2e87e5SRichard Henderson                      * This will check in groups of 2, 4 and then 8, whether
24701f2e87e5SRichard Henderson                      * the subregion bits are consistent. rsize is incremented
24711f2e87e5SRichard Henderson                      * back up to give the region size, considering consistent
24721f2e87e5SRichard Henderson                      * adjacent subregions as one region. Stop testing if rsize
24731f2e87e5SRichard Henderson                      * is already big enough for an entire QEMU page.
24741f2e87e5SRichard Henderson                      */
24751f2e87e5SRichard Henderson                     int snd_rounded = snd & ~(i - 1);
24761f2e87e5SRichard Henderson                     uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
24771f2e87e5SRichard Henderson                                                      snd_rounded + 8, i);
24781f2e87e5SRichard Henderson                     if (srdis_mask ^ srdis_multi) {
24791f2e87e5SRichard Henderson                         break;
24801f2e87e5SRichard Henderson                     }
24811f2e87e5SRichard Henderson                     srdis_mask = (srdis_mask << i) | srdis_mask;
24821f2e87e5SRichard Henderson                     rsize++;
24831f2e87e5SRichard Henderson                 }
24841f2e87e5SRichard Henderson             }
24851f2e87e5SRichard Henderson             if (srdis) {
24861f2e87e5SRichard Henderson                 continue;
24871f2e87e5SRichard Henderson             }
24881f2e87e5SRichard Henderson             if (rsize < TARGET_PAGE_BITS) {
24897fa7ea8fSRichard Henderson                 result->f.lg_page_size = rsize;
24901f2e87e5SRichard Henderson             }
24911f2e87e5SRichard Henderson             break;
24921f2e87e5SRichard Henderson         }
24931f2e87e5SRichard Henderson 
24941f2e87e5SRichard Henderson         if (n == -1) { /* no hits */
24951a469cf7SRichard Henderson             if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
24961f2e87e5SRichard Henderson                 /* background fault */
24971f2e87e5SRichard Henderson                 fi->type = ARMFault_Background;
24981f2e87e5SRichard Henderson                 return true;
24991f2e87e5SRichard Henderson             }
25007fa7ea8fSRichard Henderson             get_phys_addr_pmsav7_default(env, mmu_idx, address,
25017fa7ea8fSRichard Henderson                                          &result->f.prot);
25021f2e87e5SRichard Henderson         } else { /* a MPU hit! */
25031f2e87e5SRichard Henderson             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
25041f2e87e5SRichard Henderson             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
25051f2e87e5SRichard Henderson 
25061f2e87e5SRichard Henderson             if (m_is_system_region(env, address)) {
25071f2e87e5SRichard Henderson                 /* System space is always execute never */
25081f2e87e5SRichard Henderson                 xn = 1;
25091f2e87e5SRichard Henderson             }
25101f2e87e5SRichard Henderson 
25111f2e87e5SRichard Henderson             if (is_user) { /* User mode AP bit decoding */
25121f2e87e5SRichard Henderson                 switch (ap) {
25131f2e87e5SRichard Henderson                 case 0:
25141f2e87e5SRichard Henderson                 case 1:
25151f2e87e5SRichard Henderson                 case 5:
25161f2e87e5SRichard Henderson                     break; /* no access */
25171f2e87e5SRichard Henderson                 case 3:
25187fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_WRITE;
25191f2e87e5SRichard Henderson                     /* fall through */
25201f2e87e5SRichard Henderson                 case 2:
25211f2e87e5SRichard Henderson                 case 6:
25227fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_READ | PAGE_EXEC;
25231f2e87e5SRichard Henderson                     break;
25241f2e87e5SRichard Henderson                 case 7:
25251f2e87e5SRichard Henderson                     /* for v7M, same as 6; for R profile a reserved value */
25261f2e87e5SRichard Henderson                     if (arm_feature(env, ARM_FEATURE_M)) {
25277fa7ea8fSRichard Henderson                         result->f.prot |= PAGE_READ | PAGE_EXEC;
25281f2e87e5SRichard Henderson                         break;
25291f2e87e5SRichard Henderson                     }
25301f2e87e5SRichard Henderson                     /* fall through */
25311f2e87e5SRichard Henderson                 default:
25321f2e87e5SRichard Henderson                     qemu_log_mask(LOG_GUEST_ERROR,
25331f2e87e5SRichard Henderson                                   "DRACR[%d]: Bad value for AP bits: 0x%"
25341f2e87e5SRichard Henderson                                   PRIx32 "\n", n, ap);
25351f2e87e5SRichard Henderson                 }
25361f2e87e5SRichard Henderson             } else { /* Priv. mode AP bits decoding */
25371f2e87e5SRichard Henderson                 switch (ap) {
25381f2e87e5SRichard Henderson                 case 0:
25391f2e87e5SRichard Henderson                     break; /* no access */
25401f2e87e5SRichard Henderson                 case 1:
25411f2e87e5SRichard Henderson                 case 2:
25421f2e87e5SRichard Henderson                 case 3:
25437fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_WRITE;
25441f2e87e5SRichard Henderson                     /* fall through */
25451f2e87e5SRichard Henderson                 case 5:
25461f2e87e5SRichard Henderson                 case 6:
25477fa7ea8fSRichard Henderson                     result->f.prot |= PAGE_READ | PAGE_EXEC;
25481f2e87e5SRichard Henderson                     break;
25491f2e87e5SRichard Henderson                 case 7:
25501f2e87e5SRichard Henderson                     /* for v7M, same as 6; for R profile a reserved value */
25511f2e87e5SRichard Henderson                     if (arm_feature(env, ARM_FEATURE_M)) {
25527fa7ea8fSRichard Henderson                         result->f.prot |= PAGE_READ | PAGE_EXEC;
25531f2e87e5SRichard Henderson                         break;
25541f2e87e5SRichard Henderson                     }
25551f2e87e5SRichard Henderson                     /* fall through */
25561f2e87e5SRichard Henderson                 default:
25571f2e87e5SRichard Henderson                     qemu_log_mask(LOG_GUEST_ERROR,
25581f2e87e5SRichard Henderson                                   "DRACR[%d]: Bad value for AP bits: 0x%"
25591f2e87e5SRichard Henderson                                   PRIx32 "\n", n, ap);
25601f2e87e5SRichard Henderson                 }
25611f2e87e5SRichard Henderson             }
25621f2e87e5SRichard Henderson 
25631f2e87e5SRichard Henderson             /* execute never */
25641f2e87e5SRichard Henderson             if (xn) {
25657fa7ea8fSRichard Henderson                 result->f.prot &= ~PAGE_EXEC;
25661f2e87e5SRichard Henderson             }
25671f2e87e5SRichard Henderson         }
25681f2e87e5SRichard Henderson     }
25691f2e87e5SRichard Henderson 
25701f2e87e5SRichard Henderson     fi->type = ARMFault_Permission;
25711f2e87e5SRichard Henderson     fi->level = 1;
25727fa7ea8fSRichard Henderson     return !(result->f.prot & (1 << access_type));
25731f2e87e5SRichard Henderson }
25741f2e87e5SRichard Henderson 
/*
 * Return the MPU region base-address register bank for this regime:
 * the EL2 HPRBAR bank when the regime is EL2, otherwise the
 * security-banked RBAR array selected by @secure.
 */
static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    return regime_el(env, mmu_idx) == 2
        ? env->pmsav8.hprbar
        : env->pmsav8.rbar[secure];
}
2584fca45e34STobias Röhmel 
/*
 * Return the MPU region limit-address register bank for this regime:
 * the EL2 HPRLAR bank when the regime is EL2, otherwise the
 * security-banked RLAR array selected by @secure.
 */
static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
                             uint32_t secure)
{
    return regime_el(env, mmu_idx) == 2
        ? env->pmsav8.hprlar
        : env->pmsav8.rlar[secure];
}
2594fca45e34STobias Röhmel 
/*
 * Perform a PMSAv8/PMSAv8r MPU lookup for @address in the regime given
 * by @mmu_idx / @secure, filling in *result (phys addr, prot bits,
 * page size, and for R-profile the cache attributes) and *fi on fault.
 *
 * Returns true on a fault (fi->type set), false on success.
 */
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion)
{
    /*
     * Perform a PMSAv8 MPU lookup (without also doing the SAU check
     * that a full phys-to-virt translation does).
     * mregion is (if not NULL) set to the region number which matched,
     * or -1 if no region number is returned (MPU off, address did not
     * hit a region, address hit in multiple regions).
     * If the region hit doesn't cover the entire TARGET_PAGE the address
     * is within, then we set the result page_size to 1 to force the
     * memory system to use a subpage.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_user = regime_is_user(env, mmu_idx);
    int n;
    int matchregion = -1;
    bool hit = false;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
    int region_counter;

    /* EL2 uses the hypervisor MPU region bank; other regimes the normal one */
    if (regime_el(env, mmu_idx) == 2) {
        region_counter = cpu->pmsav8r_hdregion;
    } else {
        region_counter = cpu->pmsav7_dregion;
    }

    /* Default: identity mapping of a full page with no permissions yet */
    result->f.lg_page_size = TARGET_PAGE_BITS;
    result->f.phys_addr = address;
    result->f.prot = 0;
    if (mregion) {
        *mregion = -1;
    }

    if (mmu_idx == ARMMMUIdx_Stage2) {
        fi->stage2 = true;
    }

    /*
     * Unlike the ARM ARM pseudocode, we don't need to check whether this
     * was an exception vector read from the vector table (which is always
     * done using the default system address map), because those accesses
     * are done in arm_v7m_load_vector(), which always does a direct
     * read using address_space_ldl(), rather than going via this function.
     */
    if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
        /* MPU disabled */
        hit = true;
    } else if (m_is_ppb_region(env, address)) {
        /* PPB space is always accessible regardless of MPU programming */
        hit = true;
    } else {
        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
            hit = true;
        }

        /* Region granule alignment mask: 32 bytes for M-profile, 64 for R */
        uint32_t bitmask;
        if (arm_feature(env, ARM_FEATURE_M)) {
            bitmask = 0x1f;
        } else {
            bitmask = 0x3f;
            fi->level = 0;
        }

        for (n = region_counter - 1; n >= 0; n--) {
            /* region search */
            /*
             * Note that the base address is bits [31:x] from the register
             * with bits [x-1:0] all zeroes, but the limit address is bits
             * [31:x] from the register with bits [x:0] all ones. Where x is
             * 5 for Cortex-M and 6 for Cortex-R
             */
            uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask;
            uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask;

            if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) {
                /* Region disabled */
                continue;
            }

            if (address < base || address > limit) {
                /*
                 * Address not in this region. We must check whether the
                 * region covers addresses in the same page as our address.
                 * In that case we must not report a size that covers the
                 * whole page for a subsequent hit against a different MPU
                 * region or the background region, because it would result in
                 * incorrect TLB hits for subsequent accesses to addresses that
                 * are in this MPU region.
                 */
                if (limit >= base &&
                    ranges_overlap(base, limit - base + 1,
                                   addr_page_base,
                                   TARGET_PAGE_SIZE)) {
                    result->f.lg_page_size = 0;
                }
                continue;
            }

            /* Region does not cover the whole page: force subpage handling */
            if (base > addr_page_base || limit < addr_page_limit) {
                result->f.lg_page_size = 0;
            }

            if (matchregion != -1) {
                /*
                 * Multiple regions match -- always a failure (unlike
                 * PMSAv7 where highest-numbered-region wins)
                 */
                fi->type = ARMFault_Permission;
                if (arm_feature(env, ARM_FEATURE_M)) {
                    fi->level = 1;
                }
                return true;
            }

            matchregion = n;
            hit = true;
        }
    }

    if (!hit) {
        /* M-profile reports a background fault; R-profile a permission fault */
        if (arm_feature(env, ARM_FEATURE_M)) {
            fi->type = ARMFault_Background;
        } else {
            fi->type = ARMFault_Permission;
        }
        return true;
    }

    if (matchregion == -1) {
        /* hit using the background region */
        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot);
    } else {
        uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion];
        uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion];
        /* AP: RBAR bits [2:1]; XN: RBAR bit [0] */
        uint32_t ap = extract32(matched_rbar, 1, 2);
        uint32_t xn = extract32(matched_rbar, 0, 1);
        bool pxn = false;

        if (arm_feature(env, ARM_FEATURE_V8_1M)) {
            /* v8.1M adds a privileged-execute-never bit in RLAR bit [4] */
            pxn = extract32(matched_rlar, 4, 1);
        }

        if (m_is_system_region(env, address)) {
            /* System space is always execute never */
            xn = 1;
        }

        if (regime_el(env, mmu_idx) == 2) {
            result->f.prot = simple_ap_to_rw_prot_is_user(ap,
                                            mmu_idx != ARMMMUIdx_E2);
        } else {
            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
        }

        if (!arm_feature(env, ARM_FEATURE_M)) {
            /* R-profile only: derive cache attributes from MAIR via RLAR */
            uint8_t attrindx = extract32(matched_rlar, 1, 3);
            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
            uint8_t sh = extract32(matched_rlar, 3, 2);

            /* SCTLR.WXN: writable implies execute-never (not for stage 2) */
            if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
                result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) {
                xn = 0x1;
            }

            /* SCTLR.UWXN at EL1: user-writable implies priv execute-never */
            if ((regime_el(env, mmu_idx) == 1) &&
                regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
                pxn = 0x1;
            }

            result->cacheattrs.is_s2_format = false;
            result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
            result->cacheattrs.shareability = sh;
        }

        /* Grant exec only if readable, not XN, and not PXN-blocked for priv */
        if (result->f.prot && !xn && !(pxn && !is_user)) {
            result->f.prot |= PAGE_EXEC;
        }

        if (mregion) {
            *mregion = matchregion;
        }
    }

    /*
     * Fall through: report a Permission fault iff the requested access
     * type is not in the computed prot bits.
     */
    fi->type = ARMFault_Permission;
    if (arm_feature(env, ARM_FEATURE_M)) {
        fi->level = 1;
    }
    return !(result->f.prot & (1 << access_type));
}
2787fedbaa05SRichard Henderson 
v8m_is_sau_exempt(CPUARMState * env,uint32_t address,MMUAccessType access_type)27882c1f429dSRichard Henderson static bool v8m_is_sau_exempt(CPUARMState *env,
27892c1f429dSRichard Henderson                               uint32_t address, MMUAccessType access_type)
27902c1f429dSRichard Henderson {
27912c1f429dSRichard Henderson     /*
27922c1f429dSRichard Henderson      * The architecture specifies that certain address ranges are
27932c1f429dSRichard Henderson      * exempt from v8M SAU/IDAU checks.
27942c1f429dSRichard Henderson      */
27952c1f429dSRichard Henderson     return
27962c1f429dSRichard Henderson         (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
27972c1f429dSRichard Henderson         (address >= 0xe0000000 && address <= 0xe0002fff) ||
27982c1f429dSRichard Henderson         (address >= 0xe000e000 && address <= 0xe000efff) ||
27992c1f429dSRichard Henderson         (address >= 0xe002e000 && address <= 0xe002efff) ||
28002c1f429dSRichard Henderson         (address >= 0xe0040000 && address <= 0xe0041fff) ||
28012c1f429dSRichard Henderson         (address >= 0xe00ff000 && address <= 0xe00fffff);
28022c1f429dSRichard Henderson }
28032c1f429dSRichard Henderson 
/*
 * Look up the v8M security attributes (SAU + IDAU) for @address and
 * fill in *sattrs. Compare the Arm ARM pseudocode SecurityCheck().
 */
void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool is_secure, V8M_SAttributes *sattrs)
{
    /*
     * Look up the security attributes for this address. Compare the
     * pseudocode SecurityCheck() function.
     * We assume the caller has zero-initialized *sattrs.
     */
    ARMCPU *cpu = env_archcpu(env);
    int r;
    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
    int idau_region = IREGION_NOTVALID;
    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);

    /* Query the board-supplied IDAU interface, if the CPU has one */
    if (cpu->idau) {
        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);

        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
                   &idau_nsc);
    }

    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
        /* 0xf0000000..0xffffffff is always S for insn fetches */
        return;
    }

    /* Exempt accesses take the security state of the current regime */
    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
        sattrs->ns = !is_secure;
        return;
    }

    if (idau_region != IREGION_NOTVALID) {
        sattrs->irvalid = true;
        sattrs->iregion = idau_region;
    }

    /* SAU_CTRL bits [1:0] are ALLNS:ENABLE */
    switch (env->sau.ctrl & 3) {
    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
        break;
    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
        sattrs->ns = true;
        break;
    default: /* SAU.ENABLE == 1 */
        for (r = 0; r < cpu->sau_sregion; r++) {
            if (env->sau.rlar[r] & 1) {
                /* SAU regions are 32-byte aligned: RBAR base, RLAR limit */
                uint32_t base = env->sau.rbar[r] & ~0x1f;
                uint32_t limit = env->sau.rlar[r] | 0x1f;

                if (base <= address && limit >= address) {
                    /* Partial page coverage forces subpage translation */
                    if (base > addr_page_base || limit < addr_page_limit) {
                        sattrs->subpage = true;
                    }
                    if (sattrs->srvalid) {
                        /*
                         * If we hit in more than one region then we must report
                         * as Secure, not NS-Callable, with no valid region
                         * number info.
                         */
                        sattrs->ns = false;
                        sattrs->nsc = false;
                        sattrs->sregion = 0;
                        sattrs->srvalid = false;
                        break;
                    } else {
                        /* RLAR bit [1] is the NSC (NS-Callable) flag */
                        if (env->sau.rlar[r] & 2) {
                            sattrs->nsc = true;
                        } else {
                            sattrs->ns = true;
                        }
                        sattrs->srvalid = true;
                        sattrs->sregion = r;
                    }
                } else {
                    /*
                     * Address not in this region. We must check whether the
                     * region covers addresses in the same page as our address.
                     * In that case we must not report a size that covers the
                     * whole page for a subsequent hit against a different MPU
                     * region or the background region, because it would result
                     * in incorrect TLB hits for subsequent accesses to
                     * addresses that are in this MPU region.
                     */
                    if (limit >= base &&
                        ranges_overlap(base, limit - base + 1,
                                       addr_page_base,
                                       TARGET_PAGE_SIZE)) {
                        sattrs->subpage = true;
                    }
                }
            }
        }
        break;
    }

    /*
     * The IDAU will override the SAU lookup results if it specifies
     * higher security than the SAU does.
     */
    if (!idau_ns) {
        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
            sattrs->ns = false;
            sattrs->nsc = idau_nsc;
        }
    }
}
29122c1f429dSRichard Henderson 
get_phys_addr_pmsav8(CPUARMState * env,S1Translate * ptw,uint32_t address,MMUAccessType access_type,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)2913a5637becSPeter Maydell static bool get_phys_addr_pmsav8(CPUARMState *env,
2914a5637becSPeter Maydell                                  S1Translate *ptw,
2915a5637becSPeter Maydell                                  uint32_t address,
2916a5637becSPeter Maydell                                  MMUAccessType access_type,
2917a5637becSPeter Maydell                                  GetPhysAddrResult *result,
2918730d5c31SRichard Henderson                                  ARMMMUFaultInfo *fi)
2919730d5c31SRichard Henderson {
2920730d5c31SRichard Henderson     V8M_SAttributes sattrs = {};
2921a5637becSPeter Maydell     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
2922a5637becSPeter Maydell     bool secure = arm_space_is_secure(ptw->in_space);
2923730d5c31SRichard Henderson     bool ret;
2924730d5c31SRichard Henderson 
2925730d5c31SRichard Henderson     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2926dbf2a71aSRichard Henderson         v8m_security_lookup(env, address, access_type, mmu_idx,
2927dbf2a71aSRichard Henderson                             secure, &sattrs);
2928730d5c31SRichard Henderson         if (access_type == MMU_INST_FETCH) {
2929730d5c31SRichard Henderson             /*
2930730d5c31SRichard Henderson              * Instruction fetches always use the MMU bank and the
2931730d5c31SRichard Henderson              * transaction attribute determined by the fetch address,
2932730d5c31SRichard Henderson              * regardless of CPU state. This is painful for QEMU
2933730d5c31SRichard Henderson              * to handle, because it would mean we need to encode
2934730d5c31SRichard Henderson              * into the mmu_idx not just the (user, negpri) information
2935730d5c31SRichard Henderson              * for the current security state but also that for the
2936730d5c31SRichard Henderson              * other security state, which would balloon the number
2937730d5c31SRichard Henderson              * of mmu_idx values needed alarmingly.
2938730d5c31SRichard Henderson              * Fortunately we can avoid this because it's not actually
2939730d5c31SRichard Henderson              * possible to arbitrarily execute code from memory with
2940730d5c31SRichard Henderson              * the wrong security attribute: it will always generate
2941730d5c31SRichard Henderson              * an exception of some kind or another, apart from the
2942730d5c31SRichard Henderson              * special case of an NS CPU executing an SG instruction
2943730d5c31SRichard Henderson              * in S&NSC memory. So we always just fail the translation
2944730d5c31SRichard Henderson              * here and sort things out in the exception handler
2945730d5c31SRichard Henderson              * (including possibly emulating an SG instruction).
2946730d5c31SRichard Henderson              */
2947730d5c31SRichard Henderson             if (sattrs.ns != !secure) {
2948730d5c31SRichard Henderson                 if (sattrs.nsc) {
2949730d5c31SRichard Henderson                     fi->type = ARMFault_QEMU_NSCExec;
2950730d5c31SRichard Henderson                 } else {
2951730d5c31SRichard Henderson                     fi->type = ARMFault_QEMU_SFault;
2952730d5c31SRichard Henderson                 }
29537fa7ea8fSRichard Henderson                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
29547fa7ea8fSRichard Henderson                 result->f.phys_addr = address;
29557fa7ea8fSRichard Henderson                 result->f.prot = 0;
2956730d5c31SRichard Henderson                 return true;
2957730d5c31SRichard Henderson             }
2958730d5c31SRichard Henderson         } else {
2959730d5c31SRichard Henderson             /*
2960730d5c31SRichard Henderson              * For data accesses we always use the MMU bank indicated
2961730d5c31SRichard Henderson              * by the current CPU state, but the security attributes
2962730d5c31SRichard Henderson              * might downgrade a secure access to nonsecure.
2963730d5c31SRichard Henderson              */
2964730d5c31SRichard Henderson             if (sattrs.ns) {
29657fa7ea8fSRichard Henderson                 result->f.attrs.secure = false;
296690c66293SRichard Henderson                 result->f.attrs.space = ARMSS_NonSecure;
2967730d5c31SRichard Henderson             } else if (!secure) {
2968730d5c31SRichard Henderson                 /*
2969730d5c31SRichard Henderson                  * NS access to S memory must fault.
2970730d5c31SRichard Henderson                  * Architecturally we should first check whether the
2971730d5c31SRichard Henderson                  * MPU information for this address indicates that we
2972730d5c31SRichard Henderson                  * are doing an unaligned access to Device memory, which
2973730d5c31SRichard Henderson                  * should generate a UsageFault instead. QEMU does not
2974730d5c31SRichard Henderson                  * currently check for that kind of unaligned access though.
2975730d5c31SRichard Henderson                  * If we added it we would need to do so as a special case
2976730d5c31SRichard Henderson                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
2977730d5c31SRichard Henderson                  */
2978730d5c31SRichard Henderson                 fi->type = ARMFault_QEMU_SFault;
29797fa7ea8fSRichard Henderson                 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS;
29807fa7ea8fSRichard Henderson                 result->f.phys_addr = address;
29817fa7ea8fSRichard Henderson                 result->f.prot = 0;
2982730d5c31SRichard Henderson                 return true;
2983730d5c31SRichard Henderson             }
2984730d5c31SRichard Henderson         }
2985730d5c31SRichard Henderson     }
2986730d5c31SRichard Henderson 
2987e9fb7090SRichard Henderson     ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
2988652c750eSRichard Henderson                             result, fi, NULL);
2989652c750eSRichard Henderson     if (sattrs.subpage) {
29907fa7ea8fSRichard Henderson         result->f.lg_page_size = 0;
2991652c750eSRichard Henderson     }
2992730d5c31SRichard Henderson     return ret;
2993730d5c31SRichard Henderson }
2994730d5c31SRichard Henderson 
2995966f4bb7SRichard Henderson /*
2996966f4bb7SRichard Henderson  * Translate from the 4-bit stage 2 representation of
2997966f4bb7SRichard Henderson  * memory attributes (without cache-allocation hints) to
2998966f4bb7SRichard Henderson  * the 8-bit representation of the stage 1 MAIR registers
2999966f4bb7SRichard Henderson  * (which includes allocation hints).
3000966f4bb7SRichard Henderson  *
3001966f4bb7SRichard Henderson  * ref: shared/translation/attrs/S2AttrDecode()
3002966f4bb7SRichard Henderson  *      .../S2ConvertAttrsHints()
3003966f4bb7SRichard Henderson  */
convert_stage2_attrs(uint64_t hcr,uint8_t s2attrs)3004ac76c2e5SRichard Henderson static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs)
3005966f4bb7SRichard Henderson {
3006966f4bb7SRichard Henderson     uint8_t hiattr = extract32(s2attrs, 2, 2);
3007966f4bb7SRichard Henderson     uint8_t loattr = extract32(s2attrs, 0, 2);
3008966f4bb7SRichard Henderson     uint8_t hihint = 0, lohint = 0;
3009966f4bb7SRichard Henderson 
3010966f4bb7SRichard Henderson     if (hiattr != 0) { /* normal memory */
3011ac76c2e5SRichard Henderson         if (hcr & HCR_CD) { /* cache disabled */
3012966f4bb7SRichard Henderson             hiattr = loattr = 1; /* non-cacheable */
3013966f4bb7SRichard Henderson         } else {
3014966f4bb7SRichard Henderson             if (hiattr != 1) { /* Write-through or write-back */
3015966f4bb7SRichard Henderson                 hihint = 3; /* RW allocate */
3016966f4bb7SRichard Henderson             }
3017966f4bb7SRichard Henderson             if (loattr != 1) { /* Write-through or write-back */
3018966f4bb7SRichard Henderson                 lohint = 3; /* RW allocate */
3019966f4bb7SRichard Henderson             }
3020966f4bb7SRichard Henderson         }
3021966f4bb7SRichard Henderson     }
3022966f4bb7SRichard Henderson 
3023966f4bb7SRichard Henderson     return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
3024966f4bb7SRichard Henderson }
3025966f4bb7SRichard Henderson 
/*
 * Combine either inner or outer cacheability attributes for normal
 * memory, according to table D4-42 and pseudocode procedure
 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
 *
 * NB: only stage 1 includes allocation hints (RW bits), leading to
 * some asymmetry.
 */
static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
{
    uint8_t s1_type = extract32(s1, 2, 2);
    uint8_t s2_type = extract32(s2, 2, 2);

    if (s1 == 4 || s2 == 4) {
        /* non-cacheable has precedence */
        return 4;
    }
    if (s1_type == 0 || s1_type == 2) {
        /* stage 1 write-through takes precedence */
        return s1;
    }
    if (s2_type == 2) {
        /*
         * stage 2 write-through takes precedence, but the allocation
         * hint is still taken from stage 1
         */
        return (2 << 2) | extract32(s1, 0, 2);
    }
    /* write-back */
    return s1;
}
3051966f4bb7SRichard Henderson 
3052966f4bb7SRichard Henderson /*
3053966f4bb7SRichard Henderson  * Combine the memory type and cacheability attributes of
3054966f4bb7SRichard Henderson  * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
3055966f4bb7SRichard Henderson  * combined attributes in MAIR_EL1 format.
3056966f4bb7SRichard Henderson  */
combined_attrs_nofwb(uint64_t hcr,ARMCacheAttrs s1,ARMCacheAttrs s2)3057ac76c2e5SRichard Henderson static uint8_t combined_attrs_nofwb(uint64_t hcr,
3058966f4bb7SRichard Henderson                                     ARMCacheAttrs s1, ARMCacheAttrs s2)
3059966f4bb7SRichard Henderson {
3060966f4bb7SRichard Henderson     uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
3061966f4bb7SRichard Henderson 
3062faa1451eSTobias Röhmel     if (s2.is_s2_format) {
3063ac76c2e5SRichard Henderson         s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs);
3064faa1451eSTobias Röhmel     } else {
3065faa1451eSTobias Röhmel         s2_mair_attrs = s2.attrs;
3066faa1451eSTobias Röhmel     }
3067966f4bb7SRichard Henderson 
3068966f4bb7SRichard Henderson     s1lo = extract32(s1.attrs, 0, 4);
3069966f4bb7SRichard Henderson     s2lo = extract32(s2_mair_attrs, 0, 4);
3070966f4bb7SRichard Henderson     s1hi = extract32(s1.attrs, 4, 4);
3071966f4bb7SRichard Henderson     s2hi = extract32(s2_mair_attrs, 4, 4);
3072966f4bb7SRichard Henderson 
3073966f4bb7SRichard Henderson     /* Combine memory type and cacheability attributes */
3074966f4bb7SRichard Henderson     if (s1hi == 0 || s2hi == 0) {
3075966f4bb7SRichard Henderson         /* Device has precedence over normal */
3076966f4bb7SRichard Henderson         if (s1lo == 0 || s2lo == 0) {
3077966f4bb7SRichard Henderson             /* nGnRnE has precedence over anything */
3078966f4bb7SRichard Henderson             ret_attrs = 0;
3079966f4bb7SRichard Henderson         } else if (s1lo == 4 || s2lo == 4) {
3080966f4bb7SRichard Henderson             /* non-Reordering has precedence over Reordering */
3081966f4bb7SRichard Henderson             ret_attrs = 4;  /* nGnRE */
3082966f4bb7SRichard Henderson         } else if (s1lo == 8 || s2lo == 8) {
3083966f4bb7SRichard Henderson             /* non-Gathering has precedence over Gathering */
3084966f4bb7SRichard Henderson             ret_attrs = 8;  /* nGRE */
3085966f4bb7SRichard Henderson         } else {
3086966f4bb7SRichard Henderson             ret_attrs = 0xc; /* GRE */
3087966f4bb7SRichard Henderson         }
3088966f4bb7SRichard Henderson     } else { /* Normal memory */
3089966f4bb7SRichard Henderson         /* Outer/inner cacheability combine independently */
3090966f4bb7SRichard Henderson         ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
3091966f4bb7SRichard Henderson                   | combine_cacheattr_nibble(s1lo, s2lo);
3092966f4bb7SRichard Henderson     }
3093966f4bb7SRichard Henderson     return ret_attrs;
3094966f4bb7SRichard Henderson }
3095966f4bb7SRichard Henderson 
static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
    /*
     * Given the 4 bits specifying the outer or inner cacheability
     * in MAIR format, return a value specifying Normal Write-Back,
     * with the allocation and transient hints taken from the input
     * if the input specified some kind of cacheable attribute.
     */
    switch (attr) {
    case 0: /* UNPREDICTABLE encoding */
    case 4: /* Non-cacheable */
        /* Either way, force Write-Back RW allocate non-transient */
        return 0xf;
    default:
        /* WriteThrough -> WriteBack; keep allocation and transient hints */
        return attr | 4;
    }
}
3115966f4bb7SRichard Henderson 
3116966f4bb7SRichard Henderson /*
3117966f4bb7SRichard Henderson  * Combine the memory type and cacheability attributes of
3118966f4bb7SRichard Henderson  * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
3119966f4bb7SRichard Henderson  * combined attributes in MAIR_EL1 format.
3120966f4bb7SRichard Henderson  */
combined_attrs_fwb(ARMCacheAttrs s1,ARMCacheAttrs s2)312172cef09cSRichard Henderson static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2)
3122966f4bb7SRichard Henderson {
3123faa1451eSTobias Röhmel     assert(s2.is_s2_format && !s1.is_s2_format);
3124faa1451eSTobias Röhmel 
3125966f4bb7SRichard Henderson     switch (s2.attrs) {
3126966f4bb7SRichard Henderson     case 7:
3127966f4bb7SRichard Henderson         /* Use stage 1 attributes */
3128966f4bb7SRichard Henderson         return s1.attrs;
3129966f4bb7SRichard Henderson     case 6:
3130966f4bb7SRichard Henderson         /*
3131966f4bb7SRichard Henderson          * Force Normal Write-Back. Note that if S1 is Normal cacheable
3132966f4bb7SRichard Henderson          * then we take the allocation hints from it; otherwise it is
3133966f4bb7SRichard Henderson          * RW allocate, non-transient.
3134966f4bb7SRichard Henderson          */
3135966f4bb7SRichard Henderson         if ((s1.attrs & 0xf0) == 0) {
3136966f4bb7SRichard Henderson             /* S1 is Device */
3137966f4bb7SRichard Henderson             return 0xff;
3138966f4bb7SRichard Henderson         }
3139966f4bb7SRichard Henderson         /* Need to check the Inner and Outer nibbles separately */
3140966f4bb7SRichard Henderson         return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
3141966f4bb7SRichard Henderson             force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
3142966f4bb7SRichard Henderson     case 5:
3143966f4bb7SRichard Henderson         /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
3144966f4bb7SRichard Henderson         if ((s1.attrs & 0xf0) == 0) {
3145966f4bb7SRichard Henderson             return s1.attrs;
3146966f4bb7SRichard Henderson         }
3147966f4bb7SRichard Henderson         return 0x44;
3148966f4bb7SRichard Henderson     case 0 ... 3:
3149966f4bb7SRichard Henderson         /* Force Device, of subtype specified by S2 */
3150966f4bb7SRichard Henderson         return s2.attrs << 2;
3151966f4bb7SRichard Henderson     default:
3152966f4bb7SRichard Henderson         /*
3153966f4bb7SRichard Henderson          * RESERVED values (including RES0 descriptor bit [5] being nonzero);
3154966f4bb7SRichard Henderson          * arbitrarily force Device.
3155966f4bb7SRichard Henderson          */
3156966f4bb7SRichard Henderson         return 0;
3157966f4bb7SRichard Henderson     }
3158966f4bb7SRichard Henderson }
3159966f4bb7SRichard Henderson 
3160966f4bb7SRichard Henderson /*
3161966f4bb7SRichard Henderson  * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
3162966f4bb7SRichard Henderson  * and CombineS1S2Desc()
3163966f4bb7SRichard Henderson  *
3164966f4bb7SRichard Henderson  * @env:     CPUARMState
3165966f4bb7SRichard Henderson  * @s1:      Attributes from stage 1 walk
3166966f4bb7SRichard Henderson  * @s2:      Attributes from stage 2 walk
3167966f4bb7SRichard Henderson  */
combine_cacheattrs(uint64_t hcr,ARMCacheAttrs s1,ARMCacheAttrs s2)3168ac76c2e5SRichard Henderson static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
3169966f4bb7SRichard Henderson                                         ARMCacheAttrs s1, ARMCacheAttrs s2)
3170966f4bb7SRichard Henderson {
3171966f4bb7SRichard Henderson     ARMCacheAttrs ret;
3172966f4bb7SRichard Henderson     bool tagged = false;
3173966f4bb7SRichard Henderson 
3174faa1451eSTobias Röhmel     assert(!s1.is_s2_format);
3175966f4bb7SRichard Henderson     ret.is_s2_format = false;
3176966f4bb7SRichard Henderson 
3177966f4bb7SRichard Henderson     if (s1.attrs == 0xf0) {
3178966f4bb7SRichard Henderson         tagged = true;
3179966f4bb7SRichard Henderson         s1.attrs = 0xff;
3180966f4bb7SRichard Henderson     }
3181966f4bb7SRichard Henderson 
3182966f4bb7SRichard Henderson     /* Combine shareability attributes (table D4-43) */
3183966f4bb7SRichard Henderson     if (s1.shareability == 2 || s2.shareability == 2) {
3184966f4bb7SRichard Henderson         /* if either are outer-shareable, the result is outer-shareable */
3185966f4bb7SRichard Henderson         ret.shareability = 2;
3186966f4bb7SRichard Henderson     } else if (s1.shareability == 3 || s2.shareability == 3) {
3187966f4bb7SRichard Henderson         /* if either are inner-shareable, the result is inner-shareable */
3188966f4bb7SRichard Henderson         ret.shareability = 3;
3189966f4bb7SRichard Henderson     } else {
3190966f4bb7SRichard Henderson         /* both non-shareable */
3191966f4bb7SRichard Henderson         ret.shareability = 0;
3192966f4bb7SRichard Henderson     }
3193966f4bb7SRichard Henderson 
3194966f4bb7SRichard Henderson     /* Combine memory type and cacheability attributes */
3195ac76c2e5SRichard Henderson     if (hcr & HCR_FWB) {
319672cef09cSRichard Henderson         ret.attrs = combined_attrs_fwb(s1, s2);
3197966f4bb7SRichard Henderson     } else {
3198ac76c2e5SRichard Henderson         ret.attrs = combined_attrs_nofwb(hcr, s1, s2);
3199966f4bb7SRichard Henderson     }
3200966f4bb7SRichard Henderson 
3201966f4bb7SRichard Henderson     /*
3202966f4bb7SRichard Henderson      * Any location for which the resultant memory type is any
3203966f4bb7SRichard Henderson      * type of Device memory is always treated as Outer Shareable.
3204966f4bb7SRichard Henderson      * Any location for which the resultant memory type is Normal
3205966f4bb7SRichard Henderson      * Inner Non-cacheable, Outer Non-cacheable is always treated
3206966f4bb7SRichard Henderson      * as Outer Shareable.
3207966f4bb7SRichard Henderson      * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
3208966f4bb7SRichard Henderson      */
3209966f4bb7SRichard Henderson     if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
3210966f4bb7SRichard Henderson         ret.shareability = 2;
3211966f4bb7SRichard Henderson     }
3212966f4bb7SRichard Henderson 
3213966f4bb7SRichard Henderson     /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
3214966f4bb7SRichard Henderson     if (tagged && ret.attrs == 0xff) {
3215966f4bb7SRichard Henderson         ret.attrs = 0xf0;
3216966f4bb7SRichard Henderson     }
3217966f4bb7SRichard Henderson 
3218966f4bb7SRichard Henderson     return ret;
3219966f4bb7SRichard Henderson }
3220966f4bb7SRichard Henderson 
3221448e42fdSRichard Henderson /*
3222448e42fdSRichard Henderson  * MMU disabled.  S1 addresses within aa64 translation regimes are
3223448e42fdSRichard Henderson  * still checked for bounds -- see AArch64.S1DisabledOutput().
3224448e42fdSRichard Henderson  */
get_phys_addr_disabled(CPUARMState * env,S1Translate * ptw,vaddr address,MMUAccessType access_type,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)3225a5637becSPeter Maydell static bool get_phys_addr_disabled(CPUARMState *env,
3226a5637becSPeter Maydell                                    S1Translate *ptw,
322767d762e7SArd Biesheuvel                                    vaddr address,
3228448e42fdSRichard Henderson                                    MMUAccessType access_type,
3229448e42fdSRichard Henderson                                    GetPhysAddrResult *result,
3230448e42fdSRichard Henderson                                    ARMMMUFaultInfo *fi)
3231448e42fdSRichard Henderson {
3232a5637becSPeter Maydell     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
32335b74f9b4SRichard Henderson     uint8_t memattr = 0x00;    /* Device nGnRnE */
323446f38c97SRichard Henderson     uint8_t shareability = 0;  /* non-shareable */
3235a1ce3084SRichard Henderson     int r_el;
3236448e42fdSRichard Henderson 
3237a1ce3084SRichard Henderson     switch (mmu_idx) {
3238a1ce3084SRichard Henderson     case ARMMMUIdx_Stage2:
3239a1ce3084SRichard Henderson     case ARMMMUIdx_Stage2_S:
3240a1ce3084SRichard Henderson     case ARMMMUIdx_Phys_S:
3241bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_NS:
3242bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_Root:
3243bb5cc2c8SRichard Henderson     case ARMMMUIdx_Phys_Realm:
3244a1ce3084SRichard Henderson         break;
32455b74f9b4SRichard Henderson 
3246a1ce3084SRichard Henderson     default:
3247a1ce3084SRichard Henderson         r_el = regime_el(env, mmu_idx);
3248448e42fdSRichard Henderson         if (arm_el_is_aa64(env, r_el)) {
3249448e42fdSRichard Henderson             int pamax = arm_pamax(env_archcpu(env));
3250448e42fdSRichard Henderson             uint64_t tcr = env->cp15.tcr_el[r_el];
3251448e42fdSRichard Henderson             int addrtop, tbi;
3252448e42fdSRichard Henderson 
3253448e42fdSRichard Henderson             tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
3254448e42fdSRichard Henderson             if (access_type == MMU_INST_FETCH) {
3255448e42fdSRichard Henderson                 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
3256448e42fdSRichard Henderson             }
3257448e42fdSRichard Henderson             tbi = (tbi >> extract64(address, 55, 1)) & 1;
3258448e42fdSRichard Henderson             addrtop = (tbi ? 55 : 63);
3259448e42fdSRichard Henderson 
3260448e42fdSRichard Henderson             if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
3261448e42fdSRichard Henderson                 fi->type = ARMFault_AddressSize;
3262448e42fdSRichard Henderson                 fi->level = 0;
3263448e42fdSRichard Henderson                 fi->stage2 = false;
3264448e42fdSRichard Henderson                 return 1;
3265448e42fdSRichard Henderson             }
3266448e42fdSRichard Henderson 
3267448e42fdSRichard Henderson             /*
3268448e42fdSRichard Henderson              * When TBI is disabled, we've just validated that all of the
3269448e42fdSRichard Henderson              * bits above PAMax are zero, so logically we only need to
3270448e42fdSRichard Henderson              * clear the top byte for TBI.  But it's clearer to follow
3271448e42fdSRichard Henderson              * the pseudocode set of addrdesc.paddress.
3272448e42fdSRichard Henderson              */
3273448e42fdSRichard Henderson             address = extract64(address, 0, 52);
3274448e42fdSRichard Henderson         }
3275448e42fdSRichard Henderson 
3276448e42fdSRichard Henderson         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
32775b74f9b4SRichard Henderson         if (r_el == 1) {
32782d12bb96SPeter Maydell             uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
3279448e42fdSRichard Henderson             if (hcr & HCR_DC) {
3280448e42fdSRichard Henderson                 if (hcr & HCR_DCT) {
3281448e42fdSRichard Henderson                     memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
3282448e42fdSRichard Henderson                 } else {
3283448e42fdSRichard Henderson                     memattr = 0xff;  /* Normal, WB, RWA */
3284448e42fdSRichard Henderson                 }
32855b74f9b4SRichard Henderson             }
32865b74f9b4SRichard Henderson         }
32873d9ca962SPeter Maydell         if (memattr == 0) {
32883d9ca962SPeter Maydell             if (access_type == MMU_INST_FETCH) {
3289448e42fdSRichard Henderson                 if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
3290448e42fdSRichard Henderson                     memattr = 0xee;  /* Normal, WT, RA, NT */
3291448e42fdSRichard Henderson                 } else {
3292448e42fdSRichard Henderson                     memattr = 0x44;  /* Normal, NC, No */
3293448e42fdSRichard Henderson                 }
32943d9ca962SPeter Maydell             }
329546f38c97SRichard Henderson             shareability = 2; /* outer shareable */
3296448e42fdSRichard Henderson         }
32975b74f9b4SRichard Henderson         result->cacheattrs.is_s2_format = false;
3298a1ce3084SRichard Henderson         break;
32995b74f9b4SRichard Henderson     }
33005b74f9b4SRichard Henderson 
33017fa7ea8fSRichard Henderson     result->f.phys_addr = address;
33027fa7ea8fSRichard Henderson     result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
33037fa7ea8fSRichard Henderson     result->f.lg_page_size = TARGET_PAGE_BITS;
33045b74f9b4SRichard Henderson     result->cacheattrs.shareability = shareability;
3305448e42fdSRichard Henderson     result->cacheattrs.attrs = memattr;
33066b72c542SRichard Henderson     return false;
3307448e42fdSRichard Henderson }
3308448e42fdSRichard Henderson 
/*
 * Perform a combined stage 1 + stage 2 translation: walk stage 1 to
 * produce an IPA, then walk stage 2 on that IPA, merging permissions,
 * page size, BTI guard state and cache attributes from both stages
 * into @result.  Returns true on fault (with @fi filled in), false on
 * success.  Note that @ptw is mutated in place to describe the stage 2
 * regime before the second walk.
 */
static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
                                   vaddr address,
                                   MMUAccessType access_type, MemOp memop,
                                   GetPhysAddrResult *result,
                                   ARMMMUFaultInfo *fi)
{
    hwaddr ipa;
    int s1_prot, s1_lgpgsz;
    ARMSecuritySpace in_space = ptw->in_space;
    bool ret, ipa_secure, s1_guarded;
    ARMCacheAttrs cacheattrs1;
    ARMSecuritySpace ipa_space;
    uint64_t hcr;

    /* Stage 1 walk: virtual address -> IPA */
    ret = get_phys_addr_nogpc(env, ptw, address, access_type,
                              memop, result, fi);

    /* If S1 fails, return early.  */
    if (ret) {
        return ret;
    }

    /* Capture the stage 1 output before result is reused for stage 2 */
    ipa = result->f.phys_addr;
    ipa_secure = result->f.attrs.secure;
    ipa_space = result->f.attrs.space;

    /* Repoint ptw at the stage 2 regime matching the IPA's security state */
    ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
    ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
    ptw->in_space = ipa_space;
    ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);

    /*
     * S1 is done, now do S2 translation.
     * Save the stage1 results so that we may merge prot and cacheattrs later.
     */
    s1_prot = result->f.prot;
    s1_lgpgsz = result->f.lg_page_size;
    s1_guarded = result->f.extra.arm.guarded;
    cacheattrs1 = result->cacheattrs;
    memset(result, 0, sizeof(*result));

    /* Stage 2 walk: IPA -> physical address */
    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type,
                              memop, result, fi);
    fi->s2addr = ipa;

    /* Combine the S1 and S2 perms.  */
    result->f.prot &= s1_prot;

    /* If S2 fails, return early.  */
    if (ret) {
        return ret;
    }

    /*
     * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE,
     * this means "don't put this in the TLB"; in this case, return a
     * result with lg_page_size == 0 to achieve that. Otherwise,
     * use the maximum of the S1 & S2 page size, so that invalidation
     * of pages > TARGET_PAGE_SIZE works correctly. (This works even though
     * we know the combined result permissions etc only cover the minimum
     * of the S1 and S2 page size, because we know that the common TLB code
     * never actually creates TLB entries bigger than TARGET_PAGE_SIZE,
     * and passing a larger page size value only affects invalidations.)
     */
    if (result->f.lg_page_size < TARGET_PAGE_BITS ||
        s1_lgpgsz < TARGET_PAGE_BITS) {
        result->f.lg_page_size = 0;
    } else if (result->f.lg_page_size < s1_lgpgsz) {
        result->f.lg_page_size = s1_lgpgsz;
    }

    /* Combine the S1 and S2 cache attributes. */
    hcr = arm_hcr_el2_eff_secstate(env, in_space);
    if (hcr & HCR_DC) {
        /*
         * HCR.DC forces the first stage attributes to
         *  Normal Non-Shareable,
         *  Inner Write-Back Read-Allocate Write-Allocate,
         *  Outer Write-Back Read-Allocate Write-Allocate.
         * Do not overwrite Tagged within attrs.
         */
        if (cacheattrs1.attrs != 0xf0) {
            cacheattrs1.attrs = 0xff;
        }
        cacheattrs1.shareability = 0;
    }
    result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1,
                                            result->cacheattrs);

    /* No BTI GP information in stage 2, we just use the S1 value */
    result->f.extra.arm.guarded = s1_guarded;

    /*
     * Check if IPA translates to secure or non-secure PA space.
     * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA.
     */
    if (in_space == ARMSS_Secure) {
        result->f.attrs.secure =
            !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
            && (ipa_secure
                || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
        result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
    }

    return false;
}
34153f5a74c5SRichard Henderson 
/*
 * get_phys_addr_nogpc: translate @address without a Granule Protection Check.
 *
 * Perform the full translation selected by ptw->in_mmu_idx / ptw->in_space
 * (PMSA MPU lookup, LPAE/short-descriptor MMU walk, two-stage walk, or the
 * translation-disabled path), filling in *result on success.
 *
 * Returns true on a fault, with *fi describing it; false on success.
 * Unlike get_phys_addr_gpc(), the output address is not checked against
 * the granule protection tables; callers that need RME GPC semantics must
 * use the _gpc variant.
 */
static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                      vaddr address,
                                      MMUAccessType access_type, MemOp memop,
                                      GetPhysAddrResult *result,
                                      ARMMMUFaultInfo *fi)
{
    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
    ARMMMUIdx s1_mmu_idx;

    /*
     * The page table entries may downgrade Secure to NonSecure, but
     * cannot upgrade a NonSecure translation regime's attributes
     * to Secure or Realm.
     */
    result->f.attrs.space = ptw->in_space;
    result->f.attrs.secure = arm_space_is_secure(ptw->in_space);

    /* Select the mmu_idx used for the descriptor loads of this walk. */
    switch (mmu_idx) {
    case ARMMMUIdx_Phys_S:
    case ARMMMUIdx_Phys_NS:
    case ARMMMUIdx_Phys_Root:
    case ARMMMUIdx_Phys_Realm:
        /* Checking Phys early avoids special casing later vs regime_el. */
        return get_phys_addr_disabled(env, ptw, address, access_type,
                                      result, fi);

    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        /*
         * First stage lookup uses second stage for ptw; only
         * Secure has both S and NS IPA and starts with Stage2_S.
         */
        ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
            ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
        break;

    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
        /*
         * Second stage lookup uses physical for ptw; whether this is S or
         * NS may depend on the SW/NSW bits if this is a stage 2 lookup for
         * the Secure EL2&0 regime.
         */
        ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx);
        break;

    case ARMMMUIdx_E10_0:
        s1_mmu_idx = ARMMMUIdx_Stage1_E0;
        goto do_twostage;
    case ARMMMUIdx_E10_1:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1;
        goto do_twostage;
    case ARMMMUIdx_E10_1_PAN:
        s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
    do_twostage:
        /*
         * Call ourselves recursively to do the stage 1 and then stage 2
         * translations if mmu_idx is a two-stage regime, and EL2 present.
         * Otherwise, a stage1+stage2 translation is just stage 1.
         */
        ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
        if (arm_feature(env, ARM_FEATURE_EL2) &&
            !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
            return get_phys_addr_twostage(env, ptw, address, access_type,
                                          memop, result, fi);
        }
        /* fall through */

    default:
        /* Single stage uses physical for ptw. */
        ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space);
        break;
    }

    result->f.attrs.user = regime_is_user(env, mmu_idx);

    /*
     * Fast Context Switch Extension. This doesn't exist at all in v8.
     * In v7 and earlier it affects all stage 1 translations.
     */
    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
        && !arm_feature(env, ARM_FEATURE_V8)) {
        if (regime_el(env, mmu_idx) == 3) {
            address += env->cp15.fcseidr_s;
        } else {
            address += env->cp15.fcseidr_ns;
        }
    }

    /* M-profile and R-profile cores use an MPU rather than an MMU. */
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        bool ret;
        result->f.lg_page_size = TARGET_PAGE_BITS;

        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* PMSAv8 */
            ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
                                       result, fi);
        } else if (arm_feature(env, ARM_FEATURE_V7)) {
            /* PMSAv7 */
            ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
                                       result, fi);
        } else {
            /* Pre-v7 MPU */
            ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
                                       result, fi);
        }
        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                      " mmu_idx %u -> %s (prot %c%c%c)\n",
                      access_type == MMU_DATA_LOAD ? "reading" :
                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                      (uint32_t)address, mmu_idx,
                      ret ? "Miss" : "Hit",
                      result->f.prot & PAGE_READ ? 'r' : '-',
                      result->f.prot & PAGE_WRITE ? 'w' : '-',
                      result->f.prot & PAGE_EXEC ? 'x' : '-');

        return ret;
    }

    /* Definitely a real MMU, not an MPU */

    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
        return get_phys_addr_disabled(env, ptw, address, access_type,
                                      result, fi);
    }

    /* Pick the descriptor format: LPAE, v6 (XP set), or legacy v5. */
    if (regime_using_lpae_format(env, mmu_idx)) {
        return get_phys_addr_lpae(env, ptw, address, access_type,
                                  memop, result, fi);
    } else if (arm_feature(env, ARM_FEATURE_V7) ||
               regime_sctlr(env, mmu_idx) & SCTLR_XP) {
        return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
    } else {
        return get_phys_addr_v5(env, ptw, address, access_type, result, fi);
    }
}
355323971205SRichard Henderson 
get_phys_addr_gpc(CPUARMState * env,S1Translate * ptw,vaddr address,MMUAccessType access_type,MemOp memop,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)355446f38c97SRichard Henderson static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
355567d762e7SArd Biesheuvel                               vaddr address,
35565458670bSRichard Henderson                               MMUAccessType access_type, MemOp memop,
355746f38c97SRichard Henderson                               GetPhysAddrResult *result,
355846f38c97SRichard Henderson                               ARMMMUFaultInfo *fi)
355946f38c97SRichard Henderson {
3560c6cd9f9fSRichard Henderson     if (get_phys_addr_nogpc(env, ptw, address, access_type,
3561c6cd9f9fSRichard Henderson                             memop, result, fi)) {
356246f38c97SRichard Henderson         return true;
356346f38c97SRichard Henderson     }
356446f38c97SRichard Henderson     if (!granule_protection_check(env, result->f.phys_addr,
356546f38c97SRichard Henderson                                   result->f.attrs.space, fi)) {
356646f38c97SRichard Henderson         fi->type = ARMFault_GPCFOnOutput;
356746f38c97SRichard Henderson         return true;
356846f38c97SRichard Henderson     }
356946f38c97SRichard Henderson     return false;
357046f38c97SRichard Henderson }
357146f38c97SRichard Henderson 
get_phys_addr_with_space_nogpc(CPUARMState * env,vaddr address,MMUAccessType access_type,MemOp memop,ARMMMUIdx mmu_idx,ARMSecuritySpace space,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)357267d762e7SArd Biesheuvel bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
357329b4d7dbSRichard Henderson                                     MMUAccessType access_type, MemOp memop,
3574e1ee56ecSJean-Philippe Brucker                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
3575f1269a98SJean-Philippe Brucker                                     GetPhysAddrResult *result,
35764a358556SRichard Henderson                                     ARMMMUFaultInfo *fi)
35774a358556SRichard Henderson {
35784a358556SRichard Henderson     S1Translate ptw = {
35794a358556SRichard Henderson         .in_mmu_idx = mmu_idx,
3580e1ee56ecSJean-Philippe Brucker         .in_space = space,
35814a358556SRichard Henderson     };
3582c6cd9f9fSRichard Henderson     return get_phys_addr_nogpc(env, &ptw, address, access_type,
3583c6cd9f9fSRichard Henderson                                memop, result, fi);
35844a358556SRichard Henderson }
35854a358556SRichard Henderson 
get_phys_addr(CPUARMState * env,vaddr address,MMUAccessType access_type,MemOp memop,ARMMMUIdx mmu_idx,GetPhysAddrResult * result,ARMMMUFaultInfo * fi)358667d762e7SArd Biesheuvel bool get_phys_addr(CPUARMState *env, vaddr address,
3587ec2c9337SRichard Henderson                    MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
3588def8aa5bSRichard Henderson                    GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
3589def8aa5bSRichard Henderson {
359090c66293SRichard Henderson     S1Translate ptw = {
359190c66293SRichard Henderson         .in_mmu_idx = mmu_idx,
359290c66293SRichard Henderson     };
359390c66293SRichard Henderson     ARMSecuritySpace ss;
359403bea66eSRichard Henderson 
359503bea66eSRichard Henderson     switch (mmu_idx) {
359603bea66eSRichard Henderson     case ARMMMUIdx_E10_0:
359703bea66eSRichard Henderson     case ARMMMUIdx_E10_1:
359803bea66eSRichard Henderson     case ARMMMUIdx_E10_1_PAN:
359903bea66eSRichard Henderson     case ARMMMUIdx_E20_0:
360003bea66eSRichard Henderson     case ARMMMUIdx_E20_2:
360103bea66eSRichard Henderson     case ARMMMUIdx_E20_2_PAN:
360203bea66eSRichard Henderson     case ARMMMUIdx_Stage1_E0:
360303bea66eSRichard Henderson     case ARMMMUIdx_Stage1_E1:
360403bea66eSRichard Henderson     case ARMMMUIdx_Stage1_E1_PAN:
360503bea66eSRichard Henderson     case ARMMMUIdx_E2:
360690c66293SRichard Henderson         ss = arm_security_space_below_el3(env);
3607d902ae75SRichard Henderson         break;
360803bea66eSRichard Henderson     case ARMMMUIdx_Stage2:
360990c66293SRichard Henderson         /*
361090c66293SRichard Henderson          * For Secure EL2, we need this index to be NonSecure;
361190c66293SRichard Henderson          * otherwise this will already be NonSecure or Realm.
361290c66293SRichard Henderson          */
361390c66293SRichard Henderson         ss = arm_security_space_below_el3(env);
361490c66293SRichard Henderson         if (ss == ARMSS_Secure) {
361590c66293SRichard Henderson             ss = ARMSS_NonSecure;
361690c66293SRichard Henderson         }
361790c66293SRichard Henderson         break;
3618a1ce3084SRichard Henderson     case ARMMMUIdx_Phys_NS:
361903bea66eSRichard Henderson     case ARMMMUIdx_MPrivNegPri:
362003bea66eSRichard Henderson     case ARMMMUIdx_MUserNegPri:
362103bea66eSRichard Henderson     case ARMMMUIdx_MPriv:
362203bea66eSRichard Henderson     case ARMMMUIdx_MUser:
362390c66293SRichard Henderson         ss = ARMSS_NonSecure;
362403bea66eSRichard Henderson         break;
362503bea66eSRichard Henderson     case ARMMMUIdx_Stage2_S:
3626a1ce3084SRichard Henderson     case ARMMMUIdx_Phys_S:
362703bea66eSRichard Henderson     case ARMMMUIdx_MSPrivNegPri:
362803bea66eSRichard Henderson     case ARMMMUIdx_MSUserNegPri:
362903bea66eSRichard Henderson     case ARMMMUIdx_MSPriv:
363003bea66eSRichard Henderson     case ARMMMUIdx_MSUser:
363190c66293SRichard Henderson         ss = ARMSS_Secure;
363290c66293SRichard Henderson         break;
363390c66293SRichard Henderson     case ARMMMUIdx_E3:
3634efbe180aSPeter Maydell     case ARMMMUIdx_E30_0:
3635efbe180aSPeter Maydell     case ARMMMUIdx_E30_3_PAN:
363690c66293SRichard Henderson         if (arm_feature(env, ARM_FEATURE_AARCH64) &&
363790c66293SRichard Henderson             cpu_isar_feature(aa64_rme, env_archcpu(env))) {
363890c66293SRichard Henderson             ss = ARMSS_Root;
363990c66293SRichard Henderson         } else {
364090c66293SRichard Henderson             ss = ARMSS_Secure;
364190c66293SRichard Henderson         }
364290c66293SRichard Henderson         break;
364390c66293SRichard Henderson     case ARMMMUIdx_Phys_Root:
364490c66293SRichard Henderson         ss = ARMSS_Root;
364590c66293SRichard Henderson         break;
364690c66293SRichard Henderson     case ARMMMUIdx_Phys_Realm:
364790c66293SRichard Henderson         ss = ARMSS_Realm;
364803bea66eSRichard Henderson         break;
364903bea66eSRichard Henderson     default:
365003bea66eSRichard Henderson         g_assert_not_reached();
365103bea66eSRichard Henderson     }
365290c66293SRichard Henderson 
365390c66293SRichard Henderson     ptw.in_space = ss;
36545458670bSRichard Henderson     return get_phys_addr_gpc(env, &ptw, address, access_type,
36555458670bSRichard Henderson                              memop, result, fi);
3656def8aa5bSRichard Henderson }
3657def8aa5bSRichard Henderson 
arm_cpu_get_phys_page_attrs_debug(CPUState * cs,vaddr addr,MemTxAttrs * attrs)365823971205SRichard Henderson hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
365923971205SRichard Henderson                                          MemTxAttrs *attrs)
366023971205SRichard Henderson {
366123971205SRichard Henderson     ARMCPU *cpu = ARM_CPU(cs);
366223971205SRichard Henderson     CPUARMState *env = &cpu->env;
366390c66293SRichard Henderson     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
366490c66293SRichard Henderson     ARMSecuritySpace ss = arm_security_space(env);
36654a358556SRichard Henderson     S1Translate ptw = {
366690c66293SRichard Henderson         .in_mmu_idx = mmu_idx,
366790c66293SRichard Henderson         .in_space = ss,
36684a358556SRichard Henderson         .in_debug = true,
36694a358556SRichard Henderson     };
3670de05a709SRichard Henderson     GetPhysAddrResult res = {};
367123971205SRichard Henderson     ARMMMUFaultInfo fi = {};
3672de05a709SRichard Henderson     bool ret;
367323971205SRichard Henderson 
36745458670bSRichard Henderson     ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
36757fa7ea8fSRichard Henderson     *attrs = res.f.attrs;
367623971205SRichard Henderson 
367723971205SRichard Henderson     if (ret) {
367823971205SRichard Henderson         return -1;
367923971205SRichard Henderson     }
36807fa7ea8fSRichard Henderson     return res.f.phys_addr;
368123971205SRichard Henderson }
3682