18ae08860SRichard Henderson /* 28ae08860SRichard Henderson * ARM page table walking. 38ae08860SRichard Henderson * 48ae08860SRichard Henderson * This code is licensed under the GNU GPL v2 or later. 58ae08860SRichard Henderson * 68ae08860SRichard Henderson * SPDX-License-Identifier: GPL-2.0-or-later 78ae08860SRichard Henderson */ 88ae08860SRichard Henderson 98ae08860SRichard Henderson #include "qemu/osdep.h" 108ae08860SRichard Henderson #include "qemu/log.h" 111f2e87e5SRichard Henderson #include "qemu/range.h" 1271943a1eSRichard Henderson #include "qemu/main-loop.h" 13f3639a64SRichard Henderson #include "exec/exec-all.h" 1474781c08SPhilippe Mathieu-Daudé #include "exec/page-protection.h" 158ae08860SRichard Henderson #include "cpu.h" 168ae08860SRichard Henderson #include "internals.h" 175a534314SPeter Maydell #include "cpu-features.h" 182c1f429dSRichard Henderson #include "idau.h" 19007cd176SRichard Henderson #ifdef CONFIG_TCG 2070f168f8SRichard Henderson # include "tcg/oversized-guest.h" 21007cd176SRichard Henderson #endif 228ae08860SRichard Henderson 236d2654ffSRichard Henderson typedef struct S1Translate { 2434eed551SPeter Maydell /* 2534eed551SPeter Maydell * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk. 2634eed551SPeter Maydell * Together with in_space, specifies the architectural translation regime. 2734eed551SPeter Maydell */ 286d2654ffSRichard Henderson ARMMMUIdx in_mmu_idx; 2934eed551SPeter Maydell /* 3034eed551SPeter Maydell * in_ptw_idx: specifies which mmuidx to use for the actual 3134eed551SPeter Maydell * page table descriptor load operations. This will be one of the 3234eed551SPeter Maydell * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes. 3334eed551SPeter Maydell * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, 3434eed551SPeter Maydell * this field is updated accordingly. 3534eed551SPeter Maydell */ 3648da29e4SRichard Henderson ARMMMUIdx in_ptw_idx; 3734eed551SPeter Maydell /* 3834eed551SPeter Maydell * in_space: the security space for this walk. This plus 3934eed551SPeter Maydell * the in_mmu_idx specify the architectural translation regime. 4034eed551SPeter Maydell * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, 4134eed551SPeter Maydell * this field is updated accordingly. 4234eed551SPeter Maydell * 4334eed551SPeter Maydell * Note that the security space for the in_ptw_idx may be different 4434eed551SPeter Maydell * from that for the in_mmu_idx. We do not need to explicitly track 4534eed551SPeter Maydell * the in_ptw_idx security space because: 4634eed551SPeter Maydell * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx 4734eed551SPeter Maydell * itself specifies the security space 4834eed551SPeter Maydell * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security 4934eed551SPeter Maydell * space used for ptw reads is the same as that of the security 5034eed551SPeter Maydell * space of the stage 1 translation for all cases except where 5134eed551SPeter Maydell * stage 1 is Secure; in that case the only possibilities for 5234eed551SPeter Maydell * the ptw read are Secure and NonSecure, and the in_ptw_idx 5334eed551SPeter Maydell * value being Stage2 vs Stage2_S distinguishes those. 5434eed551SPeter Maydell */ 5590c66293SRichard Henderson ARMSecuritySpace in_space; 5634eed551SPeter Maydell /* 5734eed551SPeter Maydell * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug 5834eed551SPeter Maydell * accesses will not update the guest page table access flags 5934eed551SPeter Maydell * and will not change the state of the softmmu TLBs. 6034eed551SPeter Maydell */ 614a358556SRichard Henderson bool in_debug; 627c19b2d6SRichard Henderson /* 637c19b2d6SRichard Henderson * If this is stage 2 of a stage 1+2 page table walk, then this must 647c19b2d6SRichard Henderson * be true if stage 1 is an EL0 access; otherwise this is ignored. 657c19b2d6SRichard Henderson * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}. 667c19b2d6SRichard Henderson */ 677c19b2d6SRichard Henderson bool in_s1_is_el0; 6871943a1eSRichard Henderson bool out_rw; 694e7a2c98SRichard Henderson bool out_be; 7090c66293SRichard Henderson ARMSecuritySpace out_space; 7171943a1eSRichard Henderson hwaddr out_virt; 726d2654ffSRichard Henderson hwaddr out_phys; 73f3639a64SRichard Henderson void *out_host; 746d2654ffSRichard Henderson } S1Translate; 756d2654ffSRichard Henderson 7646f38c97SRichard Henderson static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, 7767d762e7SArd Biesheuvel vaddr address, 78c6cd9f9fSRichard Henderson MMUAccessType access_type, MemOp memop, 7946f38c97SRichard Henderson GetPhysAddrResult *result, 8046f38c97SRichard Henderson ARMMMUFaultInfo *fi); 8146f38c97SRichard Henderson 8246f38c97SRichard Henderson static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, 8367d762e7SArd Biesheuvel vaddr address, 845458670bSRichard Henderson MMUAccessType access_type, MemOp memop, 853f5a74c5SRichard Henderson GetPhysAddrResult *result, 8686a438b4SRichard Henderson ARMMMUFaultInfo *fi); 873f5a74c5SRichard Henderson 881c73d848SRichard Henderson /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */ 891c73d848SRichard Henderson static const uint8_t pamax_map[] = { 901c73d848SRichard Henderson [0] = 32, 911c73d848SRichard Henderson [1] = 36, 921c73d848SRichard Henderson [2] = 40, 931c73d848SRichard Henderson [3] = 42, 941c73d848SRichard Henderson [4] = 44, 951c73d848SRichard Henderson [5] = 48, 961c73d848SRichard Henderson [6] = 52, 971c73d848SRichard Henderson }; 981c73d848SRichard Henderson 99d54ffa54SDanny Canter uint8_t round_down_to_parange_index(uint8_t bit_size) 100d54ffa54SDanny Canter { 101d54ffa54SDanny Canter for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) { 102d54ffa54SDanny Canter if (pamax_map[i] <= bit_size) { 103d54ffa54SDanny Canter return i; 104d54ffa54SDanny Canter } 105d54ffa54SDanny Canter } 106d54ffa54SDanny Canter g_assert_not_reached(); 107d54ffa54SDanny Canter } 108d54ffa54SDanny Canter 109d54ffa54SDanny Canter uint8_t round_down_to_parange_bit_size(uint8_t bit_size) 110d54ffa54SDanny Canter { 111d54ffa54SDanny Canter return pamax_map[round_down_to_parange_index(bit_size)]; 112d54ffa54SDanny Canter } 113d54ffa54SDanny Canter 11471e269fbSPeter Maydell /* 11571e269fbSPeter Maydell * The cpu-specific constant value of PAMax; also used by hw/arm/virt. 11671e269fbSPeter Maydell * Note that machvirt_init calls this on a CPU that is inited but not realized! 11771e269fbSPeter Maydell */ 1181c73d848SRichard Henderson unsigned int arm_pamax(ARMCPU *cpu) 1191c73d848SRichard Henderson { 12022536b13SRichard Henderson if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 1211c73d848SRichard Henderson unsigned int parange = 1221c73d848SRichard Henderson FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); 1231c73d848SRichard Henderson 1241c73d848SRichard Henderson /* 1251c73d848SRichard Henderson * id_aa64mmfr0 is a read-only register so values outside of the 1261c73d848SRichard Henderson * supported mappings can be considered an implementation error. 1271c73d848SRichard Henderson */ 1281c73d848SRichard Henderson assert(parange < ARRAY_SIZE(pamax_map)); 1291c73d848SRichard Henderson return pamax_map[parange]; 1301c73d848SRichard Henderson } 13159e1b8a2SRichard Henderson 13271e269fbSPeter Maydell if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 13371e269fbSPeter Maydell /* v7 or v8 with LPAE */ 13422536b13SRichard Henderson return 40; 13522536b13SRichard Henderson } 13622536b13SRichard Henderson /* Anything else */ 13722536b13SRichard Henderson return 32; 13822536b13SRichard Henderson } 1391c73d848SRichard Henderson 1401d261255SRichard Henderson /* 1411d261255SRichard Henderson * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index 1421d261255SRichard Henderson */ 1431d261255SRichard Henderson ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 1441d261255SRichard Henderson { 1451d261255SRichard Henderson switch (mmu_idx) { 1461d261255SRichard Henderson case ARMMMUIdx_E10_0: 1471d261255SRichard Henderson return ARMMMUIdx_Stage1_E0; 1481d261255SRichard Henderson case ARMMMUIdx_E10_1: 1491d261255SRichard Henderson return ARMMMUIdx_Stage1_E1; 1501d261255SRichard Henderson case ARMMMUIdx_E10_1_PAN: 1511d261255SRichard Henderson return ARMMMUIdx_Stage1_E1_PAN; 1521d261255SRichard Henderson default: 1531d261255SRichard Henderson return mmu_idx; 1541d261255SRichard Henderson } 1551d261255SRichard Henderson } 1561d261255SRichard Henderson 1571d261255SRichard Henderson ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 1581d261255SRichard Henderson { 1591d261255SRichard Henderson return stage_1_mmu_idx(arm_mmu_idx(env)); 1601d261255SRichard Henderson } 1611d261255SRichard Henderson 162fcc0b041SPeter Maydell /* 163fcc0b041SPeter Maydell * Return where we should do ptw loads from for a stage 2 walk. 164fcc0b041SPeter Maydell * This depends on whether the address we are looking up is a 165fcc0b041SPeter Maydell * Secure IPA or a NonSecure IPA, which we know from whether this is 166fcc0b041SPeter Maydell * Stage2 or Stage2_S. 167fcc0b041SPeter Maydell * If this is the Secure EL1&0 regime we need to check the NSW and SW bits. 168fcc0b041SPeter Maydell */ 169fcc0b041SPeter Maydell static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx) 170fcc0b041SPeter Maydell { 171fcc0b041SPeter Maydell bool s2walk_secure; 172fcc0b041SPeter Maydell 173fcc0b041SPeter Maydell /* 174fcc0b041SPeter Maydell * We're OK to check the current state of the CPU here because 175da64251eSJean-Philippe Brucker * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit 176da64251eSJean-Philippe Brucker * changes. 177fcc0b041SPeter Maydell * (2) there's no way to do a lookup that cares about Stage 2 for a 178fcc0b041SPeter Maydell * different security state to the current one for AArch64, and AArch32 179fcc0b041SPeter Maydell * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do 180fcc0b041SPeter Maydell * an NS stage 1+2 lookup while the NS bit is 0.) 181fcc0b041SPeter Maydell */ 182da64251eSJean-Philippe Brucker if (!arm_el_is_aa64(env, 3)) { 183fcc0b041SPeter Maydell return ARMMMUIdx_Phys_NS; 184fcc0b041SPeter Maydell } 185da64251eSJean-Philippe Brucker 186da64251eSJean-Philippe Brucker switch (arm_security_space_below_el3(env)) { 187da64251eSJean-Philippe Brucker case ARMSS_NonSecure: 188da64251eSJean-Philippe Brucker return ARMMMUIdx_Phys_NS; 189da64251eSJean-Philippe Brucker case ARMSS_Realm: 190da64251eSJean-Philippe Brucker return ARMMMUIdx_Phys_Realm; 191da64251eSJean-Philippe Brucker case ARMSS_Secure: 192fcc0b041SPeter Maydell if (stage2idx == ARMMMUIdx_Stage2_S) { 193fcc0b041SPeter Maydell s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); 194fcc0b041SPeter Maydell } else { 195fcc0b041SPeter Maydell s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); 196fcc0b041SPeter Maydell } 197fcc0b041SPeter Maydell return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; 198da64251eSJean-Philippe Brucker default: 199da64251eSJean-Philippe Brucker g_assert_not_reached(); 200da64251eSJean-Philippe Brucker } 201fcc0b041SPeter Maydell } 202fcc0b041SPeter Maydell 20311552bb0SRichard Henderson static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) 20411552bb0SRichard Henderson { 20511552bb0SRichard Henderson return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 20611552bb0SRichard Henderson } 20711552bb0SRichard Henderson 2083b318aaeSRichard Henderson /* Return the TTBR associated with this translation regime */ 2093b318aaeSRichard Henderson static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) 2103b318aaeSRichard Henderson { 2113b318aaeSRichard Henderson if (mmu_idx == ARMMMUIdx_Stage2) { 2123b318aaeSRichard Henderson return env->cp15.vttbr_el2; 2133b318aaeSRichard Henderson } 2143b318aaeSRichard Henderson if (mmu_idx == ARMMMUIdx_Stage2_S) { 2153b318aaeSRichard Henderson return env->cp15.vsttbr_el2; 2163b318aaeSRichard Henderson } 2173b318aaeSRichard Henderson if (ttbrn == 0) { 2183b318aaeSRichard Henderson return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 2193b318aaeSRichard Henderson } else { 2203b318aaeSRichard Henderson return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 2213b318aaeSRichard Henderson } 2223b318aaeSRichard Henderson } 2233b318aaeSRichard Henderson 2248db1a3a0SRichard Henderson /* Return true if the specified stage of address translation is disabled */ 2257e80c0a4SRichard Henderson static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, 226d1289140SPeter Maydell ARMSecuritySpace space) 2278db1a3a0SRichard Henderson { 2288db1a3a0SRichard Henderson uint64_t hcr_el2; 2298db1a3a0SRichard Henderson 2308db1a3a0SRichard Henderson if (arm_feature(env, ARM_FEATURE_M)) { 2312d12bb96SPeter Maydell bool is_secure = arm_space_is_secure(space); 2327e80c0a4SRichard Henderson switch (env->v7m.mpu_ctrl[is_secure] & 2338db1a3a0SRichard Henderson (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 2348db1a3a0SRichard Henderson case R_V7M_MPU_CTRL_ENABLE_MASK: 2358db1a3a0SRichard Henderson /* Enabled, but not for HardFault and NMI */ 2368db1a3a0SRichard Henderson return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 2378db1a3a0SRichard Henderson case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 2388db1a3a0SRichard Henderson /* Enabled for all cases */ 2398db1a3a0SRichard Henderson return false; 2408db1a3a0SRichard Henderson case 0: 2418db1a3a0SRichard Henderson default: 2428db1a3a0SRichard Henderson /* 2438db1a3a0SRichard Henderson * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 2448db1a3a0SRichard Henderson * we warned about that in armv7m_nvic.c when the guest set it. 2458db1a3a0SRichard Henderson */ 2468db1a3a0SRichard Henderson return true; 2478db1a3a0SRichard Henderson } 2488db1a3a0SRichard Henderson } 2498db1a3a0SRichard Henderson 2508db1a3a0SRichard Henderson 2513b2af993SRichard Henderson switch (mmu_idx) { 2523b2af993SRichard Henderson case ARMMMUIdx_Stage2: 2533b2af993SRichard Henderson case ARMMMUIdx_Stage2_S: 2548db1a3a0SRichard Henderson /* HCR.DC means HCR.VM behaves as 1 */ 2552d12bb96SPeter Maydell hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 2568db1a3a0SRichard Henderson return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; 2578db1a3a0SRichard Henderson 2583b2af993SRichard Henderson case ARMMMUIdx_E10_0: 2593b2af993SRichard Henderson case ARMMMUIdx_E10_1: 2603b2af993SRichard Henderson case ARMMMUIdx_E10_1_PAN: 261fdf12933SRichard Henderson /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */ 2622d12bb96SPeter Maydell hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 263fdf12933SRichard Henderson if (hcr_el2 & HCR_TGE) { 2648db1a3a0SRichard Henderson return true; 2658db1a3a0SRichard Henderson } 2663b2af993SRichard Henderson break; 2678db1a3a0SRichard Henderson 2683b2af993SRichard Henderson case ARMMMUIdx_Stage1_E0: 2693b2af993SRichard Henderson case ARMMMUIdx_Stage1_E1: 2703b2af993SRichard Henderson case ARMMMUIdx_Stage1_E1_PAN: 2718db1a3a0SRichard Henderson /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 2722d12bb96SPeter Maydell hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 2733b2af993SRichard Henderson if (hcr_el2 & HCR_DC) { 2748db1a3a0SRichard Henderson return true; 2758db1a3a0SRichard Henderson } 2763b2af993SRichard Henderson break; 2773b2af993SRichard Henderson 2783b2af993SRichard Henderson case ARMMMUIdx_E20_0: 2793b2af993SRichard Henderson case ARMMMUIdx_E20_2: 2803b2af993SRichard Henderson case ARMMMUIdx_E20_2_PAN: 2813b2af993SRichard Henderson case ARMMMUIdx_E2: 2823b2af993SRichard Henderson case ARMMMUIdx_E3: 2833b2af993SRichard Henderson break; 2843b2af993SRichard Henderson 285a1ce3084SRichard Henderson case ARMMMUIdx_Phys_S: 286bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_NS: 287bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_Root: 288bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_Realm: 289a1ce3084SRichard Henderson /* No translation for physical address spaces. */ 290a1ce3084SRichard Henderson return true; 291a1ce3084SRichard Henderson 2923b2af993SRichard Henderson default: 2933b2af993SRichard Henderson g_assert_not_reached(); 2943b2af993SRichard Henderson } 2958db1a3a0SRichard Henderson 2968db1a3a0SRichard Henderson return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 2978db1a3a0SRichard Henderson } 2988db1a3a0SRichard Henderson 29946f38c97SRichard Henderson static bool granule_protection_check(CPUARMState *env, uint64_t paddress, 30046f38c97SRichard Henderson ARMSecuritySpace pspace, 30146f38c97SRichard Henderson ARMMMUFaultInfo *fi) 30246f38c97SRichard Henderson { 30346f38c97SRichard Henderson MemTxAttrs attrs = { 30446f38c97SRichard Henderson .secure = true, 30546f38c97SRichard Henderson .space = ARMSS_Root, 30646f38c97SRichard Henderson }; 30746f38c97SRichard Henderson ARMCPU *cpu = env_archcpu(env); 30846f38c97SRichard Henderson uint64_t gpccr = env->cp15.gpccr_el3; 30946f38c97SRichard Henderson unsigned pps, pgs, l0gptsz, level = 0; 31046f38c97SRichard Henderson uint64_t tableaddr, pps_mask, align, entry, index; 31146f38c97SRichard Henderson AddressSpace *as; 31246f38c97SRichard Henderson MemTxResult result; 31346f38c97SRichard Henderson int gpi; 31446f38c97SRichard Henderson 31546f38c97SRichard Henderson if (!FIELD_EX64(gpccr, GPCCR, GPC)) { 31646f38c97SRichard Henderson return true; 31746f38c97SRichard Henderson } 31846f38c97SRichard Henderson 31946f38c97SRichard Henderson /* 32046f38c97SRichard Henderson * GPC Priority 1 (R_GMGRR): 32146f38c97SRichard Henderson * R_JWCSM: If the configuration of GPCCR_EL3 is invalid, 32246f38c97SRichard Henderson * the access fails as GPT walk fault at level 0. 32346f38c97SRichard Henderson */ 32446f38c97SRichard Henderson 32546f38c97SRichard Henderson /* 32646f38c97SRichard Henderson * Configuration of PPS to a value exceeding the implemented 32746f38c97SRichard Henderson * physical address size is invalid. 32846f38c97SRichard Henderson */ 32946f38c97SRichard Henderson pps = FIELD_EX64(gpccr, GPCCR, PPS); 33046f38c97SRichard Henderson if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) { 33146f38c97SRichard Henderson goto fault_walk; 33246f38c97SRichard Henderson } 33346f38c97SRichard Henderson pps = pamax_map[pps]; 33446f38c97SRichard Henderson pps_mask = MAKE_64BIT_MASK(0, pps); 33546f38c97SRichard Henderson 33646f38c97SRichard Henderson switch (FIELD_EX64(gpccr, GPCCR, SH)) { 33746f38c97SRichard Henderson case 0b10: /* outer shareable */ 33846f38c97SRichard Henderson break; 33946f38c97SRichard Henderson case 0b00: /* non-shareable */ 34046f38c97SRichard Henderson case 0b11: /* inner shareable */ 34146f38c97SRichard Henderson /* Inner and Outer non-cacheable requires Outer shareable. */ 34246f38c97SRichard Henderson if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 && 34346f38c97SRichard Henderson FIELD_EX64(gpccr, GPCCR, IRGN) == 0) { 34446f38c97SRichard Henderson goto fault_walk; 34546f38c97SRichard Henderson } 34646f38c97SRichard Henderson break; 34746f38c97SRichard Henderson default: /* reserved */ 34846f38c97SRichard Henderson goto fault_walk; 34946f38c97SRichard Henderson } 35046f38c97SRichard Henderson 35146f38c97SRichard Henderson switch (FIELD_EX64(gpccr, GPCCR, PGS)) { 35246f38c97SRichard Henderson case 0b00: /* 4KB */ 35346f38c97SRichard Henderson pgs = 12; 35446f38c97SRichard Henderson break; 35546f38c97SRichard Henderson case 0b01: /* 64KB */ 35646f38c97SRichard Henderson pgs = 16; 35746f38c97SRichard Henderson break; 35846f38c97SRichard Henderson case 0b10: /* 16KB */ 35946f38c97SRichard Henderson pgs = 14; 36046f38c97SRichard Henderson break; 36146f38c97SRichard Henderson default: /* reserved */ 36246f38c97SRichard Henderson goto fault_walk; 36346f38c97SRichard Henderson } 36446f38c97SRichard Henderson 36546f38c97SRichard Henderson /* Note this field is read-only and fixed at reset. */ 36646f38c97SRichard Henderson l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ); 36746f38c97SRichard Henderson 36846f38c97SRichard Henderson /* 36946f38c97SRichard Henderson * GPC Priority 2: Secure, Realm or Root address exceeds PPS. 37046f38c97SRichard Henderson * R_CPDSB: A NonSecure physical address input exceeding PPS 37146f38c97SRichard Henderson * does not experience any fault. 37246f38c97SRichard Henderson */ 37346f38c97SRichard Henderson if (paddress & ~pps_mask) { 37446f38c97SRichard Henderson if (pspace == ARMSS_NonSecure) { 37546f38c97SRichard Henderson return true; 37646f38c97SRichard Henderson } 37746f38c97SRichard Henderson goto fault_size; 37846f38c97SRichard Henderson } 37946f38c97SRichard Henderson 38046f38c97SRichard Henderson /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */ 38146f38c97SRichard Henderson tableaddr = env->cp15.gptbr_el3 << 12; 38246f38c97SRichard Henderson if (tableaddr & ~pps_mask) { 38346f38c97SRichard Henderson goto fault_size; 38446f38c97SRichard Henderson } 38546f38c97SRichard Henderson 38646f38c97SRichard Henderson /* 38746f38c97SRichard Henderson * BADDR is aligned per a function of PPS and L0GPTSZ. 38846f38c97SRichard Henderson * These bits of GPTBR_EL3 are RES0, but are not a configuration error, 38946f38c97SRichard Henderson * unlike the RES0 bits of the GPT entries (R_XNKFZ). 39046f38c97SRichard Henderson */ 39146f38c97SRichard Henderson align = MAX(pps - l0gptsz + 3, 12); 39246f38c97SRichard Henderson align = MAKE_64BIT_MASK(0, align); 39346f38c97SRichard Henderson tableaddr &= ~align; 39446f38c97SRichard Henderson 39546f38c97SRichard Henderson as = arm_addressspace(env_cpu(env), attrs); 39646f38c97SRichard Henderson 39746f38c97SRichard Henderson /* Level 0 lookup. */ 39846f38c97SRichard Henderson index = extract64(paddress, l0gptsz, pps - l0gptsz); 39946f38c97SRichard Henderson tableaddr += index * 8; 40046f38c97SRichard Henderson entry = address_space_ldq_le(as, tableaddr, attrs, &result); 40146f38c97SRichard Henderson if (result != MEMTX_OK) { 40246f38c97SRichard Henderson goto fault_eabt; 40346f38c97SRichard Henderson } 40446f38c97SRichard Henderson 40546f38c97SRichard Henderson switch (extract32(entry, 0, 4)) { 40646f38c97SRichard Henderson case 1: /* block descriptor */ 40746f38c97SRichard Henderson if (entry >> 8) { 40846f38c97SRichard Henderson goto fault_walk; /* RES0 bits not 0 */ 40946f38c97SRichard Henderson } 41046f38c97SRichard Henderson gpi = extract32(entry, 4, 4); 41146f38c97SRichard Henderson goto found; 41246f38c97SRichard Henderson case 3: /* table descriptor */ 41346f38c97SRichard Henderson tableaddr = entry & ~0xf; 41446f38c97SRichard Henderson align = MAX(l0gptsz - pgs - 1, 12); 41546f38c97SRichard Henderson align = MAKE_64BIT_MASK(0, align); 41646f38c97SRichard Henderson if (tableaddr & (~pps_mask | align)) { 41746f38c97SRichard Henderson goto fault_walk; /* RES0 bits not 0 */ 41846f38c97SRichard Henderson } 41946f38c97SRichard Henderson break; 42046f38c97SRichard Henderson default: /* invalid */ 42146f38c97SRichard Henderson goto fault_walk; 42246f38c97SRichard Henderson } 42346f38c97SRichard Henderson 42446f38c97SRichard Henderson /* Level 1 lookup */ 42546f38c97SRichard Henderson level = 1; 42646f38c97SRichard Henderson index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4); 42746f38c97SRichard Henderson tableaddr += index * 8; 42846f38c97SRichard Henderson entry = address_space_ldq_le(as, tableaddr, attrs, &result); 42946f38c97SRichard Henderson if (result != MEMTX_OK) { 43046f38c97SRichard Henderson goto fault_eabt; 43146f38c97SRichard Henderson } 43246f38c97SRichard Henderson 43346f38c97SRichard Henderson switch (extract32(entry, 0, 4)) { 43446f38c97SRichard Henderson case 1: /* contiguous descriptor */ 43546f38c97SRichard Henderson if (entry >> 10) { 43646f38c97SRichard Henderson goto fault_walk; /* RES0 bits not 0 */ 43746f38c97SRichard Henderson } 43846f38c97SRichard Henderson /* 43946f38c97SRichard Henderson * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE, 44046f38c97SRichard Henderson * and because we cannot invalidate by pa, and thus will always 44146f38c97SRichard Henderson * flush entire tlbs, we don't actually care about the range here 44246f38c97SRichard Henderson * and can simply extract the GPI as the result. 44346f38c97SRichard Henderson */ 44446f38c97SRichard Henderson if (extract32(entry, 8, 2) == 0) { 44546f38c97SRichard Henderson goto fault_walk; /* reserved contig */ 44646f38c97SRichard Henderson } 44746f38c97SRichard Henderson gpi = extract32(entry, 4, 4); 44846f38c97SRichard Henderson break; 44946f38c97SRichard Henderson default: 45046f38c97SRichard Henderson index = extract64(paddress, pgs, 4); 45146f38c97SRichard Henderson gpi = extract64(entry, index * 4, 4); 45246f38c97SRichard Henderson break; 45346f38c97SRichard Henderson } 45446f38c97SRichard Henderson 45546f38c97SRichard Henderson found: 45646f38c97SRichard Henderson switch (gpi) { 45746f38c97SRichard Henderson case 0b0000: /* no access */ 45846f38c97SRichard Henderson break; 45946f38c97SRichard Henderson case 0b1111: /* all access */ 46046f38c97SRichard Henderson return true; 46146f38c97SRichard Henderson case 0b1000: 46246f38c97SRichard Henderson case 0b1001: 46346f38c97SRichard Henderson case 0b1010: 46446f38c97SRichard Henderson case 0b1011: 46546f38c97SRichard Henderson if (pspace == (gpi & 3)) { 46646f38c97SRichard Henderson return true; 46746f38c97SRichard Henderson } 46846f38c97SRichard Henderson break; 46946f38c97SRichard Henderson default: 47046f38c97SRichard Henderson goto fault_walk; /* reserved */ 47146f38c97SRichard Henderson } 47246f38c97SRichard Henderson 47346f38c97SRichard Henderson fi->gpcf = GPCF_Fail; 47446f38c97SRichard Henderson goto fault_common; 47546f38c97SRichard Henderson fault_eabt: 47646f38c97SRichard Henderson fi->gpcf = GPCF_EABT; 47746f38c97SRichard Henderson goto fault_common; 47846f38c97SRichard Henderson fault_size: 47946f38c97SRichard Henderson fi->gpcf = GPCF_AddressSize; 48046f38c97SRichard Henderson goto fault_common; 48146f38c97SRichard Henderson fault_walk: 48246f38c97SRichard Henderson fi->gpcf = GPCF_Walk; 48346f38c97SRichard Henderson fault_common: 48446f38c97SRichard Henderson fi->level = level; 48546f38c97SRichard Henderson fi->paddr = paddress; 48646f38c97SRichard Henderson fi->paddr_space = pspace; 48746f38c97SRichard Henderson return false; 48846f38c97SRichard Henderson } 48946f38c97SRichard Henderson 490728b923fSRichard Henderson static bool S1_attrs_are_device(uint8_t attrs) 491728b923fSRichard Henderson { 492728b923fSRichard Henderson /* 493728b923fSRichard Henderson * This slightly under-decodes the MAIR_ELx field: 494728b923fSRichard Henderson * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE; 495728b923fSRichard Henderson * 0b0000dd1x is UNPREDICTABLE. 496728b923fSRichard Henderson */ 497728b923fSRichard Henderson return (attrs & 0xf0) == 0; 498728b923fSRichard Henderson } 499728b923fSRichard Henderson 500f3639a64SRichard Henderson static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) 50111552bb0SRichard Henderson { 50211552bb0SRichard Henderson /* 50311552bb0SRichard Henderson * For an S1 page table walk, the stage 1 attributes are always 50411552bb0SRichard Henderson * some form of "this is Normal memory". The combined S1+S2 50511552bb0SRichard Henderson * attributes are therefore only Device if stage 2 specifies Device. 50611552bb0SRichard Henderson * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00, 50711552bb0SRichard Henderson * ie when cacheattrs.attrs bits [3:2] are 0b00. 50811552bb0SRichard Henderson * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie 50911552bb0SRichard Henderson * when cacheattrs.attrs bit [2] is 0. 51011552bb0SRichard Henderson */ 511ac76c2e5SRichard Henderson if (hcr & HCR_FWB) { 512f3639a64SRichard Henderson return (attrs & 0x4) == 0; 51311552bb0SRichard Henderson } else { 514f3639a64SRichard Henderson return (attrs & 0xc) == 0; 51511552bb0SRichard Henderson } 51611552bb0SRichard Henderson } 51711552bb0SRichard Henderson 5183f74da44SPeter Maydell static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space, 5193f74da44SPeter Maydell ARMMMUIdx s2_mmu_idx) 5203f74da44SPeter Maydell { 5213f74da44SPeter Maydell /* 5223f74da44SPeter Maydell * Return the security space to use for stage 2 when doing 5233f74da44SPeter Maydell * the S1 page table descriptor load. 5243f74da44SPeter Maydell */ 5253f74da44SPeter Maydell if (regime_is_stage2(s2_mmu_idx)) { 5263f74da44SPeter Maydell /* 5273f74da44SPeter Maydell * The security space for ptw reads is almost always the same 5283f74da44SPeter Maydell * as that of the security space of the stage 1 translation. 5293f74da44SPeter Maydell * The only exception is when stage 1 is Secure; in that case 5303f74da44SPeter Maydell * the ptw read might be to the Secure or the NonSecure space 5313f74da44SPeter Maydell * (but never Realm or Root), and the s2_mmu_idx tells us which. 5323f74da44SPeter Maydell * Root translations are always single-stage. 5333f74da44SPeter Maydell */ 5343f74da44SPeter Maydell if (s1_space == ARMSS_Secure) { 5353f74da44SPeter Maydell return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S); 5363f74da44SPeter Maydell } else { 5373f74da44SPeter Maydell assert(s2_mmu_idx != ARMMMUIdx_Stage2_S); 5383f74da44SPeter Maydell assert(s1_space != ARMSS_Root); 5393f74da44SPeter Maydell return s1_space; 5403f74da44SPeter Maydell } 5413f74da44SPeter Maydell } else { 5423f74da44SPeter Maydell /* ptw loads are from phys: the mmu idx itself says which space */ 5433f74da44SPeter Maydell return arm_phys_to_space(s2_mmu_idx); 5443f74da44SPeter Maydell } 5453f74da44SPeter Maydell } 5463f74da44SPeter Maydell 5474f51edd3SPeter Maydell static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx) 5484f51edd3SPeter Maydell { 5494f51edd3SPeter Maydell /* 5504f51edd3SPeter Maydell * For stage 2 faults in Secure EL22, S1NS indicates 5514f51edd3SPeter Maydell * whether the faulting IPA is in the Secure or NonSecure 5524f51edd3SPeter Maydell * IPA space. For all other kinds of fault, it is false. 5534f51edd3SPeter Maydell */ 5544f51edd3SPeter Maydell return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx) 5554f51edd3SPeter Maydell && s2_mmu_idx == ARMMMUIdx_Stage2_S; 5564f51edd3SPeter Maydell } 5574f51edd3SPeter Maydell 55811552bb0SRichard Henderson /* Translate a S1 pagetable walk through S2 if needed. */ 5596d2654ffSRichard Henderson static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, 5606d2654ffSRichard Henderson hwaddr addr, ARMMMUFaultInfo *fi) 56111552bb0SRichard Henderson { 562f3639a64SRichard Henderson ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 56348da29e4SRichard Henderson ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; 564f3639a64SRichard Henderson uint8_t pte_attrs; 565bf25b7b0SRichard Henderson 56671943a1eSRichard Henderson ptw->out_virt = addr; 56771943a1eSRichard Henderson 568f3639a64SRichard Henderson if (unlikely(ptw->in_debug)) { 569f3639a64SRichard Henderson /* 570f3639a64SRichard Henderson * From gdbstub, do not use softmmu so that we don't modify the 571f3639a64SRichard Henderson * state of the cpu at all, including softmmu tlb contents. 572f3639a64SRichard Henderson */ 5733f74da44SPeter Maydell ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx); 5746d2654ffSRichard Henderson S1Translate s2ptw = { 5756d2654ffSRichard Henderson .in_mmu_idx = s2_mmu_idx, 576fcc0b041SPeter Maydell .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), 5773f74da44SPeter Maydell .in_space = s2_space, 578f3639a64SRichard Henderson .in_debug = true, 5796d2654ffSRichard Henderson }; 580f3639a64SRichard Henderson GetPhysAddrResult s2 = { }; 58148da29e4SRichard Henderson 5825458670bSRichard Henderson if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) { 583f3639a64SRichard Henderson goto fail; 584f3639a64SRichard Henderson } 58546f38c97SRichard Henderson 586f3639a64SRichard Henderson ptw->out_phys = s2.f.phys_addr; 587f3639a64SRichard Henderson pte_attrs = s2.cacheattrs.attrs; 588f3639a64SRichard Henderson ptw->out_host = NULL; 58971943a1eSRichard Henderson ptw->out_rw = false; 590fe4a5472SRichard Henderson ptw->out_space = s2.f.attrs.space; 591f3639a64SRichard Henderson } else { 5920d3de77aSFabiano Rosas #ifdef CONFIG_TCG 593f3639a64SRichard Henderson CPUTLBEntryFull *full; 594f3639a64SRichard Henderson int flags; 59511552bb0SRichard Henderson 596f3639a64SRichard Henderson env->tlb_fi = fi; 5976d03226bSAlex Bennée flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD, 598f3639a64SRichard Henderson arm_to_core_mmu_idx(s2_mmu_idx), 5996d03226bSAlex Bennée &ptw->out_host, &full); 600f3639a64SRichard Henderson env->tlb_fi = NULL; 601f3639a64SRichard Henderson 602f3639a64SRichard Henderson if (unlikely(flags & TLB_INVALID_MASK)) { 603f3639a64SRichard Henderson goto fail; 604f3639a64SRichard Henderson } 6059d2617acSRichard Henderson ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 60671943a1eSRichard Henderson ptw->out_rw = full->prot & PAGE_WRITE; 607a81fef4bSAnton Johansson pte_attrs = full->extra.arm.pte_attrs; 60890c66293SRichard Henderson ptw->out_space = full->attrs.space; 6090d3de77aSFabiano Rosas #else 6100d3de77aSFabiano Rosas g_assert_not_reached(); 6110d3de77aSFabiano Rosas #endif 61211552bb0SRichard Henderson } 613ac76c2e5SRichard Henderson 61448da29e4SRichard Henderson if (regime_is_stage2(s2_mmu_idx)) { 6152d12bb96SPeter Maydell uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 616f3639a64SRichard Henderson 617f3639a64SRichard Henderson if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) { 61811552bb0SRichard Henderson /* 61911552bb0SRichard Henderson * PTW set and S1 walk touched S2 Device memory: 62011552bb0SRichard Henderson * generate Permission fault. 62111552bb0SRichard Henderson */ 62211552bb0SRichard Henderson fi->type = ARMFault_Permission; 62311552bb0SRichard Henderson fi->s2addr = addr; 62411552bb0SRichard Henderson fi->stage2 = true; 62511552bb0SRichard Henderson fi->s1ptw = true; 6264f51edd3SPeter Maydell fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); 6276d2654ffSRichard Henderson return false; 62811552bb0SRichard Henderson } 629f3639a64SRichard Henderson } 63011552bb0SRichard Henderson 631f3639a64SRichard Henderson ptw->out_be = regime_translation_big_endian(env, mmu_idx); 6326d2654ffSRichard Henderson return true; 633f3639a64SRichard Henderson 634f3639a64SRichard Henderson fail: 635f3639a64SRichard Henderson assert(fi->type != ARMFault_None); 63646f38c97SRichard Henderson if (fi->type == ARMFault_GPCFOnOutput) { 63746f38c97SRichard Henderson fi->type = ARMFault_GPCFOnWalk; 63846f38c97SRichard Henderson } 639f3639a64SRichard Henderson fi->s2addr = addr; 640f6415660SPeter Maydell fi->stage2 = regime_is_stage2(s2_mmu_idx); 641f6415660SPeter Maydell fi->s1ptw = fi->stage2; 6424f51edd3SPeter Maydell fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); 643f3639a64SRichard Henderson return false; 64411552bb0SRichard Henderson } 64511552bb0SRichard Henderson 64611552bb0SRichard Henderson /* All loads done in the course of a page table walk go through here. */ 64793e5b3a6SRichard Henderson static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, 6486d2654ffSRichard Henderson ARMMMUFaultInfo *fi) 64911552bb0SRichard Henderson { 6505e79887bSRichard Henderson CPUState *cs = env_cpu(env); 65171943a1eSRichard Henderson void *host = ptw->out_host; 65211552bb0SRichard Henderson uint32_t data; 65311552bb0SRichard Henderson 65471943a1eSRichard Henderson if (likely(host)) { 655f3639a64SRichard Henderson /* Page tables are in RAM, and we have the host address. */ 65671943a1eSRichard Henderson data = qatomic_read((uint32_t *)host); 6574e7a2c98SRichard Henderson if (ptw->out_be) { 65871943a1eSRichard Henderson data = be32_to_cpu(data); 65911552bb0SRichard Henderson } else { 66071943a1eSRichard Henderson data = le32_to_cpu(data); 66111552bb0SRichard Henderson } 662f3639a64SRichard Henderson } else { 663f3639a64SRichard Henderson /* Page tables are in MMIO. */ 66490c66293SRichard Henderson MemTxAttrs attrs = { 66590c66293SRichard Henderson .space = ptw->out_space, 666b02f5e06SPeter Maydell .secure = arm_space_is_secure(ptw->out_space), 66790c66293SRichard Henderson }; 668f3639a64SRichard Henderson AddressSpace *as = arm_addressspace(cs, attrs); 669f3639a64SRichard Henderson MemTxResult result = MEMTX_OK; 670f3639a64SRichard Henderson 671f3639a64SRichard Henderson if (ptw->out_be) { 672f3639a64SRichard Henderson data = address_space_ldl_be(as, ptw->out_phys, attrs, &result); 673f3639a64SRichard Henderson } else { 674f3639a64SRichard Henderson data = address_space_ldl_le(as, ptw->out_phys, attrs, &result); 67511552bb0SRichard Henderson } 676f3639a64SRichard Henderson if (unlikely(result != MEMTX_OK)) { 67711552bb0SRichard Henderson fi->type = ARMFault_SyncExternalOnWalk; 67811552bb0SRichard Henderson fi->ea = arm_extabort_type(result); 67911552bb0SRichard Henderson return 0; 68011552bb0SRichard Henderson } 681f3639a64SRichard Henderson } 682f3639a64SRichard Henderson return data; 683f3639a64SRichard Henderson } 68411552bb0SRichard Henderson 68593e5b3a6SRichard Henderson static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, 6866d2654ffSRichard Henderson ARMMMUFaultInfo *fi) 68711552bb0SRichard Henderson { 6885e79887bSRichard Henderson CPUState *cs = env_cpu(env); 68971943a1eSRichard Henderson void *host = ptw->out_host; 69011552bb0SRichard Henderson uint64_t data; 69111552bb0SRichard Henderson 69271943a1eSRichard Henderson if (likely(host)) { 693f3639a64SRichard Henderson /* Page tables are in RAM, and we have the host address. */ 69471943a1eSRichard Henderson #ifdef CONFIG_ATOMIC64 69571943a1eSRichard Henderson data = qatomic_read__nocheck((uint64_t *)host); 6964e7a2c98SRichard Henderson if (ptw->out_be) { 69771943a1eSRichard Henderson data = be64_to_cpu(data); 69811552bb0SRichard Henderson } else { 69971943a1eSRichard Henderson data = le64_to_cpu(data); 70011552bb0SRichard Henderson } 70171943a1eSRichard Henderson #else 70271943a1eSRichard Henderson if (ptw->out_be) { 70371943a1eSRichard Henderson data = ldq_be_p(host); 70471943a1eSRichard Henderson } else { 70571943a1eSRichard Henderson data = ldq_le_p(host); 70671943a1eSRichard Henderson } 70771943a1eSRichard Henderson #endif 708f3639a64SRichard Henderson } else { 709f3639a64SRichard Henderson /* Page tables are in MMIO. */ 71090c66293SRichard Henderson MemTxAttrs attrs = { 71190c66293SRichard Henderson .space = ptw->out_space, 712b02f5e06SPeter Maydell .secure = arm_space_is_secure(ptw->out_space), 71390c66293SRichard Henderson }; 714f3639a64SRichard Henderson AddressSpace *as = arm_addressspace(cs, attrs); 715f3639a64SRichard Henderson MemTxResult result = MEMTX_OK; 716f3639a64SRichard Henderson 717f3639a64SRichard Henderson if (ptw->out_be) { 718f3639a64SRichard Henderson data = address_space_ldq_be(as, ptw->out_phys, attrs, &result); 719f3639a64SRichard Henderson } else { 720f3639a64SRichard Henderson data = address_space_ldq_le(as, ptw->out_phys, attrs, &result); 72111552bb0SRichard Henderson } 722f3639a64SRichard Henderson if (unlikely(result != MEMTX_OK)) { 72311552bb0SRichard Henderson fi->type = ARMFault_SyncExternalOnWalk; 72411552bb0SRichard Henderson fi->ea = arm_extabort_type(result); 72511552bb0SRichard Henderson return 0; 72611552bb0SRichard Henderson } 727f3639a64SRichard Henderson } 728f3639a64SRichard Henderson return data; 729f3639a64SRichard Henderson } 73011552bb0SRichard Henderson 73171943a1eSRichard Henderson static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, 73271943a1eSRichard Henderson uint64_t new_val, S1Translate *ptw, 73371943a1eSRichard Henderson ARMMMUFaultInfo *fi) 73471943a1eSRichard Henderson { 735465af4dbSAlex Bennée #if defined(TARGET_AARCH64) && defined(CONFIG_TCG) 73671943a1eSRichard Henderson uint64_t cur_val; 73771943a1eSRichard Henderson void *host = ptw->out_host; 73871943a1eSRichard Henderson 73971943a1eSRichard Henderson if (unlikely(!host)) { 7407421ddc4SJonathan Cameron /* Page table in MMIO Memory Region */ 7417421ddc4SJonathan Cameron CPUState *cs = env_cpu(env); 7427421ddc4SJonathan Cameron MemTxAttrs attrs = { 7437421ddc4SJonathan Cameron .space = ptw->out_space, 7447421ddc4SJonathan Cameron .secure = arm_space_is_secure(ptw->out_space), 7457421ddc4SJonathan Cameron }; 7467421ddc4SJonathan Cameron AddressSpace *as = arm_addressspace(cs, attrs); 7477421ddc4SJonathan Cameron MemTxResult result = MEMTX_OK; 7487421ddc4SJonathan Cameron bool need_lock = !bql_locked(); 7497421ddc4SJonathan Cameron 7507421ddc4SJonathan Cameron if (need_lock) { 7517421ddc4SJonathan Cameron bql_lock(); 7527421ddc4SJonathan Cameron } 7537421ddc4SJonathan Cameron if (ptw->out_be) { 7547421ddc4SJonathan Cameron cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result); 7557421ddc4SJonathan Cameron if (unlikely(result != MEMTX_OK)) { 7567421ddc4SJonathan Cameron fi->type = ARMFault_SyncExternalOnWalk; 7577421ddc4SJonathan Cameron fi->ea = arm_extabort_type(result); 7587421ddc4SJonathan Cameron if (need_lock) { 7597421ddc4SJonathan Cameron bql_unlock(); 7607421ddc4SJonathan Cameron } 7617421ddc4SJonathan Cameron return old_val; 7627421ddc4SJonathan Cameron } 7637421ddc4SJonathan Cameron if (cur_val == old_val) { 7647421ddc4SJonathan Cameron address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result); 7657421ddc4SJonathan Cameron if (unlikely(result != MEMTX_OK)) { 7667421ddc4SJonathan Cameron fi->type = ARMFault_SyncExternalOnWalk; 7677421ddc4SJonathan Cameron fi->ea = arm_extabort_type(result); 7687421ddc4SJonathan Cameron if (need_lock) { 7697421ddc4SJonathan Cameron bql_unlock(); 7707421ddc4SJonathan Cameron } 7717421ddc4SJonathan Cameron return old_val; 7727421ddc4SJonathan Cameron } 7737421ddc4SJonathan Cameron cur_val = new_val; 7747421ddc4SJonathan Cameron } 7757421ddc4SJonathan Cameron } else { 7767421ddc4SJonathan Cameron cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result); 7777421ddc4SJonathan Cameron if (unlikely(result != MEMTX_OK)) { 7787421ddc4SJonathan Cameron fi->type = ARMFault_SyncExternalOnWalk; 7797421ddc4SJonathan Cameron fi->ea = arm_extabort_type(result); 7807421ddc4SJonathan Cameron if (need_lock) { 7817421ddc4SJonathan Cameron bql_unlock(); 7827421ddc4SJonathan Cameron } 7837421ddc4SJonathan Cameron return old_val; 7847421ddc4SJonathan Cameron } 7857421ddc4SJonathan Cameron if (cur_val == old_val) { 7867421ddc4SJonathan Cameron address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result); 7877421ddc4SJonathan Cameron if (unlikely(result != MEMTX_OK)) { 7887421ddc4SJonathan Cameron fi->type = ARMFault_SyncExternalOnWalk; 7897421ddc4SJonathan Cameron fi->ea = arm_extabort_type(result); 7907421ddc4SJonathan Cameron if (need_lock) { 7917421ddc4SJonathan Cameron bql_unlock(); 7927421ddc4SJonathan Cameron } 7937421ddc4SJonathan Cameron return old_val; 7947421ddc4SJonathan Cameron } 7957421ddc4SJonathan Cameron cur_val = new_val; 7967421ddc4SJonathan Cameron } 7977421ddc4SJonathan Cameron } 7987421ddc4SJonathan Cameron if (need_lock) { 7997421ddc4SJonathan Cameron bql_unlock(); 8007421ddc4SJonathan Cameron } 8017421ddc4SJonathan Cameron return cur_val; 80271943a1eSRichard Henderson } 80371943a1eSRichard Henderson 80471943a1eSRichard Henderson /* 80571943a1eSRichard Henderson * Raising a stage2 Protection fault for an atomic update to a read-only 80671943a1eSRichard Henderson * page is delayed until it is certain that there is a change to make. 80771943a1eSRichard Henderson */ 80871943a1eSRichard Henderson if (unlikely(!ptw->out_rw)) { 80971943a1eSRichard Henderson int flags; 81071943a1eSRichard Henderson 81171943a1eSRichard Henderson env->tlb_fi = fi; 8126d03226bSAlex Bennée flags = probe_access_full_mmu(env, ptw->out_virt, 0, 8136d03226bSAlex Bennée MMU_DATA_STORE, 81471943a1eSRichard Henderson arm_to_core_mmu_idx(ptw->in_ptw_idx), 8156d03226bSAlex Bennée NULL, NULL); 81671943a1eSRichard Henderson env->tlb_fi = NULL; 81771943a1eSRichard Henderson 81871943a1eSRichard Henderson if (unlikely(flags & TLB_INVALID_MASK)) { 819f6415660SPeter Maydell /* 820f6415660SPeter Maydell * We know this must be a stage 2 fault because the granule 821f6415660SPeter Maydell * protection table does not separately track read and write 822f6415660SPeter Maydell * permission, so all GPC faults are caught in S1_ptw_translate(): 823f6415660SPeter Maydell * we only get here for "readable but not writeable". 824f6415660SPeter Maydell */ 82571943a1eSRichard Henderson assert(fi->type != ARMFault_None); 82671943a1eSRichard Henderson fi->s2addr = ptw->out_virt; 82771943a1eSRichard Henderson fi->stage2 = true; 82871943a1eSRichard Henderson fi->s1ptw = true; 8294f51edd3SPeter Maydell fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx); 83071943a1eSRichard Henderson return 0; 83171943a1eSRichard Henderson } 83271943a1eSRichard Henderson 83371943a1eSRichard Henderson /* In case CAS mismatches and we loop, remember writability. */ 83471943a1eSRichard Henderson ptw->out_rw = true; 83571943a1eSRichard Henderson } 83671943a1eSRichard Henderson 83771943a1eSRichard Henderson #ifdef CONFIG_ATOMIC64 83871943a1eSRichard Henderson if (ptw->out_be) { 83971943a1eSRichard Henderson old_val = cpu_to_be64(old_val); 84071943a1eSRichard Henderson new_val = cpu_to_be64(new_val); 84171943a1eSRichard Henderson cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); 84271943a1eSRichard Henderson cur_val = be64_to_cpu(cur_val); 84371943a1eSRichard Henderson } else { 84471943a1eSRichard Henderson old_val = cpu_to_le64(old_val); 84571943a1eSRichard Henderson new_val = cpu_to_le64(new_val); 84671943a1eSRichard Henderson cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); 84771943a1eSRichard Henderson cur_val = le64_to_cpu(cur_val); 84871943a1eSRichard Henderson } 84971943a1eSRichard Henderson #else 85071943a1eSRichard Henderson /* 85171943a1eSRichard Henderson * We can't support the full 64-bit atomic cmpxchg on the host. 85271943a1eSRichard Henderson * Because this is only used for FEAT_HAFDBS, which is only for AA64, 85371943a1eSRichard Henderson * we know that TCG_OVERSIZED_GUEST is set, which means that we are 85471943a1eSRichard Henderson * running in round-robin mode and could only race with dma i/o. 85571943a1eSRichard Henderson */ 856d3ae5f5dSRichard Henderson #if !TCG_OVERSIZED_GUEST 85771943a1eSRichard Henderson # error "Unexpected configuration" 85871943a1eSRichard Henderson #endif 859195801d7SStefan Hajnoczi bool locked = bql_locked(); 86071943a1eSRichard Henderson if (!locked) { 861195801d7SStefan Hajnoczi bql_lock(); 86271943a1eSRichard Henderson } 86371943a1eSRichard Henderson if (ptw->out_be) { 86471943a1eSRichard Henderson cur_val = ldq_be_p(host); 86571943a1eSRichard Henderson if (cur_val == old_val) { 86671943a1eSRichard Henderson stq_be_p(host, new_val); 86771943a1eSRichard Henderson } 86871943a1eSRichard Henderson } else { 86971943a1eSRichard Henderson cur_val = ldq_le_p(host); 87071943a1eSRichard Henderson if (cur_val == old_val) { 87171943a1eSRichard Henderson stq_le_p(host, new_val); 87271943a1eSRichard Henderson } 87371943a1eSRichard Henderson } 87471943a1eSRichard Henderson if (!locked) { 875195801d7SStefan Hajnoczi bql_unlock(); 87671943a1eSRichard Henderson } 87771943a1eSRichard Henderson #endif 87871943a1eSRichard Henderson 87971943a1eSRichard Henderson return cur_val; 880d3ae5f5dSRichard Henderson #else 881465af4dbSAlex Bennée /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */ 882d3ae5f5dSRichard Henderson g_assert_not_reached(); 883d3ae5f5dSRichard Henderson #endif 88471943a1eSRichard Henderson } 88571943a1eSRichard Henderson 8864c74ab15SRichard Henderson static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 8874c74ab15SRichard Henderson uint32_t *table, uint32_t address) 8884c74ab15SRichard Henderson { 8894c74ab15SRichard Henderson /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 890c1547bbaSPeter Maydell uint64_t tcr = regime_tcr(env, mmu_idx); 8919e70e26cSPeter Maydell int maskshift = extract32(tcr, 0, 3); 8929e70e26cSPeter Maydell uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift); 8939e70e26cSPeter Maydell uint32_t base_mask; 8944c74ab15SRichard Henderson 8959e70e26cSPeter Maydell if (address & mask) { 8969e70e26cSPeter Maydell if (tcr & TTBCR_PD1) { 8974c74ab15SRichard Henderson /* Translation table walk disabled for TTBR1 */ 8984c74ab15SRichard Henderson return false; 8994c74ab15SRichard Henderson } 9004c74ab15SRichard Henderson *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 9014c74ab15SRichard Henderson } else { 9029e70e26cSPeter Maydell if (tcr & TTBCR_PD0) { 9034c74ab15SRichard Henderson /* Translation table walk disabled for TTBR0 */ 9044c74ab15SRichard Henderson return false; 9054c74ab15SRichard Henderson } 9069e70e26cSPeter Maydell base_mask = ~((uint32_t)0x3fffu >> maskshift); 9079e70e26cSPeter Maydell *table = regime_ttbr(env, mmu_idx, 0) & base_mask; 9084c74ab15SRichard Henderson } 9094c74ab15SRichard Henderson *table |= (address >> 18) & 0x3ffc; 9104c74ab15SRichard Henderson return true; 9114c74ab15SRichard Henderson } 9124c74ab15SRichard Henderson 9134845d3beSRichard Henderson /* 9144845d3beSRichard Henderson * Translate section/page access permissions to page R/W protection flags 9154845d3beSRichard Henderson * @env: CPUARMState 9164845d3beSRichard Henderson * @mmu_idx: MMU index indicating required translation regime 9174845d3beSRichard Henderson * @ap: The 3-bit access permissions (AP[2:0]) 9184845d3beSRichard Henderson * @domain_prot: The 2-bit domain access permissions 9196f2d9d74STimofey Kutergin * @is_user: TRUE if accessing from PL0 9204845d3beSRichard Henderson */ 9216f2d9d74STimofey Kutergin static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx, 9226f2d9d74STimofey Kutergin int ap, int domain_prot, bool is_user) 9234845d3beSRichard Henderson { 9244845d3beSRichard Henderson if (domain_prot == 3) { 9254845d3beSRichard Henderson return PAGE_READ | PAGE_WRITE; 9264845d3beSRichard Henderson } 9274845d3beSRichard Henderson 9284845d3beSRichard Henderson switch (ap) { 9294845d3beSRichard Henderson case 0: 9304845d3beSRichard Henderson if (arm_feature(env, ARM_FEATURE_V7)) { 9314845d3beSRichard Henderson return 0; 9324845d3beSRichard Henderson } 9334845d3beSRichard Henderson switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 9344845d3beSRichard Henderson case SCTLR_S: 9354845d3beSRichard Henderson return is_user ? 0 : PAGE_READ; 9364845d3beSRichard Henderson case SCTLR_R: 9374845d3beSRichard Henderson return PAGE_READ; 9384845d3beSRichard Henderson default: 9394845d3beSRichard Henderson return 0; 9404845d3beSRichard Henderson } 9414845d3beSRichard Henderson case 1: 9424845d3beSRichard Henderson return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9434845d3beSRichard Henderson case 2: 9444845d3beSRichard Henderson if (is_user) { 9454845d3beSRichard Henderson return PAGE_READ; 9464845d3beSRichard Henderson } else { 9474845d3beSRichard Henderson return PAGE_READ | PAGE_WRITE; 9484845d3beSRichard Henderson } 9494845d3beSRichard Henderson case 3: 9504845d3beSRichard Henderson return PAGE_READ | PAGE_WRITE; 9514845d3beSRichard Henderson case 4: /* Reserved. */ 9524845d3beSRichard Henderson return 0; 9534845d3beSRichard Henderson case 5: 9544845d3beSRichard Henderson return is_user ? 0 : PAGE_READ; 9554845d3beSRichard Henderson case 6: 9564845d3beSRichard Henderson return PAGE_READ; 9574845d3beSRichard Henderson case 7: 9584845d3beSRichard Henderson if (!arm_feature(env, ARM_FEATURE_V6K)) { 9594845d3beSRichard Henderson return 0; 9604845d3beSRichard Henderson } 9614845d3beSRichard Henderson return PAGE_READ; 9624845d3beSRichard Henderson default: 9634845d3beSRichard Henderson g_assert_not_reached(); 9644845d3beSRichard Henderson } 9654845d3beSRichard Henderson } 9664845d3beSRichard Henderson 9674845d3beSRichard Henderson /* 9686f2d9d74STimofey Kutergin * Translate section/page access permissions to page R/W protection flags 9696f2d9d74STimofey Kutergin * @env: CPUARMState 9706f2d9d74STimofey Kutergin * @mmu_idx: MMU index indicating required translation regime 9716f2d9d74STimofey Kutergin * @ap: The 3-bit access permissions (AP[2:0]) 9726f2d9d74STimofey Kutergin * @domain_prot: The 2-bit domain access permissions 9736f2d9d74STimofey Kutergin */ 9746f2d9d74STimofey Kutergin static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 9756f2d9d74STimofey Kutergin int ap, int domain_prot) 9766f2d9d74STimofey Kutergin { 9776f2d9d74STimofey Kutergin return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 9786f2d9d74STimofey Kutergin regime_is_user(env, mmu_idx)); 9796f2d9d74STimofey Kutergin } 9806f2d9d74STimofey Kutergin 9816f2d9d74STimofey Kutergin /* 9824845d3beSRichard Henderson * Translate section/page access permissions to page R/W protection flags. 9834845d3beSRichard Henderson * @ap: The 2-bit simple AP (AP[2:1]) 9844845d3beSRichard Henderson * @is_user: TRUE if accessing from PL0 9854845d3beSRichard Henderson */ 9864845d3beSRichard Henderson static int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 9874845d3beSRichard Henderson { 9884845d3beSRichard Henderson switch (ap) { 9894845d3beSRichard Henderson case 0: 9904845d3beSRichard Henderson return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9914845d3beSRichard Henderson case 1: 9924845d3beSRichard Henderson return PAGE_READ | PAGE_WRITE; 9934845d3beSRichard Henderson case 2: 9944845d3beSRichard Henderson return is_user ? 0 : PAGE_READ; 9954845d3beSRichard Henderson case 3: 9964845d3beSRichard Henderson return PAGE_READ; 9974845d3beSRichard Henderson default: 9984845d3beSRichard Henderson g_assert_not_reached(); 9994845d3beSRichard Henderson } 10004845d3beSRichard Henderson } 10014845d3beSRichard Henderson 10024845d3beSRichard Henderson static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 10034845d3beSRichard Henderson { 10044845d3beSRichard Henderson return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 10054845d3beSRichard Henderson } 10064845d3beSRichard Henderson 10076d2654ffSRichard Henderson static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, 10086d2654ffSRichard Henderson uint32_t address, MMUAccessType access_type, 10096d2654ffSRichard Henderson GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1010f2d2f5ceSRichard Henderson { 1011f2d2f5ceSRichard Henderson int level = 1; 1012f2d2f5ceSRichard Henderson uint32_t table; 1013f2d2f5ceSRichard Henderson uint32_t desc; 1014f2d2f5ceSRichard Henderson int type; 1015f2d2f5ceSRichard Henderson int ap; 1016f2d2f5ceSRichard Henderson int domain = 0; 1017f2d2f5ceSRichard Henderson int domain_prot; 1018f2d2f5ceSRichard Henderson hwaddr phys_addr; 1019f2d2f5ceSRichard Henderson uint32_t dacr; 1020f2d2f5ceSRichard Henderson 1021f2d2f5ceSRichard Henderson /* Pagetable walk. */ 1022f2d2f5ceSRichard Henderson /* Lookup l1 descriptor. */ 10236d2654ffSRichard Henderson if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) { 1024f2d2f5ceSRichard Henderson /* Section translation fault if page walk is disabled by PD0 or PD1 */ 1025f2d2f5ceSRichard Henderson fi->type = ARMFault_Translation; 1026f2d2f5ceSRichard Henderson goto do_fault; 1027f2d2f5ceSRichard Henderson } 102893e5b3a6SRichard Henderson if (!S1_ptw_translate(env, ptw, table, fi)) { 102993e5b3a6SRichard Henderson goto do_fault; 103093e5b3a6SRichard Henderson } 103193e5b3a6SRichard Henderson desc = arm_ldl_ptw(env, ptw, fi); 1032f2d2f5ceSRichard Henderson if (fi->type != ARMFault_None) { 1033f2d2f5ceSRichard Henderson goto do_fault; 1034f2d2f5ceSRichard Henderson } 1035f2d2f5ceSRichard Henderson type = (desc & 3); 1036f2d2f5ceSRichard Henderson domain = (desc >> 5) & 0x0f; 10376d2654ffSRichard Henderson if (regime_el(env, ptw->in_mmu_idx) == 1) { 1038f2d2f5ceSRichard Henderson dacr = env->cp15.dacr_ns; 1039f2d2f5ceSRichard Henderson } else { 1040f2d2f5ceSRichard Henderson dacr = env->cp15.dacr_s; 1041f2d2f5ceSRichard Henderson } 1042f2d2f5ceSRichard Henderson domain_prot = (dacr >> (domain * 2)) & 3; 1043f2d2f5ceSRichard Henderson if (type == 0) { 1044f2d2f5ceSRichard Henderson /* Section translation fault. */ 1045f2d2f5ceSRichard Henderson fi->type = ARMFault_Translation; 1046f2d2f5ceSRichard Henderson goto do_fault; 1047f2d2f5ceSRichard Henderson } 1048f2d2f5ceSRichard Henderson if (type != 2) { 1049f2d2f5ceSRichard Henderson level = 2; 1050f2d2f5ceSRichard Henderson } 1051f2d2f5ceSRichard Henderson if (domain_prot == 0 || domain_prot == 2) { 1052f2d2f5ceSRichard Henderson fi->type = ARMFault_Domain; 1053f2d2f5ceSRichard Henderson goto do_fault; 1054f2d2f5ceSRichard Henderson } 1055f2d2f5ceSRichard Henderson if (type == 2) { 1056f2d2f5ceSRichard Henderson /* 1Mb section. */ 1057f2d2f5ceSRichard Henderson phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 1058f2d2f5ceSRichard Henderson ap = (desc >> 10) & 3; 10597fa7ea8fSRichard Henderson result->f.lg_page_size = 20; /* 1MB */ 1060f2d2f5ceSRichard Henderson } else { 1061f2d2f5ceSRichard Henderson /* Lookup l2 entry. */ 1062f2d2f5ceSRichard Henderson if (type == 1) { 1063f2d2f5ceSRichard Henderson /* Coarse pagetable. */ 1064f2d2f5ceSRichard Henderson table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 1065f2d2f5ceSRichard Henderson } else { 1066f2d2f5ceSRichard Henderson /* Fine pagetable. */ 1067f2d2f5ceSRichard Henderson table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 1068f2d2f5ceSRichard Henderson } 106993e5b3a6SRichard Henderson if (!S1_ptw_translate(env, ptw, table, fi)) { 107093e5b3a6SRichard Henderson goto do_fault; 107193e5b3a6SRichard Henderson } 107293e5b3a6SRichard Henderson desc = arm_ldl_ptw(env, ptw, fi); 1073f2d2f5ceSRichard Henderson if (fi->type != ARMFault_None) { 1074f2d2f5ceSRichard Henderson goto do_fault; 1075f2d2f5ceSRichard Henderson } 1076f2d2f5ceSRichard Henderson switch (desc & 3) { 1077f2d2f5ceSRichard Henderson case 0: /* Page translation fault. */ 1078f2d2f5ceSRichard Henderson fi->type = ARMFault_Translation; 1079f2d2f5ceSRichard Henderson goto do_fault; 1080f2d2f5ceSRichard Henderson case 1: /* 64k page. */ 1081f2d2f5ceSRichard Henderson phys_addr = (desc & 0xffff0000) | (address & 0xffff); 1082f2d2f5ceSRichard Henderson ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 10837fa7ea8fSRichard Henderson result->f.lg_page_size = 16; 1084f2d2f5ceSRichard Henderson break; 1085f2d2f5ceSRichard Henderson case 2: /* 4k page. */ 1086f2d2f5ceSRichard Henderson phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1087f2d2f5ceSRichard Henderson ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 10887fa7ea8fSRichard Henderson result->f.lg_page_size = 12; 1089f2d2f5ceSRichard Henderson break; 1090f2d2f5ceSRichard Henderson case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 1091f2d2f5ceSRichard Henderson if (type == 1) { 1092f2d2f5ceSRichard Henderson /* ARMv6/XScale extended small page format */ 1093f2d2f5ceSRichard Henderson if (arm_feature(env, ARM_FEATURE_XSCALE) 1094f2d2f5ceSRichard Henderson || arm_feature(env, ARM_FEATURE_V6)) { 1095f2d2f5ceSRichard Henderson phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10967fa7ea8fSRichard Henderson result->f.lg_page_size = 12; 1097f2d2f5ceSRichard Henderson } else { 1098f2d2f5ceSRichard Henderson /* 1099f2d2f5ceSRichard Henderson * UNPREDICTABLE in ARMv5; we choose to take a 1100f2d2f5ceSRichard Henderson * page translation fault. 1101f2d2f5ceSRichard Henderson */ 1102f2d2f5ceSRichard Henderson fi->type = ARMFault_Translation; 1103f2d2f5ceSRichard Henderson goto do_fault; 1104f2d2f5ceSRichard Henderson } 1105f2d2f5ceSRichard Henderson } else { 1106f2d2f5ceSRichard Henderson phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 11077fa7ea8fSRichard Henderson result->f.lg_page_size = 10; 1108f2d2f5ceSRichard Henderson } 1109f2d2f5ceSRichard Henderson ap = (desc >> 4) & 3; 1110f2d2f5ceSRichard Henderson break; 1111f2d2f5ceSRichard Henderson default: 1112f2d2f5ceSRichard Henderson /* Never happens, but compiler isn't smart enough to tell. */ 1113f2d2f5ceSRichard Henderson g_assert_not_reached(); 1114f2d2f5ceSRichard Henderson } 1115f2d2f5ceSRichard Henderson } 11166d2654ffSRichard Henderson result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot); 11177fa7ea8fSRichard Henderson result->f.prot |= result->f.prot ? PAGE_EXEC : 0; 11187fa7ea8fSRichard Henderson if (!(result->f.prot & (1 << access_type))) { 1119f2d2f5ceSRichard Henderson /* Access permission fault. */ 1120f2d2f5ceSRichard Henderson fi->type = ARMFault_Permission; 1121f2d2f5ceSRichard Henderson goto do_fault; 1122f2d2f5ceSRichard Henderson } 11237fa7ea8fSRichard Henderson result->f.phys_addr = phys_addr; 1124f2d2f5ceSRichard Henderson return false; 1125f2d2f5ceSRichard Henderson do_fault: 1126f2d2f5ceSRichard Henderson fi->domain = domain; 1127f2d2f5ceSRichard Henderson fi->level = level; 1128f2d2f5ceSRichard Henderson return true; 1129f2d2f5ceSRichard Henderson } 1130f2d2f5ceSRichard Henderson 11316d2654ffSRichard Henderson static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, 11326d2654ffSRichard Henderson uint32_t address, MMUAccessType access_type, 11336d2654ffSRichard Henderson GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 113453c038efSRichard Henderson { 113553c038efSRichard Henderson ARMCPU *cpu = env_archcpu(env); 11366d2654ffSRichard Henderson ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 113753c038efSRichard Henderson int level = 1; 113853c038efSRichard Henderson uint32_t table; 113953c038efSRichard Henderson uint32_t desc; 114053c038efSRichard Henderson uint32_t xn; 114153c038efSRichard Henderson uint32_t pxn = 0; 114253c038efSRichard Henderson int type; 114353c038efSRichard Henderson int ap; 114453c038efSRichard Henderson int domain = 0; 114553c038efSRichard Henderson int domain_prot; 114653c038efSRichard Henderson hwaddr phys_addr; 114753c038efSRichard Henderson uint32_t dacr; 114853c038efSRichard Henderson bool ns; 11496f2d9d74STimofey Kutergin int user_prot; 115053c038efSRichard Henderson 115153c038efSRichard Henderson /* Pagetable walk. */ 115253c038efSRichard Henderson /* Lookup l1 descriptor. */ 115353c038efSRichard Henderson if (!get_level1_table_address(env, mmu_idx, &table, address)) { 115453c038efSRichard Henderson /* Section translation fault if page walk is disabled by PD0 or PD1 */ 115553c038efSRichard Henderson fi->type = ARMFault_Translation; 115653c038efSRichard Henderson goto do_fault; 115753c038efSRichard Henderson } 115893e5b3a6SRichard Henderson if (!S1_ptw_translate(env, ptw, table, fi)) { 115993e5b3a6SRichard Henderson goto do_fault; 116093e5b3a6SRichard Henderson } 116193e5b3a6SRichard Henderson desc = arm_ldl_ptw(env, ptw, fi); 116253c038efSRichard Henderson if (fi->type != ARMFault_None) { 116353c038efSRichard Henderson goto do_fault; 116453c038efSRichard Henderson } 116553c038efSRichard Henderson type = (desc & 3); 116653c038efSRichard Henderson if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { 116753c038efSRichard Henderson /* Section translation fault, or attempt to use the encoding 116853c038efSRichard Henderson * which is Reserved on implementations without PXN. 116953c038efSRichard Henderson */ 117053c038efSRichard Henderson fi->type = ARMFault_Translation; 117153c038efSRichard Henderson goto do_fault; 117253c038efSRichard Henderson } 117353c038efSRichard Henderson if ((type == 1) || !(desc & (1 << 18))) { 117453c038efSRichard Henderson /* Page or Section. */ 117553c038efSRichard Henderson domain = (desc >> 5) & 0x0f; 117653c038efSRichard Henderson } 117753c038efSRichard Henderson if (regime_el(env, mmu_idx) == 1) { 117853c038efSRichard Henderson dacr = env->cp15.dacr_ns; 117953c038efSRichard Henderson } else { 118053c038efSRichard Henderson dacr = env->cp15.dacr_s; 118153c038efSRichard Henderson } 118253c038efSRichard Henderson if (type == 1) { 118353c038efSRichard Henderson level = 2; 118453c038efSRichard Henderson } 118553c038efSRichard Henderson domain_prot = (dacr >> (domain * 2)) & 3; 118653c038efSRichard Henderson if (domain_prot == 0 || domain_prot == 2) { 118753c038efSRichard Henderson /* Section or Page domain fault */ 118853c038efSRichard Henderson fi->type = ARMFault_Domain; 118953c038efSRichard Henderson goto do_fault; 119053c038efSRichard Henderson } 119153c038efSRichard Henderson if (type != 1) { 119253c038efSRichard Henderson if (desc & (1 << 18)) { 119353c038efSRichard Henderson /* Supersection. */ 119453c038efSRichard Henderson phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 119553c038efSRichard Henderson phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 119653c038efSRichard Henderson phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 11977fa7ea8fSRichard Henderson result->f.lg_page_size = 24; /* 16MB */ 119853c038efSRichard Henderson } else { 119953c038efSRichard Henderson /* Section. */ 120053c038efSRichard Henderson phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 12017fa7ea8fSRichard Henderson result->f.lg_page_size = 20; /* 1MB */ 120253c038efSRichard Henderson } 120353c038efSRichard Henderson ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 120453c038efSRichard Henderson xn = desc & (1 << 4); 120553c038efSRichard Henderson pxn = desc & 1; 120653c038efSRichard Henderson ns = extract32(desc, 19, 1); 120753c038efSRichard Henderson } else { 120853c038efSRichard Henderson if (cpu_isar_feature(aa32_pxn, cpu)) { 120953c038efSRichard Henderson pxn = (desc >> 2) & 1; 121053c038efSRichard Henderson } 121153c038efSRichard Henderson ns = extract32(desc, 3, 1); 121253c038efSRichard Henderson /* Lookup l2 entry. */ 121353c038efSRichard Henderson table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 121493e5b3a6SRichard Henderson if (!S1_ptw_translate(env, ptw, table, fi)) { 121593e5b3a6SRichard Henderson goto do_fault; 121693e5b3a6SRichard Henderson } 121793e5b3a6SRichard Henderson desc = arm_ldl_ptw(env, ptw, fi); 121853c038efSRichard Henderson if (fi->type != ARMFault_None) { 121953c038efSRichard Henderson goto do_fault; 122053c038efSRichard Henderson } 122153c038efSRichard Henderson ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 122253c038efSRichard Henderson switch (desc & 3) { 122353c038efSRichard Henderson case 0: /* Page translation fault. */ 122453c038efSRichard Henderson fi->type = ARMFault_Translation; 122553c038efSRichard Henderson goto do_fault; 122653c038efSRichard Henderson case 1: /* 64k page. */ 122753c038efSRichard Henderson phys_addr = (desc & 0xffff0000) | (address & 0xffff); 122853c038efSRichard Henderson xn = desc & (1 << 15); 12297fa7ea8fSRichard Henderson result->f.lg_page_size = 16; 123053c038efSRichard Henderson break; 123153c038efSRichard Henderson case 2: case 3: /* 4k page. */ 123253c038efSRichard Henderson phys_addr = (desc & 0xfffff000) | (address & 0xfff); 123353c038efSRichard Henderson xn = desc & 1; 12347fa7ea8fSRichard Henderson result->f.lg_page_size = 12; 123553c038efSRichard Henderson break; 123653c038efSRichard Henderson default: 123753c038efSRichard Henderson /* Never happens, but compiler isn't smart enough to tell. */ 123853c038efSRichard Henderson g_assert_not_reached(); 123953c038efSRichard Henderson } 124053c038efSRichard Henderson } 124153c038efSRichard Henderson if (domain_prot == 3) { 12427fa7ea8fSRichard Henderson result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 124353c038efSRichard Henderson } else { 124453c038efSRichard Henderson if (pxn && !regime_is_user(env, mmu_idx)) { 124553c038efSRichard Henderson xn = 1; 124653c038efSRichard Henderson } 124753c038efSRichard Henderson if (xn && access_type == MMU_INST_FETCH) { 124853c038efSRichard Henderson fi->type = ARMFault_Permission; 124953c038efSRichard Henderson goto do_fault; 125053c038efSRichard Henderson } 125153c038efSRichard Henderson 125253c038efSRichard Henderson if (arm_feature(env, ARM_FEATURE_V6K) && 125353c038efSRichard Henderson (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 125453c038efSRichard Henderson /* The simplified model uses AP[0] as an access control bit. */ 125553c038efSRichard Henderson if ((ap & 1) == 0) { 125653c038efSRichard Henderson /* Access flag fault. */ 125753c038efSRichard Henderson fi->type = ARMFault_AccessFlag; 125853c038efSRichard Henderson goto do_fault; 125953c038efSRichard Henderson } 12607fa7ea8fSRichard Henderson result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 12616f2d9d74STimofey Kutergin user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1); 126253c038efSRichard Henderson } else { 12637fa7ea8fSRichard Henderson result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 12646f2d9d74STimofey Kutergin user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1); 126553c038efSRichard Henderson } 12667fa7ea8fSRichard Henderson if (result->f.prot && !xn) { 12677fa7ea8fSRichard Henderson result->f.prot |= PAGE_EXEC; 126853c038efSRichard Henderson } 12697fa7ea8fSRichard Henderson if (!(result->f.prot & (1 << access_type))) { 127053c038efSRichard Henderson /* Access permission fault. */ 127153c038efSRichard Henderson fi->type = ARMFault_Permission; 127253c038efSRichard Henderson goto do_fault; 127353c038efSRichard Henderson } 12746f2d9d74STimofey Kutergin if (regime_is_pan(env, mmu_idx) && 12756f2d9d74STimofey Kutergin !regime_is_user(env, mmu_idx) && 12766f2d9d74STimofey Kutergin user_prot && 12776f2d9d74STimofey Kutergin access_type != MMU_INST_FETCH) { 12786f2d9d74STimofey Kutergin /* Privileged Access Never fault */ 12796f2d9d74STimofey Kutergin fi->type = ARMFault_Permission; 12806f2d9d74STimofey Kutergin goto do_fault; 12816f2d9d74STimofey Kutergin } 128253c038efSRichard Henderson } 128353c038efSRichard Henderson if (ns) { 128453c038efSRichard Henderson /* The NS bit will (as required by the architecture) have no effect if 128553c038efSRichard Henderson * the CPU doesn't support TZ or this is a non-secure translation 128653c038efSRichard Henderson * regime, because the attribute will already be non-secure. 128753c038efSRichard Henderson */ 12887fa7ea8fSRichard Henderson result->f.attrs.secure = false; 128990c66293SRichard Henderson result->f.attrs.space = ARMSS_NonSecure; 129053c038efSRichard Henderson } 12917fa7ea8fSRichard Henderson result->f.phys_addr = phys_addr; 129253c038efSRichard Henderson return false; 129353c038efSRichard Henderson do_fault: 129453c038efSRichard Henderson fi->domain = domain; 129553c038efSRichard Henderson fi->level = level; 129653c038efSRichard Henderson return true; 129753c038efSRichard Henderson } 129853c038efSRichard Henderson 1299f8526edcSRichard Henderson /* 1300f8526edcSRichard Henderson * Translate S2 section/page access permissions to protection flags 1301f8526edcSRichard Henderson * @env: CPUARMState 1302f8526edcSRichard Henderson * @s2ap: The 2-bit stage2 access permissions (S2AP) 1303f8526edcSRichard Henderson * @xn: XN (execute-never) bits 1304f8526edcSRichard Henderson * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 1305f8526edcSRichard Henderson */ 13064a7d7702SRichard Henderson static int get_S2prot_noexecute(int s2ap) 1307f8526edcSRichard Henderson { 1308f8526edcSRichard Henderson int prot = 0; 1309f8526edcSRichard Henderson 1310f8526edcSRichard Henderson if (s2ap & 1) { 1311f8526edcSRichard Henderson prot |= PAGE_READ; 1312f8526edcSRichard Henderson } 1313f8526edcSRichard Henderson if (s2ap & 2) { 1314f8526edcSRichard Henderson prot |= PAGE_WRITE; 1315f8526edcSRichard Henderson } 13164a7d7702SRichard Henderson return prot; 13174a7d7702SRichard Henderson } 13184a7d7702SRichard Henderson 13194a7d7702SRichard Henderson static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) 13204a7d7702SRichard Henderson { 13214a7d7702SRichard Henderson int prot = get_S2prot_noexecute(s2ap); 1322f8526edcSRichard Henderson 1323f8526edcSRichard Henderson if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { 1324f8526edcSRichard Henderson switch (xn) { 1325f8526edcSRichard Henderson case 0: 1326f8526edcSRichard Henderson prot |= PAGE_EXEC; 1327f8526edcSRichard Henderson break; 1328f8526edcSRichard Henderson case 1: 1329f8526edcSRichard Henderson if (s1_is_el0) { 1330f8526edcSRichard Henderson prot |= PAGE_EXEC; 1331f8526edcSRichard Henderson } 1332f8526edcSRichard Henderson break; 1333f8526edcSRichard Henderson case 2: 1334f8526edcSRichard Henderson break; 1335f8526edcSRichard Henderson case 3: 1336f8526edcSRichard Henderson if (!s1_is_el0) { 1337f8526edcSRichard Henderson prot |= PAGE_EXEC; 1338f8526edcSRichard Henderson } 1339f8526edcSRichard Henderson break; 1340f8526edcSRichard Henderson default: 1341f8526edcSRichard Henderson g_assert_not_reached(); 1342f8526edcSRichard Henderson } 1343f8526edcSRichard Henderson } else { 1344f8526edcSRichard Henderson if (!extract32(xn, 1, 1)) { 1345f8526edcSRichard Henderson if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 1346f8526edcSRichard Henderson prot |= PAGE_EXEC; 1347f8526edcSRichard Henderson } 1348f8526edcSRichard Henderson } 1349f8526edcSRichard Henderson } 1350f8526edcSRichard Henderson return prot; 1351f8526edcSRichard Henderson } 1352f8526edcSRichard Henderson 1353f8526edcSRichard Henderson /* 1354f8526edcSRichard Henderson * Translate section/page access permissions to protection flags 1355f8526edcSRichard Henderson * @env: CPUARMState 1356f8526edcSRichard Henderson * @mmu_idx: MMU index indicating required translation regime 1357f8526edcSRichard Henderson * @is_aa64: TRUE if AArch64 1358f8526edcSRichard Henderson * @ap: The 2-bit simple AP (AP[2:1]) 1359f8526edcSRichard Henderson * @xn: XN (execute-never) bit 1360f8526edcSRichard Henderson * @pxn: PXN (privileged execute-never) bit 13612f1ff4e7SRichard Henderson * @in_pa: The original input pa space 13622f1ff4e7SRichard Henderson * @out_pa: The output pa space, modified by NSTable, NS, and NSE 1363f8526edcSRichard Henderson */ 1364f8526edcSRichard Henderson static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 13652f1ff4e7SRichard Henderson int ap, int xn, int pxn, 13662f1ff4e7SRichard Henderson ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) 1367f8526edcSRichard Henderson { 1368dd17143fSPeter Maydell ARMCPU *cpu = env_archcpu(env); 1369f8526edcSRichard Henderson bool is_user = regime_is_user(env, mmu_idx); 1370f8526edcSRichard Henderson int prot_rw, user_rw; 1371f8526edcSRichard Henderson bool have_wxn; 1372f8526edcSRichard Henderson int wxn = 0; 1373f8526edcSRichard Henderson 1374edc05dd4SRichard Henderson assert(!regime_is_stage2(mmu_idx)); 1375f8526edcSRichard Henderson 1376f8526edcSRichard Henderson user_rw = simple_ap_to_rw_prot_is_user(ap, true); 1377f8526edcSRichard Henderson if (is_user) { 1378f8526edcSRichard Henderson prot_rw = user_rw; 1379f8526edcSRichard Henderson } else { 1380dd17143fSPeter Maydell /* 1381dd17143fSPeter Maydell * PAN controls can forbid data accesses but don't affect insn fetch. 1382dd17143fSPeter Maydell * Plain PAN forbids data accesses if EL0 has data permissions; 1383dd17143fSPeter Maydell * PAN3 forbids data accesses if EL0 has either data or exec perms. 1384dd17143fSPeter Maydell * Note that for AArch64 the 'user can exec' case is exactly !xn. 1385dd17143fSPeter Maydell * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0 1386dd17143fSPeter Maydell * do not affect EPAN. 1387dd17143fSPeter Maydell */ 1388f8526edcSRichard Henderson if (user_rw && regime_is_pan(env, mmu_idx)) { 1389dd17143fSPeter Maydell prot_rw = 0; 1390dd17143fSPeter Maydell } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 && 1391dd17143fSPeter Maydell regime_is_pan(env, mmu_idx) && 1392dd17143fSPeter Maydell (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) { 1393f8526edcSRichard Henderson prot_rw = 0; 1394f8526edcSRichard Henderson } else { 1395f8526edcSRichard Henderson prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 1396f8526edcSRichard Henderson } 1397f8526edcSRichard Henderson } 1398f8526edcSRichard Henderson 13994a7d7702SRichard Henderson if (in_pa != out_pa) { 14004a7d7702SRichard Henderson switch (in_pa) { 14014a7d7702SRichard Henderson case ARMSS_Root: 14024a7d7702SRichard Henderson /* 14034a7d7702SRichard Henderson * R_ZWRVD: permission fault for insn fetched from non-Root, 14044a7d7702SRichard Henderson * I_WWBFB: SIF has no effect in EL3. 14054a7d7702SRichard Henderson */ 1406f8526edcSRichard Henderson return prot_rw; 14074a7d7702SRichard Henderson case ARMSS_Realm: 14084a7d7702SRichard Henderson /* 14094a7d7702SRichard Henderson * R_PKTDS: permission fault for insn fetched from non-Realm, 14104a7d7702SRichard Henderson * for Realm EL2 or EL2&0. The corresponding fault for EL1&0 14114a7d7702SRichard Henderson * happens during any stage2 translation. 14124a7d7702SRichard Henderson */ 14134a7d7702SRichard Henderson switch (mmu_idx) { 14144a7d7702SRichard Henderson case ARMMMUIdx_E2: 14154a7d7702SRichard Henderson case ARMMMUIdx_E20_0: 14164a7d7702SRichard Henderson case ARMMMUIdx_E20_2: 14174a7d7702SRichard Henderson case ARMMMUIdx_E20_2_PAN: 14184a7d7702SRichard Henderson return prot_rw; 14194a7d7702SRichard Henderson default: 14204a7d7702SRichard Henderson break; 14214a7d7702SRichard Henderson } 14224a7d7702SRichard Henderson break; 14234a7d7702SRichard Henderson case ARMSS_Secure: 14244a7d7702SRichard Henderson if (env->cp15.scr_el3 & SCR_SIF) { 14254a7d7702SRichard Henderson return prot_rw; 14264a7d7702SRichard Henderson } 14274a7d7702SRichard Henderson break; 14284a7d7702SRichard Henderson default: 14294a7d7702SRichard Henderson /* Input NonSecure must have output NonSecure. */ 14304a7d7702SRichard Henderson g_assert_not_reached(); 14314a7d7702SRichard Henderson } 1432f8526edcSRichard Henderson } 1433f8526edcSRichard Henderson 1434f8526edcSRichard Henderson /* TODO have_wxn should be replaced with 1435f8526edcSRichard Henderson * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 1436f8526edcSRichard Henderson * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 1437f8526edcSRichard Henderson * compatible processors have EL2, which is required for [U]WXN. 1438f8526edcSRichard Henderson */ 1439f8526edcSRichard Henderson have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 1440f8526edcSRichard Henderson 1441f8526edcSRichard Henderson if (have_wxn) { 1442f8526edcSRichard Henderson wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 1443f8526edcSRichard Henderson } 1444f8526edcSRichard Henderson 1445f8526edcSRichard Henderson if (is_aa64) { 1446f8526edcSRichard Henderson if (regime_has_2_ranges(mmu_idx) && !is_user) { 1447f8526edcSRichard Henderson xn = pxn || (user_rw & PAGE_WRITE); 1448f8526edcSRichard Henderson } 1449f8526edcSRichard Henderson } else if (arm_feature(env, ARM_FEATURE_V7)) { 1450f8526edcSRichard Henderson switch (regime_el(env, mmu_idx)) { 1451f8526edcSRichard Henderson case 1: 1452f8526edcSRichard Henderson case 3: 1453f8526edcSRichard Henderson if (is_user) { 1454f8526edcSRichard Henderson xn = xn || !(user_rw & PAGE_READ); 1455f8526edcSRichard Henderson } else { 1456f8526edcSRichard Henderson int uwxn = 0; 1457f8526edcSRichard Henderson if (have_wxn) { 1458f8526edcSRichard Henderson uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 1459f8526edcSRichard Henderson } 1460f8526edcSRichard Henderson xn = xn || !(prot_rw & PAGE_READ) || pxn || 1461f8526edcSRichard Henderson (uwxn && (user_rw & PAGE_WRITE)); 1462f8526edcSRichard Henderson } 1463f8526edcSRichard Henderson break; 1464f8526edcSRichard Henderson case 2: 1465f8526edcSRichard Henderson break; 1466f8526edcSRichard Henderson } 1467f8526edcSRichard Henderson } else { 1468f8526edcSRichard Henderson xn = wxn = 0; 1469f8526edcSRichard Henderson } 1470f8526edcSRichard Henderson 1471f8526edcSRichard Henderson if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 1472f8526edcSRichard Henderson return prot_rw; 1473f8526edcSRichard Henderson } 1474f8526edcSRichard Henderson return prot_rw | PAGE_EXEC; 1475f8526edcSRichard Henderson } 1476f8526edcSRichard Henderson 14772f0ec92eSRichard Henderson static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 14782f0ec92eSRichard Henderson ARMMMUIdx mmu_idx) 14792f0ec92eSRichard Henderson { 1480c1547bbaSPeter Maydell uint64_t tcr = regime_tcr(env, mmu_idx); 14812f0ec92eSRichard Henderson uint32_t el = regime_el(env, mmu_idx); 14822f0ec92eSRichard Henderson int select, tsz; 14832f0ec92eSRichard Henderson bool epd, hpd; 14842f0ec92eSRichard Henderson 14852f0ec92eSRichard Henderson assert(mmu_idx != ARMMMUIdx_Stage2_S); 14862f0ec92eSRichard Henderson 14872f0ec92eSRichard Henderson if (mmu_idx == ARMMMUIdx_Stage2) { 14882f0ec92eSRichard Henderson /* VTCR */ 14892f0ec92eSRichard Henderson bool sext = extract32(tcr, 4, 1); 14902f0ec92eSRichard Henderson bool sign = extract32(tcr, 3, 1); 14912f0ec92eSRichard Henderson 14922f0ec92eSRichard Henderson /* 14932f0ec92eSRichard Henderson * If the sign-extend bit is not the same as t0sz[3], the result 14942f0ec92eSRichard Henderson * is unpredictable. Flag this as a guest error. 14952f0ec92eSRichard Henderson */ 14962f0ec92eSRichard Henderson if (sign != sext) { 14972f0ec92eSRichard Henderson qemu_log_mask(LOG_GUEST_ERROR, 14982f0ec92eSRichard Henderson "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 14992f0ec92eSRichard Henderson } 15002f0ec92eSRichard Henderson tsz = sextract32(tcr, 0, 4) + 8; 15012f0ec92eSRichard Henderson select = 0; 15022f0ec92eSRichard Henderson hpd = false; 15032f0ec92eSRichard Henderson epd = false; 15042f0ec92eSRichard Henderson } else if (el == 2) { 15052f0ec92eSRichard Henderson /* HTCR */ 15062f0ec92eSRichard Henderson tsz = extract32(tcr, 0, 3); 15072f0ec92eSRichard Henderson select = 0; 15082f0ec92eSRichard Henderson hpd = extract64(tcr, 24, 1); 15092f0ec92eSRichard Henderson epd = false; 15102f0ec92eSRichard Henderson } else { 15112f0ec92eSRichard Henderson int t0sz = extract32(tcr, 0, 3); 15122f0ec92eSRichard Henderson int t1sz = extract32(tcr, 16, 3); 15132f0ec92eSRichard Henderson 15142f0ec92eSRichard Henderson if (t1sz == 0) { 15152f0ec92eSRichard Henderson select = va > (0xffffffffu >> t0sz); 15162f0ec92eSRichard Henderson } else { 15172f0ec92eSRichard Henderson /* Note that we will detect errors later. */ 15182f0ec92eSRichard Henderson select = va >= ~(0xffffffffu >> t1sz); 15192f0ec92eSRichard Henderson } 15202f0ec92eSRichard Henderson if (!select) { 15212f0ec92eSRichard Henderson tsz = t0sz; 15222f0ec92eSRichard Henderson epd = extract32(tcr, 7, 1); 15232f0ec92eSRichard Henderson hpd = extract64(tcr, 41, 1); 15242f0ec92eSRichard Henderson } else { 15252f0ec92eSRichard Henderson tsz = t1sz; 15262f0ec92eSRichard Henderson epd = extract32(tcr, 23, 1); 15272f0ec92eSRichard Henderson hpd = extract64(tcr, 42, 1); 15282f0ec92eSRichard Henderson } 15292f0ec92eSRichard Henderson /* For aarch32, hpd0 is not enabled without t2e as well. */ 15302f0ec92eSRichard Henderson hpd &= extract32(tcr, 6, 1); 15312f0ec92eSRichard Henderson } 15322f0ec92eSRichard Henderson 15332f0ec92eSRichard Henderson return (ARMVAParameters) { 15342f0ec92eSRichard Henderson .tsz = tsz, 15352f0ec92eSRichard Henderson .select = select, 15362f0ec92eSRichard Henderson .epd = epd, 15372f0ec92eSRichard Henderson .hpd = hpd, 15382f0ec92eSRichard Henderson }; 15392f0ec92eSRichard Henderson } 15402f0ec92eSRichard Henderson 1541c5168785SRichard Henderson /* 1542c5168785SRichard Henderson * check_s2_mmu_setup 1543c5168785SRichard Henderson * @cpu: ARMCPU 1544c5168785SRichard Henderson * @is_aa64: True if the translation regime is in AArch64 state 15450ffe5b7bSRichard Henderson * @tcr: VTCR_EL2 or VSTCR_EL2 15460ffe5b7bSRichard Henderson * @ds: Effective value of TCR.DS. 15470ffe5b7bSRichard Henderson * @iasize: Bitsize of IPAs 1548c5168785SRichard Henderson * @stride: Page-table stride (See the ARM ARM) 1549c5168785SRichard Henderson * 15500ffe5b7bSRichard Henderson * Decode the starting level of the S2 lookup, returning INT_MIN if 15510ffe5b7bSRichard Henderson * the configuration is invalid. 1552c5168785SRichard Henderson */ 15530ffe5b7bSRichard Henderson static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, 15540ffe5b7bSRichard Henderson bool ds, int iasize, int stride) 1555c5168785SRichard Henderson { 15560ffe5b7bSRichard Henderson int sl0, sl2, startlevel, granulebits, levels; 15570ffe5b7bSRichard Henderson int s1_min_iasize, s1_max_iasize; 15580ffe5b7bSRichard Henderson 15590ffe5b7bSRichard Henderson sl0 = extract32(tcr, 6, 2); 15600ffe5b7bSRichard Henderson if (is_aa64) { 15610ffe5b7bSRichard Henderson /* 15620ffe5b7bSRichard Henderson * AArch64.S2InvalidSL: Interpretation of SL depends on the page size, 15630ffe5b7bSRichard Henderson * so interleave AArch64.S2StartLevel. 1564c5168785SRichard Henderson */ 1565c5168785SRichard Henderson switch (stride) { 15660ffe5b7bSRichard Henderson case 9: /* 4KB */ 15670ffe5b7bSRichard Henderson /* SL2 is RES0 unless DS=1 & 4KB granule. */ 15680ffe5b7bSRichard Henderson sl2 = extract64(tcr, 33, 1); 15690ffe5b7bSRichard Henderson if (ds && sl2) { 15700ffe5b7bSRichard Henderson if (sl0 != 0) { 15710ffe5b7bSRichard Henderson goto fail; 15720ffe5b7bSRichard Henderson } 15730ffe5b7bSRichard Henderson startlevel = -1; 15740ffe5b7bSRichard Henderson } else { 15750ffe5b7bSRichard Henderson startlevel = 2 - sl0; 15760ffe5b7bSRichard Henderson switch (sl0) { 15770ffe5b7bSRichard Henderson case 2: 15780ffe5b7bSRichard Henderson if (arm_pamax(cpu) < 44) { 15790ffe5b7bSRichard Henderson goto fail; 1580c5168785SRichard Henderson } 1581c5168785SRichard Henderson break; 15820ffe5b7bSRichard Henderson case 3: 15830ffe5b7bSRichard Henderson if (!cpu_isar_feature(aa64_st, cpu)) { 15840ffe5b7bSRichard Henderson goto fail; 15850ffe5b7bSRichard Henderson } 15860ffe5b7bSRichard Henderson startlevel = 3; 15870ffe5b7bSRichard Henderson break; 15880ffe5b7bSRichard Henderson } 1589c5168785SRichard Henderson } 1590c5168785SRichard Henderson break; 15910ffe5b7bSRichard Henderson case 11: /* 16KB */ 15920ffe5b7bSRichard Henderson switch (sl0) { 15930ffe5b7bSRichard Henderson case 2: 15940ffe5b7bSRichard Henderson if (arm_pamax(cpu) < 42) { 15950ffe5b7bSRichard Henderson goto fail; 1596c5168785SRichard Henderson } 1597c5168785SRichard Henderson break; 15980ffe5b7bSRichard Henderson case 3: 15990ffe5b7bSRichard Henderson if (!ds) { 16000ffe5b7bSRichard Henderson goto fail; 16010ffe5b7bSRichard Henderson } 16020ffe5b7bSRichard Henderson break; 16030ffe5b7bSRichard Henderson } 16040ffe5b7bSRichard Henderson startlevel = 3 - sl0; 16050ffe5b7bSRichard Henderson break; 16060ffe5b7bSRichard Henderson case 13: /* 64KB */ 16070ffe5b7bSRichard Henderson switch (sl0) { 16080ffe5b7bSRichard Henderson case 2: 16090ffe5b7bSRichard Henderson if (arm_pamax(cpu) < 44) { 16100ffe5b7bSRichard Henderson goto fail; 16110ffe5b7bSRichard Henderson } 16120ffe5b7bSRichard Henderson break; 16130ffe5b7bSRichard Henderson case 3: 16140ffe5b7bSRichard Henderson goto fail; 16150ffe5b7bSRichard Henderson } 16160ffe5b7bSRichard Henderson startlevel = 3 - sl0; 16170ffe5b7bSRichard Henderson break; 1618c5168785SRichard Henderson default: 1619c5168785SRichard Henderson g_assert_not_reached(); 1620c5168785SRichard Henderson } 1621c5168785SRichard Henderson } else { 16220ffe5b7bSRichard Henderson /* 16230ffe5b7bSRichard Henderson * Things are simpler for AArch32 EL2, with only 4k pages. 16240ffe5b7bSRichard Henderson * There is no separate S2InvalidSL function, but AArch32.S2Walk 16250ffe5b7bSRichard Henderson * begins with walkparms.sl0 in {'1x'}. 16260ffe5b7bSRichard Henderson */ 1627c5168785SRichard Henderson assert(stride == 9); 16280ffe5b7bSRichard Henderson if (sl0 >= 2) { 16290ffe5b7bSRichard Henderson goto fail; 16300ffe5b7bSRichard Henderson } 16310ffe5b7bSRichard Henderson startlevel = 2 - sl0; 16320ffe5b7bSRichard Henderson } 1633c5168785SRichard Henderson 16340ffe5b7bSRichard Henderson /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */ 16350ffe5b7bSRichard Henderson levels = 3 - startlevel; 16360ffe5b7bSRichard Henderson granulebits = stride + 3; 16370ffe5b7bSRichard Henderson 16380ffe5b7bSRichard Henderson s1_min_iasize = levels * stride + granulebits + 1; 16390ffe5b7bSRichard Henderson s1_max_iasize = s1_min_iasize + (stride - 1) + 4; 16400ffe5b7bSRichard Henderson 16410ffe5b7bSRichard Henderson if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) { 16420ffe5b7bSRichard Henderson return startlevel; 1643c5168785SRichard Henderson } 16440ffe5b7bSRichard Henderson 16450ffe5b7bSRichard Henderson fail: 16460ffe5b7bSRichard Henderson return INT_MIN; 1647c5168785SRichard Henderson } 1648c5168785SRichard Henderson 1649d53e2507SPeter Maydell static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds, 1650d53e2507SPeter Maydell ARMGranuleSize gran, int level) 1651d53e2507SPeter Maydell { 1652d53e2507SPeter Maydell /* 1653d53e2507SPeter Maydell * See pseudocode AArch46.BlockDescSupported(): block descriptors 1654d53e2507SPeter Maydell * are not valid at all levels, depending on the page size. 1655d53e2507SPeter Maydell */ 1656d53e2507SPeter Maydell switch (gran) { 1657d53e2507SPeter Maydell case Gran4K: 1658d53e2507SPeter Maydell return (level == 0 && ds) || level == 1 || level == 2; 1659d53e2507SPeter Maydell case Gran16K: 1660d53e2507SPeter Maydell return (level == 1 && ds) || level == 2; 1661d53e2507SPeter Maydell case Gran64K: 1662d53e2507SPeter Maydell return (level == 1 && arm_pamax(cpu) == 52) || level == 2; 1663d53e2507SPeter Maydell default: 1664d53e2507SPeter Maydell g_assert_not_reached(); 1665d53e2507SPeter Maydell } 1666d53e2507SPeter Maydell } 1667d53e2507SPeter Maydell 1668dea9104aSPeter Maydell static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw) 1669dea9104aSPeter Maydell { 1670dea9104aSPeter Maydell uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 1671dea9104aSPeter Maydell return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1); 1672dea9104aSPeter Maydell } 1673dea9104aSPeter Maydell 16743283222aSRichard Henderson /** 16753283222aSRichard Henderson * get_phys_addr_lpae: perform one stage of page table walk, LPAE format 16763283222aSRichard Henderson * 16773283222aSRichard Henderson * Returns false if the translation was successful. Otherwise, phys_ptr, 16783283222aSRichard Henderson * attrs, prot and page_size may not be filled in, and the populated fsr 16793283222aSRichard Henderson * value provides information on why the translation aborted, in the format 16803283222aSRichard Henderson * of a long-format DFSR/IFSR fault register, with the following caveat: 16813283222aSRichard Henderson * the WnR bit is never set (the caller must do this). 16823283222aSRichard Henderson * 16833283222aSRichard Henderson * @env: CPUARMState 16846d2654ffSRichard Henderson * @ptw: Current and next stage parameters for the walk. 16853283222aSRichard Henderson * @address: virtual address to get physical address for 16863283222aSRichard Henderson * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH 1687c053f40bSRichard Henderson * @memop: memory operation feeding this access, or 0 for none 168803ee9bbeSRichard Henderson * @result: set on translation success, 16893283222aSRichard Henderson * @fi: set to fault info if the translation fails 16903283222aSRichard Henderson */ 16916d2654ffSRichard Henderson static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, 16926d2654ffSRichard Henderson uint64_t address, 1693c053f40bSRichard Henderson MMUAccessType access_type, MemOp memop, 1694c23f08a5SRichard Henderson GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 16953283222aSRichard Henderson { 16963283222aSRichard Henderson ARMCPU *cpu = env_archcpu(env); 16976d2654ffSRichard Henderson ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 169815f8f467SArd Biesheuvel int32_t level; 16993283222aSRichard Henderson ARMVAParameters param; 17003283222aSRichard Henderson uint64_t ttbr; 17013283222aSRichard Henderson hwaddr descaddr, indexmask, indexmask_grainsize; 17023283222aSRichard Henderson uint32_t tableattrs; 17033283222aSRichard Henderson target_ulong page_size; 170445666091SRichard Henderson uint64_t attrs; 17053283222aSRichard Henderson int32_t stride; 17063283222aSRichard Henderson int addrsize, inputsize, outputsize; 1707c1547bbaSPeter Maydell uint64_t tcr = regime_tcr(env, mmu_idx); 17082f1ff4e7SRichard Henderson int ap, xn, pxn; 17093283222aSRichard Henderson uint32_t el = regime_el(env, mmu_idx); 17103283222aSRichard Henderson uint64_t descaddrmask; 17113283222aSRichard Henderson bool aarch64 = arm_el_is_aa64(env, el); 171271943a1eSRichard Henderson uint64_t descriptor, new_descriptor; 17132f1ff4e7SRichard Henderson ARMSecuritySpace out_space; 1714728b923fSRichard Henderson bool device; 17153283222aSRichard Henderson 17163283222aSRichard Henderson /* TODO: This code does not support shareability levels. */ 17173283222aSRichard Henderson if (aarch64) { 17183283222aSRichard Henderson int ps; 17193283222aSRichard Henderson 17203283222aSRichard Henderson param = aa64_va_parameters(env, address, mmu_idx, 1721478dccbbSPeter Maydell access_type != MMU_INST_FETCH, 1722478dccbbSPeter Maydell !arm_el_is_aa64(env, 1)); 17233283222aSRichard Henderson level = 0; 17243283222aSRichard Henderson 17253283222aSRichard Henderson /* 17263283222aSRichard Henderson * If TxSZ is programmed to a value larger than the maximum, 17273283222aSRichard Henderson * or smaller than the effective minimum, it is IMPLEMENTATION 17283283222aSRichard Henderson * DEFINED whether we behave as if the field were programmed 17293283222aSRichard Henderson * within bounds, or if a level 0 Translation fault is generated. 17303283222aSRichard Henderson * 17313283222aSRichard Henderson * With FEAT_LVA, fault on less than minimum becomes required, 17323283222aSRichard Henderson * so our choice is to always raise the fault. 17333283222aSRichard Henderson */ 17343283222aSRichard Henderson if (param.tsz_oob) { 173527c1b81dSRichard Henderson goto do_translation_fault; 17363283222aSRichard Henderson } 17373283222aSRichard Henderson 17383283222aSRichard Henderson addrsize = 64 - 8 * param.tbi; 17393283222aSRichard Henderson inputsize = 64 - param.tsz; 17403283222aSRichard Henderson 17413283222aSRichard Henderson /* 17423283222aSRichard Henderson * Bound PS by PARANGE to find the effective output address size. 17433283222aSRichard Henderson * ID_AA64MMFR0 is a read-only register so values outside of the 17443283222aSRichard Henderson * supported mappings can be considered an implementation error. 17453283222aSRichard Henderson */ 17463283222aSRichard Henderson ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); 17473283222aSRichard Henderson ps = MIN(ps, param.ps); 17483283222aSRichard Henderson assert(ps < ARRAY_SIZE(pamax_map)); 17493283222aSRichard Henderson outputsize = pamax_map[ps]; 1750312b71abSArd Biesheuvel 1751312b71abSArd Biesheuvel /* 1752312b71abSArd Biesheuvel * With LPA2, the effective output address (OA) size is at most 48 bits 1753312b71abSArd Biesheuvel * unless TCR.DS == 1 1754312b71abSArd Biesheuvel */ 1755312b71abSArd Biesheuvel if (!param.ds && param.gran != Gran64K) { 1756312b71abSArd Biesheuvel outputsize = MIN(outputsize, 48); 1757312b71abSArd Biesheuvel } 17583283222aSRichard Henderson } else { 17593283222aSRichard Henderson param = aa32_va_parameters(env, address, mmu_idx); 17603283222aSRichard Henderson level = 1; 17613283222aSRichard Henderson addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 17623283222aSRichard Henderson inputsize = addrsize - param.tsz; 17633283222aSRichard Henderson outputsize = 40; 17643283222aSRichard Henderson } 17653283222aSRichard Henderson 17663283222aSRichard Henderson /* 17673283222aSRichard Henderson * We determined the region when collecting the parameters, but we 17683283222aSRichard Henderson * have not yet validated that the address is valid for the region. 17693283222aSRichard Henderson * Extract the top bits and verify that they all match select. 17703283222aSRichard Henderson * 17713283222aSRichard Henderson * For aa32, if inputsize == addrsize, then we have selected the 17723283222aSRichard Henderson * region by exclusion in aa32_va_parameters and there is no more 17733283222aSRichard Henderson * validation to do here. 17743283222aSRichard Henderson */ 17753283222aSRichard Henderson if (inputsize < addrsize) { 17763283222aSRichard Henderson target_ulong top_bits = sextract64(address, inputsize, 17773283222aSRichard Henderson addrsize - inputsize); 17783283222aSRichard Henderson if (-top_bits != param.select) { 17793283222aSRichard Henderson /* The gap between the two regions is a Translation fault */ 178027c1b81dSRichard Henderson goto do_translation_fault; 17813283222aSRichard Henderson } 17823283222aSRichard Henderson } 17833283222aSRichard Henderson 17843c003f70SPeter Maydell stride = arm_granule_bits(param.gran) - 3; 17853283222aSRichard Henderson 17863283222aSRichard Henderson /* 17873283222aSRichard Henderson * Note that QEMU ignores shareability and cacheability attributes, 17883283222aSRichard Henderson * so we don't need to do anything with the SH, ORGN, IRGN fields 17893283222aSRichard Henderson * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 17903283222aSRichard Henderson * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 17913283222aSRichard Henderson * implement any ASID-like capability so we can ignore it (instead 17923283222aSRichard Henderson * we will always flush the TLB any time the ASID is changed). 17933283222aSRichard Henderson */ 17943283222aSRichard Henderson ttbr = regime_ttbr(env, mmu_idx, param.select); 17953283222aSRichard Henderson 17963283222aSRichard Henderson /* 17973283222aSRichard Henderson * Here we should have set up all the parameters for the translation: 17983283222aSRichard Henderson * inputsize, ttbr, epd, stride, tbi 17993283222aSRichard Henderson */ 18003283222aSRichard Henderson 18013283222aSRichard Henderson if (param.epd) { 18023283222aSRichard Henderson /* 18033283222aSRichard Henderson * Translation table walk disabled => Translation fault on TLB miss 18043283222aSRichard Henderson * Note: This is always 0 on 64-bit EL2 and EL3. 18053283222aSRichard Henderson */ 180627c1b81dSRichard Henderson goto do_translation_fault; 18073283222aSRichard Henderson } 18083283222aSRichard Henderson 1809edc05dd4SRichard Henderson if (!regime_is_stage2(mmu_idx)) { 18103283222aSRichard Henderson /* 18113283222aSRichard Henderson * The starting level depends on the virtual address size (which can 18123283222aSRichard Henderson * be up to 48 bits) and the translation granule size. It indicates 18133283222aSRichard Henderson * the number of strides (stride bits at a time) needed to 18143283222aSRichard Henderson * consume the bits of the input address. In the pseudocode this is: 18153283222aSRichard Henderson * level = 4 - RoundUp((inputsize - grainsize) / stride) 18163283222aSRichard Henderson * where their 'inputsize' is our 'inputsize', 'grainsize' is 18173283222aSRichard Henderson * our 'stride + 3' and 'stride' is our 'stride'. 18183283222aSRichard Henderson * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 18193283222aSRichard Henderson * = 4 - (inputsize - stride - 3 + stride - 1) / stride 18203283222aSRichard Henderson * = 4 - (inputsize - 4) / stride; 18213283222aSRichard Henderson */ 18223283222aSRichard Henderson level = 4 - (inputsize - 4) / stride; 18233283222aSRichard Henderson } else { 18240ffe5b7bSRichard Henderson int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds, 18250ffe5b7bSRichard Henderson inputsize, stride); 18260ffe5b7bSRichard Henderson if (startlevel == INT_MIN) { 18273283222aSRichard Henderson level = 0; 182827c1b81dSRichard Henderson goto do_translation_fault; 18293283222aSRichard Henderson } 18303283222aSRichard Henderson level = startlevel; 18313283222aSRichard Henderson } 18323283222aSRichard Henderson 18333283222aSRichard Henderson indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3); 18343283222aSRichard Henderson indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level))); 18353283222aSRichard Henderson 18363283222aSRichard Henderson /* Now we can extract the actual base address from the TTBR */ 18373283222aSRichard Henderson descaddr = extract64(ttbr, 0, 48); 18383283222aSRichard Henderson 18393283222aSRichard Henderson /* 18403283222aSRichard Henderson * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR. 18413283222aSRichard Henderson * 18423283222aSRichard Henderson * Otherwise, if the base address is out of range, raise AddressSizeFault. 18433283222aSRichard Henderson * In the pseudocode, this is !IsZero(baseregister<47:outputsize>), 18443283222aSRichard Henderson * but we've just cleared the bits above 47, so simplify the test. 18453283222aSRichard Henderson */ 18463283222aSRichard Henderson if (outputsize > 48) { 18473283222aSRichard Henderson descaddr |= extract64(ttbr, 2, 4) << 48; 18483283222aSRichard Henderson } else if (descaddr >> outputsize) { 18493283222aSRichard Henderson level = 0; 185027c1b81dSRichard Henderson fi->type = ARMFault_AddressSize; 18513283222aSRichard Henderson goto do_fault; 18523283222aSRichard Henderson } 18533283222aSRichard Henderson 18543283222aSRichard Henderson /* 18553283222aSRichard Henderson * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 18563283222aSRichard Henderson * and also to mask out CnP (bit 0) which could validly be non-zero. 18573283222aSRichard Henderson */ 18583283222aSRichard Henderson descaddr &= ~indexmask; 18593283222aSRichard Henderson 18603283222aSRichard Henderson /* 18613283222aSRichard Henderson * For AArch32, the address field in the descriptor goes up to bit 39 18623283222aSRichard Henderson * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0 18633283222aSRichard Henderson * or an AddressSize fault is raised. So for v8 we extract those SBZ 18643283222aSRichard Henderson * bits as part of the address, which will be checked via outputsize. 18653283222aSRichard Henderson * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2; 18663283222aSRichard Henderson * the highest bits of a 52-bit output are placed elsewhere. 18673283222aSRichard Henderson */ 18683283222aSRichard Henderson if (param.ds) { 18693283222aSRichard Henderson descaddrmask = MAKE_64BIT_MASK(0, 50); 18703283222aSRichard Henderson } else if (arm_feature(env, ARM_FEATURE_V8)) { 18713283222aSRichard Henderson descaddrmask = MAKE_64BIT_MASK(0, 48); 18723283222aSRichard Henderson } else { 18733283222aSRichard Henderson descaddrmask = MAKE_64BIT_MASK(0, 40); 18743283222aSRichard Henderson } 18753283222aSRichard Henderson descaddrmask &= ~indexmask_grainsize; 187626d19945SRichard Henderson tableattrs = 0; 18773283222aSRichard Henderson 1878fe4ddc15SRichard Henderson next_level: 18793283222aSRichard Henderson descaddr |= (address >> (stride * (4 - level))) & indexmask; 18803283222aSRichard Henderson descaddr &= ~7ULL; 188126d19945SRichard Henderson 188226d19945SRichard Henderson /* 188326d19945SRichard Henderson * Process the NSTable bit from the previous level. This changes 188426d19945SRichard Henderson * the table address space and the output space from Secure to 188526d19945SRichard Henderson * NonSecure. With RME, the EL3 translation regime does not change 188626d19945SRichard Henderson * from Root to NonSecure. 188726d19945SRichard Henderson */ 188826d19945SRichard Henderson if (ptw->in_space == ARMSS_Secure 188926d19945SRichard Henderson && !regime_is_stage2(mmu_idx) 189026d19945SRichard Henderson && extract32(tableattrs, 4, 1)) { 189148da29e4SRichard Henderson /* 189248da29e4SRichard Henderson * Stage2_S -> Stage2 or Phys_S -> Phys_NS 1893d38fa967SRichard Henderson * Assert the relative order of the secure/non-secure indexes. 189448da29e4SRichard Henderson */ 1895d38fa967SRichard Henderson QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS); 1896d38fa967SRichard Henderson QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2); 1897d38fa967SRichard Henderson ptw->in_ptw_idx += 1; 189826d19945SRichard Henderson ptw->in_space = ARMSS_NonSecure; 189948da29e4SRichard Henderson } 190026d19945SRichard Henderson 190193e5b3a6SRichard Henderson if (!S1_ptw_translate(env, ptw, descaddr, fi)) { 190293e5b3a6SRichard Henderson goto do_fault; 190393e5b3a6SRichard Henderson } 190493e5b3a6SRichard Henderson descriptor = arm_ldq_ptw(env, ptw, fi); 19053283222aSRichard Henderson if (fi->type != ARMFault_None) { 19063283222aSRichard Henderson goto do_fault; 19073283222aSRichard Henderson } 190871943a1eSRichard Henderson new_descriptor = descriptor; 19093283222aSRichard Henderson 191071943a1eSRichard Henderson restart_atomic_update: 1911d53e2507SPeter Maydell if (!(descriptor & 1) || 1912d53e2507SPeter Maydell (!(descriptor & 2) && 1913d53e2507SPeter Maydell !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) { 1914d53e2507SPeter Maydell /* Invalid, or a block descriptor at an invalid level */ 191527c1b81dSRichard Henderson goto do_translation_fault; 19163283222aSRichard Henderson } 19173283222aSRichard Henderson 19183283222aSRichard Henderson descaddr = descriptor & descaddrmask; 19193283222aSRichard Henderson 19203283222aSRichard Henderson /* 19213283222aSRichard Henderson * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12] 19223283222aSRichard Henderson * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of 19233283222aSRichard Henderson * descaddr are in [9:8]. Otherwise, if descaddr is out of range, 19243283222aSRichard Henderson * raise AddressSizeFault. 19253283222aSRichard Henderson */ 19263283222aSRichard Henderson if (outputsize > 48) { 19273283222aSRichard Henderson if (param.ds) { 19283283222aSRichard Henderson descaddr |= extract64(descriptor, 8, 2) << 50; 19293283222aSRichard Henderson } else { 19303283222aSRichard Henderson descaddr |= extract64(descriptor, 12, 4) << 48; 19313283222aSRichard Henderson } 19323283222aSRichard Henderson } else if (descaddr >> outputsize) { 193327c1b81dSRichard Henderson fi->type = ARMFault_AddressSize; 19343283222aSRichard Henderson goto do_fault; 19353283222aSRichard Henderson } 19363283222aSRichard Henderson 19373283222aSRichard Henderson if ((descriptor & 2) && (level < 3)) { 19383283222aSRichard Henderson /* 19393283222aSRichard Henderson * Table entry. The top five bits are attributes which may 19403283222aSRichard Henderson * propagate down through lower levels of the table (and 19413283222aSRichard Henderson * which are all arranged so that 0 means "no effect", so 19423283222aSRichard Henderson * we can gather them up by ORing in the bits at each level). 19433283222aSRichard Henderson */ 19443283222aSRichard Henderson tableattrs |= extract64(descriptor, 59, 5); 19453283222aSRichard Henderson level++; 19463283222aSRichard Henderson indexmask = indexmask_grainsize; 1947fe4ddc15SRichard Henderson goto next_level; 19483283222aSRichard Henderson } 1949fe4ddc15SRichard Henderson 19503283222aSRichard Henderson /* 19513283222aSRichard Henderson * Block entry at level 1 or 2, or page entry at level 3. 19523283222aSRichard Henderson * These are basically the same thing, although the number 19533283222aSRichard Henderson * of bits we pull in from the vaddr varies. Note that although 19543283222aSRichard Henderson * descaddrmask masks enough of the low bits of the descriptor 19553283222aSRichard Henderson * to give a correct page or table address, the address field 19563283222aSRichard Henderson * in a block descriptor is smaller; so we need to explicitly 19573283222aSRichard Henderson * clear the lower bits here before ORing in the low vaddr bits. 195871943a1eSRichard Henderson * 195971943a1eSRichard Henderson * Afterward, descaddr is the final physical address. 19603283222aSRichard Henderson */ 19613283222aSRichard Henderson page_size = (1ULL << ((stride * (4 - level)) + 3)); 1962c2360eaaSPeter Maydell descaddr &= ~(hwaddr)(page_size - 1); 19633283222aSRichard Henderson descaddr |= (address & (page_size - 1)); 19643283222aSRichard Henderson 196571943a1eSRichard Henderson if (likely(!ptw->in_debug)) { 196634a57faeSRichard Henderson /* 196771943a1eSRichard Henderson * Access flag. 196871943a1eSRichard Henderson * If HA is enabled, prepare to update the descriptor below. 196971943a1eSRichard Henderson * Otherwise, pass the access fault on to software. 197034a57faeSRichard Henderson */ 197171943a1eSRichard Henderson if (!(descriptor & (1 << 10))) { 197271943a1eSRichard Henderson if (param.ha) { 197371943a1eSRichard Henderson new_descriptor |= 1 << 10; /* AF */ 197471943a1eSRichard Henderson } else { 197571943a1eSRichard Henderson fi->type = ARMFault_AccessFlag; 197671943a1eSRichard Henderson goto do_fault; 197771943a1eSRichard Henderson } 197871943a1eSRichard Henderson } 197965c123fdSRichard Henderson 198065c123fdSRichard Henderson /* 198165c123fdSRichard Henderson * Dirty Bit. 198265c123fdSRichard Henderson * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP 198365c123fdSRichard Henderson * bit for writeback. The actual write protection test may still be 198465c123fdSRichard Henderson * overridden by tableattrs, to be merged below. 198565c123fdSRichard Henderson */ 198665c123fdSRichard Henderson if (param.hd 198765c123fdSRichard Henderson && extract64(descriptor, 51, 1) /* DBM */ 198865c123fdSRichard Henderson && access_type == MMU_DATA_STORE) { 198965c123fdSRichard Henderson if (regime_is_stage2(mmu_idx)) { 199065c123fdSRichard Henderson new_descriptor |= 1ull << 7; /* set S2AP[1] */ 199165c123fdSRichard Henderson } else { 199265c123fdSRichard Henderson new_descriptor &= ~(1ull << 7); /* clear AP[2] */ 199365c123fdSRichard Henderson } 199465c123fdSRichard Henderson } 199571943a1eSRichard Henderson } 199671943a1eSRichard Henderson 199771943a1eSRichard Henderson /* 199871943a1eSRichard Henderson * Extract attributes from the (modified) descriptor, and apply 199971943a1eSRichard Henderson * table descriptors. Stage 2 table descriptors do not include 200071943a1eSRichard Henderson * any attribute fields. HPD disables all the table attributes 2001b9c139dcSPeter Maydell * except NSTable (which we have already handled). 200271943a1eSRichard Henderson */ 200371943a1eSRichard Henderson attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14)); 200434a57faeSRichard Henderson if (!regime_is_stage2(mmu_idx)) { 200534a57faeSRichard Henderson if (!param.hpd) { 200645666091SRichard Henderson attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ 20073283222aSRichard Henderson /* 20083283222aSRichard Henderson * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 20093283222aSRichard Henderson * means "force PL1 access only", which means forcing AP[1] to 0. 20103283222aSRichard Henderson */ 201145666091SRichard Henderson attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */ 201245666091SRichard Henderson attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */ 201334a57faeSRichard Henderson } 201434a57faeSRichard Henderson } 2015fe4ddc15SRichard Henderson 201645666091SRichard Henderson ap = extract32(attrs, 6, 2); 20172f1ff4e7SRichard Henderson out_space = ptw->in_space; 2018edc05dd4SRichard Henderson if (regime_is_stage2(mmu_idx)) { 20192f1ff4e7SRichard Henderson /* 20202f1ff4e7SRichard Henderson * R_GYNXY: For stage2 in Realm security state, bit 55 is NS. 20212f1ff4e7SRichard Henderson * The bit remains ignored for other security states. 20224a7d7702SRichard Henderson * R_YMCSL: Executing an insn fetched from non-Realm causes 20234a7d7702SRichard Henderson * a stage2 permission fault. 20242f1ff4e7SRichard Henderson */ 20252f1ff4e7SRichard Henderson if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) { 20262f1ff4e7SRichard Henderson out_space = ARMSS_NonSecure; 20274a7d7702SRichard Henderson result->f.prot = get_S2prot_noexecute(ap); 20284a7d7702SRichard Henderson } else { 202945666091SRichard Henderson xn = extract64(attrs, 53, 2); 20307c19b2d6SRichard Henderson result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); 20314a7d7702SRichard Henderson } 203264bda510SRichard Henderson 203364bda510SRichard Henderson result->cacheattrs.is_s2_format = true; 203464bda510SRichard Henderson result->cacheattrs.attrs = extract32(attrs, 2, 4); 203564bda510SRichard Henderson /* 203664bda510SRichard Henderson * Security state does not really affect HCR_EL2.FWB; 203764bda510SRichard Henderson * we only need to filter FWB for aa32 or other FEAT. 203864bda510SRichard Henderson */ 203964bda510SRichard Henderson device = S2_attrs_are_device(arm_hcr_el2_eff(env), 204064bda510SRichard Henderson result->cacheattrs.attrs); 20413283222aSRichard Henderson } else { 20422f1ff4e7SRichard Henderson int nse, ns = extract32(attrs, 5, 1); 204364bda510SRichard Henderson uint8_t attrindx; 204464bda510SRichard Henderson uint64_t mair; 204564bda510SRichard Henderson 20462f1ff4e7SRichard Henderson switch (out_space) { 20472f1ff4e7SRichard Henderson case ARMSS_Root: 20482f1ff4e7SRichard Henderson /* 20492f1ff4e7SRichard Henderson * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime. 20502f1ff4e7SRichard Henderson * R_XTYPW: NSE and NS together select the output pa space. 20512f1ff4e7SRichard Henderson */ 20522f1ff4e7SRichard Henderson nse = extract32(attrs, 11, 1); 20532f1ff4e7SRichard Henderson out_space = (nse << 1) | ns; 20542f1ff4e7SRichard Henderson if (out_space == ARMSS_Secure && 20552f1ff4e7SRichard Henderson !cpu_isar_feature(aa64_sel2, cpu)) { 20562f1ff4e7SRichard Henderson out_space = ARMSS_NonSecure; 20572f1ff4e7SRichard Henderson } 20582f1ff4e7SRichard Henderson break; 20592f1ff4e7SRichard Henderson case ARMSS_Secure: 20602f1ff4e7SRichard Henderson if (ns) { 20612f1ff4e7SRichard Henderson out_space = ARMSS_NonSecure; 20622f1ff4e7SRichard Henderson } 20632f1ff4e7SRichard Henderson break; 20642f1ff4e7SRichard Henderson case ARMSS_Realm: 20652f1ff4e7SRichard Henderson switch (mmu_idx) { 20662f1ff4e7SRichard Henderson case ARMMMUIdx_Stage1_E0: 20672f1ff4e7SRichard Henderson case ARMMMUIdx_Stage1_E1: 20682f1ff4e7SRichard Henderson case ARMMMUIdx_Stage1_E1_PAN: 20692f1ff4e7SRichard Henderson /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */ 20702f1ff4e7SRichard Henderson break; 20712f1ff4e7SRichard Henderson case ARMMMUIdx_E2: 20722f1ff4e7SRichard Henderson case ARMMMUIdx_E20_0: 20732f1ff4e7SRichard Henderson case ARMMMUIdx_E20_2: 20742f1ff4e7SRichard Henderson case ARMMMUIdx_E20_2_PAN: 20752f1ff4e7SRichard Henderson /* 20762f1ff4e7SRichard Henderson * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1, 20772f1ff4e7SRichard Henderson * NS changes the output to non-secure space. 20782f1ff4e7SRichard Henderson */ 20792f1ff4e7SRichard Henderson if (ns) { 20802f1ff4e7SRichard Henderson out_space = ARMSS_NonSecure; 20812f1ff4e7SRichard Henderson } 20822f1ff4e7SRichard Henderson break; 20832f1ff4e7SRichard Henderson default: 20842f1ff4e7SRichard Henderson g_assert_not_reached(); 20852f1ff4e7SRichard Henderson } 20862f1ff4e7SRichard Henderson break; 20872f1ff4e7SRichard Henderson case ARMSS_NonSecure: 20882f1ff4e7SRichard Henderson /* R_QRMFF: For NonSecure state, the NS bit is RES0. */ 20892f1ff4e7SRichard Henderson break; 20902f1ff4e7SRichard Henderson default: 20912f1ff4e7SRichard Henderson g_assert_not_reached(); 20922f1ff4e7SRichard Henderson } 209345666091SRichard Henderson xn = extract64(attrs, 54, 1); 209445666091SRichard Henderson pxn = extract64(attrs, 53, 1); 20952f1ff4e7SRichard Henderson 2096dea9104aSPeter Maydell if (el == 1 && nv_nv1_enabled(env, ptw)) { 2097dea9104aSPeter Maydell /* 2098dea9104aSPeter Maydell * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page 2099dea9104aSPeter Maydell * descriptor bit 54 holds PXN, 53 is RES0, and the effective value 2100dea9104aSPeter Maydell * of UXN is 0. Similarly for bits 59 and 60 in table descriptors 2101dea9104aSPeter Maydell * (which we have already folded into bits 53 and 54 of attrs). 2102dea9104aSPeter Maydell * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. 2103dea9104aSPeter Maydell * Similarly, APTable[0] from the table descriptor is treated as 0; 2104dea9104aSPeter Maydell * we already folded this into AP[1] and squashing that to 0 does 2105dea9104aSPeter Maydell * the right thing. 2106dea9104aSPeter Maydell */ 2107dea9104aSPeter Maydell pxn = xn; 2108dea9104aSPeter Maydell xn = 0; 2109dea9104aSPeter Maydell ap &= ~1; 2110dea9104aSPeter Maydell } 21112f1ff4e7SRichard Henderson /* 21122f1ff4e7SRichard Henderson * Note that we modified ptw->in_space earlier for NSTable, but 21132f1ff4e7SRichard Henderson * result->f.attrs retains a copy of the original security space. 21142f1ff4e7SRichard Henderson */ 21152f1ff4e7SRichard Henderson result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn, 21162f1ff4e7SRichard Henderson result->f.attrs.space, out_space); 211764bda510SRichard Henderson 211864bda510SRichard Henderson /* Index into MAIR registers for cache attributes */ 211964bda510SRichard Henderson attrindx = extract32(attrs, 2, 3); 212064bda510SRichard Henderson mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 212164bda510SRichard Henderson assert(attrindx <= 7); 212264bda510SRichard Henderson result->cacheattrs.is_s2_format = false; 212364bda510SRichard Henderson result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); 212464bda510SRichard Henderson 212564bda510SRichard Henderson /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ 212664bda510SRichard Henderson if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { 212764bda510SRichard Henderson result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ 212864bda510SRichard Henderson } 212964bda510SRichard Henderson device = S1_attrs_are_device(result->cacheattrs.attrs); 21303283222aSRichard Henderson } 21313283222aSRichard Henderson 2132*e530581eSRichard Henderson /* 2133*e530581eSRichard Henderson * Enable alignment checks on Device memory. 2134*e530581eSRichard Henderson * 2135*e530581eSRichard Henderson * Per R_XCHFJ, the correct ordering for alignment, permission, 2136*e530581eSRichard Henderson * and stage 2 faults is: 2137*e530581eSRichard Henderson * - Alignment fault caused by the memory type 2138*e530581eSRichard Henderson * - Permission fault 2139*e530581eSRichard Henderson * - A stage 2 fault on the memory access 2140*e530581eSRichard Henderson * Perform the alignment check now, so that we recognize it in 2141*e530581eSRichard Henderson * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent 2142*e530581eSRichard Henderson * softmmu tlb hit will also check the alignment; clear along the 2143*e530581eSRichard Henderson * non-device path so that tlb_fill_flags is consistent in the 2144*e530581eSRichard Henderson * event of restart_atomic_update. 2145*e530581eSRichard Henderson * 2146*e530581eSRichard Henderson * In v7, for a CPU without the Virtualization Extensions this 2147*e530581eSRichard Henderson * access is UNPREDICTABLE; we choose to make it take the alignment 2148*e530581eSRichard Henderson * fault as is required for a v7VE CPU. (QEMU doesn't emulate any 2149*e530581eSRichard Henderson * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.) 2150*e530581eSRichard Henderson */ 2151*e530581eSRichard Henderson if (device) { 2152*e530581eSRichard Henderson unsigned a_bits = memop_atomicity_bits(memop); 2153*e530581eSRichard Henderson if (address & ((1 << a_bits) - 1)) { 2154*e530581eSRichard Henderson fi->type = ARMFault_Alignment; 2155*e530581eSRichard Henderson goto do_fault; 2156*e530581eSRichard Henderson } 2157*e530581eSRichard Henderson result->f.tlb_fill_flags = TLB_CHECK_ALIGNED; 2158*e530581eSRichard Henderson } else { 2159*e530581eSRichard Henderson result->f.tlb_fill_flags = 0; 2160*e530581eSRichard Henderson } 2161*e530581eSRichard Henderson 21627fa7ea8fSRichard Henderson if (!(result->f.prot & (1 << access_type))) { 216327c1b81dSRichard Henderson fi->type = ARMFault_Permission; 21643283222aSRichard Henderson goto do_fault; 21653283222aSRichard Henderson } 21663283222aSRichard Henderson 216771943a1eSRichard Henderson /* If FEAT_HAFDBS has made changes, update the PTE. */ 216871943a1eSRichard Henderson if (new_descriptor != descriptor) { 216971943a1eSRichard Henderson new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi); 217071943a1eSRichard Henderson if (fi->type != ARMFault_None) { 217171943a1eSRichard Henderson goto do_fault; 217271943a1eSRichard Henderson } 217371943a1eSRichard Henderson /* 217471943a1eSRichard Henderson * I_YZSVV says that if the in-memory descriptor has changed, 217571943a1eSRichard Henderson * then we must use the information in that new value 217671943a1eSRichard Henderson * (which might include a different output address, different 217771943a1eSRichard Henderson * attributes, or generate a fault). 217871943a1eSRichard Henderson * Restart the handling of the descriptor value from scratch. 217971943a1eSRichard Henderson */ 218071943a1eSRichard Henderson if (new_descriptor != descriptor) { 218171943a1eSRichard Henderson descriptor = new_descriptor; 218271943a1eSRichard Henderson goto restart_atomic_update; 218371943a1eSRichard Henderson } 218471943a1eSRichard Henderson } 218571943a1eSRichard Henderson 21862f1ff4e7SRichard Henderson result->f.attrs.space = out_space; 21872f1ff4e7SRichard Henderson result->f.attrs.secure = arm_space_is_secure(out_space); 2188937f2245SRichard Henderson 2189728b923fSRichard Henderson /* 21903283222aSRichard Henderson * For FEAT_LPA2 and effective DS, the SH field in the attributes 21913283222aSRichard Henderson * was re-purposed for output address bits. The SH attribute in 21923283222aSRichard Henderson * that case comes from TCR_ELx, which we extracted earlier. 21933283222aSRichard Henderson */ 21943283222aSRichard Henderson if (param.ds) { 219503ee9bbeSRichard Henderson result->cacheattrs.shareability = param.sh; 21963283222aSRichard Henderson } else { 219745666091SRichard Henderson result->cacheattrs.shareability = extract32(attrs, 8, 2); 21983283222aSRichard Henderson } 21993283222aSRichard Henderson 22007fa7ea8fSRichard Henderson result->f.phys_addr = descaddr; 22017fa7ea8fSRichard Henderson result->f.lg_page_size = ctz64(page_size); 22023283222aSRichard Henderson return false; 22033283222aSRichard Henderson 220427c1b81dSRichard Henderson do_translation_fault: 220527c1b81dSRichard Henderson fi->type = ARMFault_Translation; 22063283222aSRichard Henderson do_fault: 2207a729d636SPeter Maydell if (fi->s1ptw) { 2208a729d636SPeter Maydell /* Retain the existing stage 2 fi->level */ 2209a729d636SPeter Maydell assert(fi->stage2); 2210a729d636SPeter Maydell } else { 22113283222aSRichard Henderson fi->level = level; 2212a729d636SPeter Maydell fi->stage2 = regime_is_stage2(mmu_idx); 2213a729d636SPeter Maydell } 22144f51edd3SPeter Maydell fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx); 22153283222aSRichard Henderson return true; 22163283222aSRichard Henderson } 22173283222aSRichard Henderson 2218a5637becSPeter Maydell static bool get_phys_addr_pmsav5(CPUARMState *env, 2219a5637becSPeter Maydell S1Translate *ptw, 2220a5637becSPeter Maydell uint32_t address, 2221a5637becSPeter Maydell MMUAccessType access_type, 2222a5637becSPeter Maydell GetPhysAddrResult *result, 22239a12fb36SRichard Henderson ARMMMUFaultInfo *fi) 22249a12fb36SRichard Henderson { 22259a12fb36SRichard Henderson int n; 22269a12fb36SRichard Henderson uint32_t mask; 22279a12fb36SRichard Henderson uint32_t base; 2228a5637becSPeter Maydell ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 22299a12fb36SRichard Henderson bool is_user = regime_is_user(env, mmu_idx); 22309a12fb36SRichard Henderson 2231d1289140SPeter Maydell if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { 22329a12fb36SRichard Henderson /* MPU disabled. */ 22337fa7ea8fSRichard Henderson result->f.phys_addr = address; 22347fa7ea8fSRichard Henderson result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 22359a12fb36SRichard Henderson return false; 22369a12fb36SRichard Henderson } 22379a12fb36SRichard Henderson 22387fa7ea8fSRichard Henderson result->f.phys_addr = address; 22399a12fb36SRichard Henderson for (n = 7; n >= 0; n--) { 22409a12fb36SRichard Henderson base = env->cp15.c6_region[n]; 22419a12fb36SRichard Henderson if ((base & 1) == 0) { 22429a12fb36SRichard Henderson continue; 22439a12fb36SRichard Henderson } 22449a12fb36SRichard Henderson mask = 1 << ((base >> 1) & 0x1f); 22459a12fb36SRichard Henderson /* Keep this shift separate from the above to avoid an 22469a12fb36SRichard Henderson (undefined) << 32. */ 22479a12fb36SRichard Henderson mask = (mask << 1) - 1; 22489a12fb36SRichard Henderson if (((base ^ address) & ~mask) == 0) { 22499a12fb36SRichard Henderson break; 22509a12fb36SRichard Henderson } 22519a12fb36SRichard Henderson } 22529a12fb36SRichard Henderson if (n < 0) { 22539a12fb36SRichard Henderson fi->type = ARMFault_Background; 22549a12fb36SRichard Henderson return true; 22559a12fb36SRichard Henderson } 22569a12fb36SRichard Henderson 22579a12fb36SRichard Henderson if (access_type == MMU_INST_FETCH) { 22589a12fb36SRichard Henderson mask = env->cp15.pmsav5_insn_ap; 22599a12fb36SRichard Henderson } else { 22609a12fb36SRichard Henderson mask = env->cp15.pmsav5_data_ap; 22619a12fb36SRichard Henderson } 22629a12fb36SRichard Henderson mask = (mask >> (n * 4)) & 0xf; 22639a12fb36SRichard Henderson switch (mask) { 22649a12fb36SRichard Henderson case 0: 22659a12fb36SRichard Henderson fi->type = ARMFault_Permission; 22669a12fb36SRichard Henderson fi->level = 1; 22679a12fb36SRichard Henderson return true; 22689a12fb36SRichard Henderson case 1: 22699a12fb36SRichard Henderson if (is_user) { 22709a12fb36SRichard Henderson fi->type = ARMFault_Permission; 22719a12fb36SRichard Henderson fi->level = 1; 22729a12fb36SRichard Henderson return true; 22739a12fb36SRichard Henderson } 22747fa7ea8fSRichard Henderson result->f.prot = PAGE_READ | PAGE_WRITE; 22759a12fb36SRichard Henderson break; 22769a12fb36SRichard Henderson case 2: 22777fa7ea8fSRichard Henderson result->f.prot = PAGE_READ; 22789a12fb36SRichard Henderson if (!is_user) { 22797fa7ea8fSRichard Henderson result->f.prot |= PAGE_WRITE; 22809a12fb36SRichard Henderson } 22819a12fb36SRichard Henderson break; 22829a12fb36SRichard Henderson case 3: 22837fa7ea8fSRichard Henderson result->f.prot = PAGE_READ | PAGE_WRITE; 22849a12fb36SRichard Henderson break; 22859a12fb36SRichard Henderson case 5: 22869a12fb36SRichard Henderson if (is_user) { 22879a12fb36SRichard Henderson fi->type = ARMFault_Permission; 22889a12fb36SRichard Henderson fi->level = 1; 22899a12fb36SRichard Henderson return true; 22909a12fb36SRichard Henderson } 22917fa7ea8fSRichard Henderson result->f.prot = PAGE_READ; 22929a12fb36SRichard Henderson break; 22939a12fb36SRichard Henderson case 6: 22947fa7ea8fSRichard Henderson result->f.prot = PAGE_READ; 22959a12fb36SRichard Henderson break; 22969a12fb36SRichard Henderson default: 22979a12fb36SRichard Henderson /* Bad permission. */ 22989a12fb36SRichard Henderson fi->type = ARMFault_Permission; 22999a12fb36SRichard Henderson fi->level = 1; 23009a12fb36SRichard Henderson return true; 23019a12fb36SRichard Henderson } 23027fa7ea8fSRichard Henderson result->f.prot |= PAGE_EXEC; 23039a12fb36SRichard Henderson return false; 23049a12fb36SRichard Henderson } 23059a12fb36SRichard Henderson 2306fedbaa05SRichard Henderson static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx, 23077fa7ea8fSRichard Henderson int32_t address, uint8_t *prot) 23087d2e08c9SRichard Henderson { 23097d2e08c9SRichard Henderson if (!arm_feature(env, ARM_FEATURE_M)) { 23107d2e08c9SRichard Henderson *prot = PAGE_READ | PAGE_WRITE; 23117d2e08c9SRichard Henderson switch (address) { 23127d2e08c9SRichard Henderson case 0xF0000000 ... 0xFFFFFFFF: 23137d2e08c9SRichard Henderson if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 23147d2e08c9SRichard Henderson /* hivecs execing is ok */ 23157d2e08c9SRichard Henderson *prot |= PAGE_EXEC; 23167d2e08c9SRichard Henderson } 23177d2e08c9SRichard Henderson break; 23187d2e08c9SRichard Henderson case 0x00000000 ... 0x7FFFFFFF: 23197d2e08c9SRichard Henderson *prot |= PAGE_EXEC; 23207d2e08c9SRichard Henderson break; 23217d2e08c9SRichard Henderson } 23227d2e08c9SRichard Henderson } else { 23237d2e08c9SRichard Henderson /* Default system address map for M profile cores. 23247d2e08c9SRichard Henderson * The architecture specifies which regions are execute-never; 23257d2e08c9SRichard Henderson * at the MPU level no other checks are defined. 23267d2e08c9SRichard Henderson */ 23277d2e08c9SRichard Henderson switch (address) { 23287d2e08c9SRichard Henderson case 0x00000000 ... 0x1fffffff: /* ROM */ 23297d2e08c9SRichard Henderson case 0x20000000 ... 0x3fffffff: /* SRAM */ 23307d2e08c9SRichard Henderson case 0x60000000 ... 0x7fffffff: /* RAM */ 23317d2e08c9SRichard Henderson case 0x80000000 ... 0x9fffffff: /* RAM */ 23327d2e08c9SRichard Henderson *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 23337d2e08c9SRichard Henderson break; 23347d2e08c9SRichard Henderson case 0x40000000 ... 0x5fffffff: /* Peripheral */ 23357d2e08c9SRichard Henderson case 0xa0000000 ... 0xbfffffff: /* Device */ 23367d2e08c9SRichard Henderson case 0xc0000000 ... 0xdfffffff: /* Device */ 23377d2e08c9SRichard Henderson case 0xe0000000 ... 0xffffffff: /* System */ 23387d2e08c9SRichard Henderson *prot = PAGE_READ | PAGE_WRITE; 23397d2e08c9SRichard Henderson break; 23407d2e08c9SRichard Henderson default: 23417d2e08c9SRichard Henderson g_assert_not_reached(); 23427d2e08c9SRichard Henderson } 23437d2e08c9SRichard Henderson } 23447d2e08c9SRichard Henderson } 23457d2e08c9SRichard Henderson 234647ff5ba9SRichard Henderson static bool m_is_ppb_region(CPUARMState *env, uint32_t address) 234747ff5ba9SRichard Henderson { 234847ff5ba9SRichard Henderson /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 234947ff5ba9SRichard Henderson return arm_feature(env, ARM_FEATURE_M) && 235047ff5ba9SRichard Henderson extract32(address, 20, 12) == 0xe00; 235147ff5ba9SRichard Henderson } 235247ff5ba9SRichard Henderson 235347ff5ba9SRichard Henderson static bool m_is_system_region(CPUARMState *env, uint32_t address) 235447ff5ba9SRichard Henderson { 235547ff5ba9SRichard Henderson /* 235647ff5ba9SRichard Henderson * True if address is in the M profile system region 235747ff5ba9SRichard Henderson * 0xe0000000 - 0xffffffff 235847ff5ba9SRichard Henderson */ 235947ff5ba9SRichard Henderson return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 236047ff5ba9SRichard Henderson } 236147ff5ba9SRichard Henderson 2362c8e436c9SRichard Henderson static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, 23631a469cf7SRichard Henderson bool is_secure, bool is_user) 2364c8e436c9SRichard Henderson { 2365c8e436c9SRichard Henderson /* 2366c8e436c9SRichard Henderson * Return true if we should use the default memory map as a 2367c8e436c9SRichard Henderson * "background" region if there are no hits against any MPU regions. 2368c8e436c9SRichard Henderson */ 2369c8e436c9SRichard Henderson CPUARMState *env = &cpu->env; 2370c8e436c9SRichard Henderson 2371c8e436c9SRichard Henderson if (is_user) { 2372c8e436c9SRichard Henderson return false; 2373c8e436c9SRichard Henderson } 2374c8e436c9SRichard Henderson 2375c8e436c9SRichard Henderson if (arm_feature(env, ARM_FEATURE_M)) { 23761a469cf7SRichard Henderson return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 2377c8e436c9SRichard Henderson } 2378fca45e34STobias Röhmel 2379fca45e34STobias Röhmel if (mmu_idx == ARMMMUIdx_Stage2) { 2380fca45e34STobias Röhmel return false; 2381fca45e34STobias Röhmel } 2382fca45e34STobias Röhmel 2383fca45e34STobias Röhmel return regime_sctlr(env, mmu_idx) & SCTLR_BR; 2384c8e436c9SRichard Henderson } 2385c8e436c9SRichard Henderson 2386a5637becSPeter Maydell static bool get_phys_addr_pmsav7(CPUARMState *env, 2387a5637becSPeter Maydell S1Translate *ptw, 2388a5637becSPeter Maydell uint32_t address, 2389a5637becSPeter Maydell MMUAccessType access_type, 2390a5637becSPeter Maydell GetPhysAddrResult *result, 23911f2e87e5SRichard Henderson ARMMMUFaultInfo *fi) 23921f2e87e5SRichard Henderson { 23931f2e87e5SRichard Henderson ARMCPU *cpu = env_archcpu(env); 23941f2e87e5SRichard Henderson int n; 2395a5637becSPeter Maydell ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 23961f2e87e5SRichard Henderson bool is_user = regime_is_user(env, mmu_idx); 2397a5637becSPeter Maydell bool secure = arm_space_is_secure(ptw->in_space); 23981f2e87e5SRichard Henderson 23997fa7ea8fSRichard Henderson result->f.phys_addr = address; 24007fa7ea8fSRichard Henderson result->f.lg_page_size = TARGET_PAGE_BITS; 24017fa7ea8fSRichard Henderson result->f.prot = 0; 24021f2e87e5SRichard Henderson 2403d1289140SPeter Maydell if (regime_translation_disabled(env, mmu_idx, ptw->in_space) || 24041f2e87e5SRichard Henderson m_is_ppb_region(env, address)) { 24051f2e87e5SRichard Henderson /* 24061f2e87e5SRichard Henderson * MPU disabled or M profile PPB access: use default memory map. 24071f2e87e5SRichard Henderson * The other case which uses the default memory map in the 24081f2e87e5SRichard Henderson * v7M ARM ARM pseudocode is exception vector reads from the vector 24091f2e87e5SRichard Henderson * table. In QEMU those accesses are done in arm_v7m_load_vector(), 24101f2e87e5SRichard Henderson * which always does a direct read using address_space_ldl(), rather 24111f2e87e5SRichard Henderson * than going via this function, so we don't need to check that here. 24121f2e87e5SRichard Henderson */ 24137fa7ea8fSRichard Henderson get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); 24141f2e87e5SRichard Henderson } else { /* MPU enabled */ 24151f2e87e5SRichard Henderson for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 24161f2e87e5SRichard Henderson /* region search */ 24171f2e87e5SRichard Henderson uint32_t base = env->pmsav7.drbar[n]; 24181f2e87e5SRichard Henderson uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 24191f2e87e5SRichard Henderson uint32_t rmask; 24201f2e87e5SRichard Henderson bool srdis = false; 24211f2e87e5SRichard Henderson 24221f2e87e5SRichard Henderson if (!(env->pmsav7.drsr[n] & 0x1)) { 24231f2e87e5SRichard Henderson continue; 24241f2e87e5SRichard Henderson } 24251f2e87e5SRichard Henderson 24261f2e87e5SRichard Henderson if (!rsize) { 24271f2e87e5SRichard Henderson qemu_log_mask(LOG_GUEST_ERROR, 24281f2e87e5SRichard Henderson "DRSR[%d]: Rsize field cannot be 0\n", n); 24291f2e87e5SRichard Henderson continue; 24301f2e87e5SRichard Henderson } 24311f2e87e5SRichard Henderson rsize++; 24321f2e87e5SRichard Henderson rmask = (1ull << rsize) - 1; 24331f2e87e5SRichard Henderson 24341f2e87e5SRichard Henderson if (base & rmask) { 24351f2e87e5SRichard Henderson qemu_log_mask(LOG_GUEST_ERROR, 24361f2e87e5SRichard Henderson "DRBAR[%d]: 0x%" PRIx32 " misaligned " 24371f2e87e5SRichard Henderson "to DRSR region size, mask = 0x%" PRIx32 "\n", 24381f2e87e5SRichard Henderson n, base, rmask); 24391f2e87e5SRichard Henderson continue; 24401f2e87e5SRichard Henderson } 24411f2e87e5SRichard Henderson 24421f2e87e5SRichard Henderson if (address < base || address > base + rmask) { 24431f2e87e5SRichard Henderson /* 24441f2e87e5SRichard Henderson * Address not in this region. We must check whether the 24451f2e87e5SRichard Henderson * region covers addresses in the same page as our address. 24461f2e87e5SRichard Henderson * In that case we must not report a size that covers the 24471f2e87e5SRichard Henderson * whole page for a subsequent hit against a different MPU 24481f2e87e5SRichard Henderson * region or the background region, because it would result in 24491f2e87e5SRichard Henderson * incorrect TLB hits for subsequent accesses to addresses that 24501f2e87e5SRichard Henderson * are in this MPU region. 24511f2e87e5SRichard Henderson */ 24521f2e87e5SRichard Henderson if (ranges_overlap(base, rmask, 24531f2e87e5SRichard Henderson address & TARGET_PAGE_MASK, 24541f2e87e5SRichard Henderson TARGET_PAGE_SIZE)) { 24557fa7ea8fSRichard Henderson result->f.lg_page_size = 0; 24561f2e87e5SRichard Henderson } 24571f2e87e5SRichard Henderson continue; 24581f2e87e5SRichard Henderson } 24591f2e87e5SRichard Henderson 24601f2e87e5SRichard Henderson /* Region matched */ 24611f2e87e5SRichard Henderson 24621f2e87e5SRichard Henderson if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 24631f2e87e5SRichard Henderson int i, snd; 24641f2e87e5SRichard Henderson uint32_t srdis_mask; 24651f2e87e5SRichard Henderson 24661f2e87e5SRichard Henderson rsize -= 3; /* sub region size (power of 2) */ 24671f2e87e5SRichard Henderson snd = ((address - base) >> rsize) & 0x7; 24681f2e87e5SRichard Henderson srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 24691f2e87e5SRichard Henderson 24701f2e87e5SRichard Henderson srdis_mask = srdis ? 0x3 : 0x0; 24711f2e87e5SRichard Henderson for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 24721f2e87e5SRichard Henderson /* 24731f2e87e5SRichard Henderson * This will check in groups of 2, 4 and then 8, whether 24741f2e87e5SRichard Henderson * the subregion bits are consistent. rsize is incremented 24751f2e87e5SRichard Henderson * back up to give the region size, considering consistent 24761f2e87e5SRichard Henderson * adjacent subregions as one region. Stop testing if rsize 24771f2e87e5SRichard Henderson * is already big enough for an entire QEMU page. 24781f2e87e5SRichard Henderson */ 24791f2e87e5SRichard Henderson int snd_rounded = snd & ~(i - 1); 24801f2e87e5SRichard Henderson uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 24811f2e87e5SRichard Henderson snd_rounded + 8, i); 24821f2e87e5SRichard Henderson if (srdis_mask ^ srdis_multi) { 24831f2e87e5SRichard Henderson break; 24841f2e87e5SRichard Henderson } 24851f2e87e5SRichard Henderson srdis_mask = (srdis_mask << i) | srdis_mask; 24861f2e87e5SRichard Henderson rsize++; 24871f2e87e5SRichard Henderson } 24881f2e87e5SRichard Henderson } 24891f2e87e5SRichard Henderson if (srdis) { 24901f2e87e5SRichard Henderson continue; 24911f2e87e5SRichard Henderson } 24921f2e87e5SRichard Henderson if (rsize < TARGET_PAGE_BITS) { 24937fa7ea8fSRichard Henderson result->f.lg_page_size = rsize; 24941f2e87e5SRichard Henderson } 24951f2e87e5SRichard Henderson break; 24961f2e87e5SRichard Henderson } 24971f2e87e5SRichard Henderson 24981f2e87e5SRichard Henderson if (n == -1) { /* no hits */ 24991a469cf7SRichard Henderson if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { 25001f2e87e5SRichard Henderson /* background fault */ 25011f2e87e5SRichard Henderson fi->type = ARMFault_Background; 25021f2e87e5SRichard Henderson return true; 25031f2e87e5SRichard Henderson } 25047fa7ea8fSRichard Henderson get_phys_addr_pmsav7_default(env, mmu_idx, address, 25057fa7ea8fSRichard Henderson &result->f.prot); 25061f2e87e5SRichard Henderson } else { /* a MPU hit! */ 25071f2e87e5SRichard Henderson uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 25081f2e87e5SRichard Henderson uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 25091f2e87e5SRichard Henderson 25101f2e87e5SRichard Henderson if (m_is_system_region(env, address)) { 25111f2e87e5SRichard Henderson /* System space is always execute never */ 25121f2e87e5SRichard Henderson xn = 1; 25131f2e87e5SRichard Henderson } 25141f2e87e5SRichard Henderson 25151f2e87e5SRichard Henderson if (is_user) { /* User mode AP bit decoding */ 25161f2e87e5SRichard Henderson switch (ap) { 25171f2e87e5SRichard Henderson case 0: 25181f2e87e5SRichard Henderson case 1: 25191f2e87e5SRichard Henderson case 5: 25201f2e87e5SRichard Henderson break; /* no access */ 25211f2e87e5SRichard Henderson case 3: 25227fa7ea8fSRichard Henderson result->f.prot |= PAGE_WRITE; 25231f2e87e5SRichard Henderson /* fall through */ 25241f2e87e5SRichard Henderson case 2: 25251f2e87e5SRichard Henderson case 6: 25267fa7ea8fSRichard Henderson result->f.prot |= PAGE_READ | PAGE_EXEC; 25271f2e87e5SRichard Henderson break; 25281f2e87e5SRichard Henderson case 7: 25291f2e87e5SRichard Henderson /* for v7M, same as 6; for R profile a reserved value */ 25301f2e87e5SRichard Henderson if (arm_feature(env, ARM_FEATURE_M)) { 25317fa7ea8fSRichard Henderson result->f.prot |= PAGE_READ | PAGE_EXEC; 25321f2e87e5SRichard Henderson break; 25331f2e87e5SRichard Henderson } 25341f2e87e5SRichard Henderson /* fall through */ 25351f2e87e5SRichard Henderson default: 25361f2e87e5SRichard Henderson qemu_log_mask(LOG_GUEST_ERROR, 25371f2e87e5SRichard Henderson "DRACR[%d]: Bad value for AP bits: 0x%" 25381f2e87e5SRichard Henderson PRIx32 "\n", n, ap); 25391f2e87e5SRichard Henderson } 25401f2e87e5SRichard Henderson } else { /* Priv. mode AP bits decoding */ 25411f2e87e5SRichard Henderson switch (ap) { 25421f2e87e5SRichard Henderson case 0: 25431f2e87e5SRichard Henderson break; /* no access */ 25441f2e87e5SRichard Henderson case 1: 25451f2e87e5SRichard Henderson case 2: 25461f2e87e5SRichard Henderson case 3: 25477fa7ea8fSRichard Henderson result->f.prot |= PAGE_WRITE; 25481f2e87e5SRichard Henderson /* fall through */ 25491f2e87e5SRichard Henderson case 5: 25501f2e87e5SRichard Henderson case 6: 25517fa7ea8fSRichard Henderson result->f.prot |= PAGE_READ | PAGE_EXEC; 25521f2e87e5SRichard Henderson break; 25531f2e87e5SRichard Henderson case 7: 25541f2e87e5SRichard Henderson /* for v7M, same as 6; for R profile a reserved value */ 25551f2e87e5SRichard Henderson if (arm_feature(env, ARM_FEATURE_M)) { 25567fa7ea8fSRichard Henderson result->f.prot |= PAGE_READ | PAGE_EXEC; 25571f2e87e5SRichard Henderson break; 25581f2e87e5SRichard Henderson } 25591f2e87e5SRichard Henderson /* fall through */ 25601f2e87e5SRichard Henderson default: 25611f2e87e5SRichard Henderson qemu_log_mask(LOG_GUEST_ERROR, 25621f2e87e5SRichard Henderson "DRACR[%d]: Bad value for AP bits: 0x%" 25631f2e87e5SRichard Henderson PRIx32 "\n", n, ap); 25641f2e87e5SRichard Henderson } 25651f2e87e5SRichard Henderson } 25661f2e87e5SRichard Henderson 25671f2e87e5SRichard Henderson /* execute never */ 25681f2e87e5SRichard Henderson if (xn) { 25697fa7ea8fSRichard Henderson result->f.prot &= ~PAGE_EXEC; 25701f2e87e5SRichard Henderson } 25711f2e87e5SRichard Henderson } 25721f2e87e5SRichard Henderson } 25731f2e87e5SRichard Henderson 25741f2e87e5SRichard Henderson fi->type = ARMFault_Permission; 25751f2e87e5SRichard Henderson fi->level = 1; 25767fa7ea8fSRichard Henderson return !(result->f.prot & (1 << access_type)); 25771f2e87e5SRichard Henderson } 25781f2e87e5SRichard Henderson 2579fca45e34STobias Röhmel static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx, 2580fca45e34STobias Röhmel uint32_t secure) 2581fca45e34STobias Röhmel { 2582fca45e34STobias Röhmel if (regime_el(env, mmu_idx) == 2) { 2583fca45e34STobias Röhmel return env->pmsav8.hprbar; 2584fca45e34STobias Röhmel } else { 2585fca45e34STobias Röhmel return env->pmsav8.rbar[secure]; 2586fca45e34STobias Röhmel } 2587fca45e34STobias Röhmel } 2588fca45e34STobias Röhmel 2589fca45e34STobias Röhmel static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx, 2590fca45e34STobias Röhmel uint32_t secure) 2591fca45e34STobias Röhmel { 2592fca45e34STobias Röhmel if (regime_el(env, mmu_idx) == 2) { 2593fca45e34STobias Röhmel return env->pmsav8.hprlar; 2594fca45e34STobias Röhmel } else { 2595fca45e34STobias Röhmel return env->pmsav8.rlar[secure]; 2596fca45e34STobias Röhmel } 2597fca45e34STobias Röhmel } 2598fca45e34STobias Röhmel 2599fedbaa05SRichard Henderson bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 2600fedbaa05SRichard Henderson MMUAccessType access_type, ARMMMUIdx mmu_idx, 2601e9fb7090SRichard Henderson bool secure, GetPhysAddrResult *result, 2602e9fb7090SRichard Henderson ARMMMUFaultInfo *fi, uint32_t *mregion) 2603fedbaa05SRichard Henderson { 2604fedbaa05SRichard Henderson /* 2605fedbaa05SRichard Henderson * Perform a PMSAv8 MPU lookup (without also doing the SAU check 2606fedbaa05SRichard Henderson * that a full phys-to-virt translation does). 2607fedbaa05SRichard Henderson * mregion is (if not NULL) set to the region number which matched, 2608fedbaa05SRichard Henderson * or -1 if no region number is returned (MPU off, address did not 2609fedbaa05SRichard Henderson * hit a region, address hit in multiple regions). 2610652c750eSRichard Henderson * If the region hit doesn't cover the entire TARGET_PAGE the address 2611652c750eSRichard Henderson * is within, then we set the result page_size to 1 to force the 2612652c750eSRichard Henderson * memory system to use a subpage. 2613fedbaa05SRichard Henderson */ 2614fedbaa05SRichard Henderson ARMCPU *cpu = env_archcpu(env); 2615fedbaa05SRichard Henderson bool is_user = regime_is_user(env, mmu_idx); 2616fedbaa05SRichard Henderson int n; 2617fedbaa05SRichard Henderson int matchregion = -1; 2618fedbaa05SRichard Henderson bool hit = false; 2619fedbaa05SRichard Henderson uint32_t addr_page_base = address & TARGET_PAGE_MASK; 2620fedbaa05SRichard Henderson uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 2621fca45e34STobias Röhmel int region_counter; 2622fca45e34STobias Röhmel 2623fca45e34STobias Röhmel if (regime_el(env, mmu_idx) == 2) { 2624fca45e34STobias Röhmel region_counter = cpu->pmsav8r_hdregion; 2625fca45e34STobias Röhmel } else { 2626fca45e34STobias Röhmel region_counter = cpu->pmsav7_dregion; 2627fca45e34STobias Röhmel } 2628fedbaa05SRichard Henderson 26297fa7ea8fSRichard Henderson result->f.lg_page_size = TARGET_PAGE_BITS; 26307fa7ea8fSRichard Henderson result->f.phys_addr = address; 26317fa7ea8fSRichard Henderson result->f.prot = 0; 2632fedbaa05SRichard Henderson if (mregion) { 2633fedbaa05SRichard Henderson *mregion = -1; 2634fedbaa05SRichard Henderson } 2635fedbaa05SRichard Henderson 2636fca45e34STobias Röhmel if (mmu_idx == ARMMMUIdx_Stage2) { 2637fca45e34STobias Röhmel fi->stage2 = true; 2638fca45e34STobias Röhmel } 2639fca45e34STobias Röhmel 2640fedbaa05SRichard Henderson /* 2641fedbaa05SRichard Henderson * Unlike the ARM ARM pseudocode, we don't need to check whether this 2642fedbaa05SRichard Henderson * was an exception vector read from the vector table (which is always 2643fedbaa05SRichard Henderson * done using the default system address map), because those accesses 2644fedbaa05SRichard Henderson * are done in arm_v7m_load_vector(), which always does a direct 2645fedbaa05SRichard Henderson * read using address_space_ldl(), rather than going via this function. 2646fedbaa05SRichard Henderson */ 2647d1289140SPeter Maydell if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) { 2648d1289140SPeter Maydell /* MPU disabled */ 2649fedbaa05SRichard Henderson hit = true; 2650fedbaa05SRichard Henderson } else if (m_is_ppb_region(env, address)) { 2651fedbaa05SRichard Henderson hit = true; 2652fedbaa05SRichard Henderson } else { 26531a469cf7SRichard Henderson if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { 2654fedbaa05SRichard Henderson hit = true; 2655fedbaa05SRichard Henderson } 2656fedbaa05SRichard Henderson 2657fca45e34STobias Röhmel uint32_t bitmask; 2658fca45e34STobias Röhmel if (arm_feature(env, ARM_FEATURE_M)) { 2659fca45e34STobias Röhmel bitmask = 0x1f; 2660fca45e34STobias Röhmel } else { 2661fca45e34STobias Röhmel bitmask = 0x3f; 2662fca45e34STobias Röhmel fi->level = 0; 2663fca45e34STobias Röhmel } 2664fca45e34STobias Röhmel 2665fca45e34STobias Röhmel for (n = region_counter - 1; n >= 0; n--) { 2666fedbaa05SRichard Henderson /* region search */ 2667fedbaa05SRichard Henderson /* 2668fca45e34STobias Röhmel * Note that the base address is bits [31:x] from the register 2669fca45e34STobias Röhmel * with bits [x-1:0] all zeroes, but the limit address is bits 2670fca45e34STobias Röhmel * [31:x] from the register with bits [x:0] all ones. Where x is 2671fca45e34STobias Röhmel * 5 for Cortex-M and 6 for Cortex-R 2672fedbaa05SRichard Henderson */ 2673fca45e34STobias Röhmel uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask; 2674fca45e34STobias Röhmel uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask; 2675fedbaa05SRichard Henderson 2676fca45e34STobias Röhmel if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) { 2677fedbaa05SRichard Henderson /* Region disabled */ 2678fedbaa05SRichard Henderson continue; 2679fedbaa05SRichard Henderson } 2680fedbaa05SRichard Henderson 2681fedbaa05SRichard Henderson if (address < base || address > limit) { 2682fedbaa05SRichard Henderson /* 2683fedbaa05SRichard Henderson * Address not in this region. We must check whether the 2684fedbaa05SRichard Henderson * region covers addresses in the same page as our address. 2685fedbaa05SRichard Henderson * In that case we must not report a size that covers the 2686fedbaa05SRichard Henderson * whole page for a subsequent hit against a different MPU 2687fedbaa05SRichard Henderson * region or the background region, because it would result in 2688fedbaa05SRichard Henderson * incorrect TLB hits for subsequent accesses to addresses that 2689fedbaa05SRichard Henderson * are in this MPU region. 2690fedbaa05SRichard Henderson */ 2691fedbaa05SRichard Henderson if (limit >= base && 2692fedbaa05SRichard Henderson ranges_overlap(base, limit - base + 1, 2693fedbaa05SRichard Henderson addr_page_base, 2694fedbaa05SRichard Henderson TARGET_PAGE_SIZE)) { 26957fa7ea8fSRichard Henderson result->f.lg_page_size = 0; 2696fedbaa05SRichard Henderson } 2697fedbaa05SRichard Henderson continue; 2698fedbaa05SRichard Henderson } 2699fedbaa05SRichard Henderson 2700fedbaa05SRichard Henderson if (base > addr_page_base || limit < addr_page_limit) { 27017fa7ea8fSRichard Henderson result->f.lg_page_size = 0; 2702fedbaa05SRichard Henderson } 2703fedbaa05SRichard Henderson 2704fedbaa05SRichard Henderson if (matchregion != -1) { 2705fedbaa05SRichard Henderson /* 2706fedbaa05SRichard Henderson * Multiple regions match -- always a failure (unlike 2707fedbaa05SRichard Henderson * PMSAv7 where highest-numbered-region wins) 2708fedbaa05SRichard Henderson */ 2709fedbaa05SRichard Henderson fi->type = ARMFault_Permission; 2710fca45e34STobias Röhmel if (arm_feature(env, ARM_FEATURE_M)) { 2711fedbaa05SRichard Henderson fi->level = 1; 2712fca45e34STobias Röhmel } 2713fedbaa05SRichard Henderson return true; 2714fedbaa05SRichard Henderson } 2715fedbaa05SRichard Henderson 2716fedbaa05SRichard Henderson matchregion = n; 2717fedbaa05SRichard Henderson hit = true; 2718fedbaa05SRichard Henderson } 2719fedbaa05SRichard Henderson } 2720fedbaa05SRichard Henderson 2721fedbaa05SRichard Henderson if (!hit) { 2722fca45e34STobias Röhmel if (arm_feature(env, ARM_FEATURE_M)) { 2723fedbaa05SRichard Henderson fi->type = ARMFault_Background; 2724fca45e34STobias Röhmel } else { 2725fca45e34STobias Röhmel fi->type = ARMFault_Permission; 2726fca45e34STobias Röhmel } 2727fedbaa05SRichard Henderson return true; 2728fedbaa05SRichard Henderson } 2729fedbaa05SRichard Henderson 2730fedbaa05SRichard Henderson if (matchregion == -1) { 2731fedbaa05SRichard Henderson /* hit using the background region */ 27327fa7ea8fSRichard Henderson get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); 2733fedbaa05SRichard Henderson } else { 2734fca45e34STobias Röhmel uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion]; 2735fca45e34STobias Röhmel uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion]; 2736fca45e34STobias Röhmel uint32_t ap = extract32(matched_rbar, 1, 2); 2737fca45e34STobias Röhmel uint32_t xn = extract32(matched_rbar, 0, 1); 2738fedbaa05SRichard Henderson bool pxn = false; 2739fedbaa05SRichard Henderson 2740fedbaa05SRichard Henderson if (arm_feature(env, ARM_FEATURE_V8_1M)) { 2741fca45e34STobias Röhmel pxn = extract32(matched_rlar, 4, 1); 2742fedbaa05SRichard Henderson } 2743fedbaa05SRichard Henderson 2744fedbaa05SRichard Henderson if (m_is_system_region(env, address)) { 2745fedbaa05SRichard Henderson /* System space is always execute never */ 2746fedbaa05SRichard Henderson xn = 1; 2747fedbaa05SRichard Henderson } 2748fedbaa05SRichard Henderson 2749fca45e34STobias Röhmel if (regime_el(env, mmu_idx) == 2) { 2750fca45e34STobias Röhmel result->f.prot = simple_ap_to_rw_prot_is_user(ap, 2751fca45e34STobias Röhmel mmu_idx != ARMMMUIdx_E2); 2752fca45e34STobias Röhmel } else { 27537fa7ea8fSRichard Henderson result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 2754fca45e34STobias Röhmel } 2755fca45e34STobias Röhmel 2756fca45e34STobias Röhmel if (!arm_feature(env, ARM_FEATURE_M)) { 2757fca45e34STobias Röhmel uint8_t attrindx = extract32(matched_rlar, 1, 3); 2758fca45e34STobias Röhmel uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 2759fca45e34STobias Röhmel uint8_t sh = extract32(matched_rlar, 3, 2); 2760fca45e34STobias Röhmel 2761fca45e34STobias Röhmel if (regime_sctlr(env, mmu_idx) & SCTLR_WXN && 2762fca45e34STobias Röhmel result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) { 2763fca45e34STobias Röhmel xn = 0x1; 2764fca45e34STobias Röhmel } 2765fca45e34STobias Röhmel 2766fca45e34STobias Röhmel if ((regime_el(env, mmu_idx) == 1) && 2767fca45e34STobias Röhmel regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) { 2768fca45e34STobias Röhmel pxn = 0x1; 2769fca45e34STobias Röhmel } 2770fca45e34STobias Röhmel 2771fca45e34STobias Röhmel result->cacheattrs.is_s2_format = false; 2772fca45e34STobias Röhmel result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); 2773fca45e34STobias Röhmel result->cacheattrs.shareability = sh; 2774fca45e34STobias Röhmel } 2775fca45e34STobias Röhmel 27767fa7ea8fSRichard Henderson if (result->f.prot && !xn && !(pxn && !is_user)) { 27777fa7ea8fSRichard Henderson result->f.prot |= PAGE_EXEC; 2778fedbaa05SRichard Henderson } 2779fca45e34STobias Röhmel 2780fedbaa05SRichard Henderson if (mregion) { 2781fedbaa05SRichard Henderson *mregion = matchregion; 2782fedbaa05SRichard Henderson } 2783fedbaa05SRichard Henderson } 2784fedbaa05SRichard Henderson 2785fedbaa05SRichard Henderson fi->type = ARMFault_Permission; 2786fca45e34STobias Röhmel if (arm_feature(env, ARM_FEATURE_M)) { 2787fedbaa05SRichard Henderson fi->level = 1; 2788fca45e34STobias Röhmel } 27897fa7ea8fSRichard Henderson return !(result->f.prot & (1 << access_type)); 2790fedbaa05SRichard Henderson } 2791fedbaa05SRichard Henderson 27922c1f429dSRichard Henderson static bool v8m_is_sau_exempt(CPUARMState *env, 27932c1f429dSRichard Henderson uint32_t address, MMUAccessType access_type) 27942c1f429dSRichard Henderson { 27952c1f429dSRichard Henderson /* 27962c1f429dSRichard Henderson * The architecture specifies that certain address ranges are 27972c1f429dSRichard Henderson * exempt from v8M SAU/IDAU checks. 27982c1f429dSRichard Henderson */ 27992c1f429dSRichard Henderson return 28002c1f429dSRichard Henderson (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 28012c1f429dSRichard Henderson (address >= 0xe0000000 && address <= 0xe0002fff) || 28022c1f429dSRichard Henderson (address >= 0xe000e000 && address <= 0xe000efff) || 28032c1f429dSRichard Henderson (address >= 0xe002e000 && address <= 0xe002efff) || 28042c1f429dSRichard Henderson (address >= 0xe0040000 && address <= 0xe0041fff) || 28052c1f429dSRichard Henderson (address >= 0xe00ff000 && address <= 0xe00fffff); 28062c1f429dSRichard Henderson } 28072c1f429dSRichard Henderson 28082c1f429dSRichard Henderson void v8m_security_lookup(CPUARMState *env, uint32_t address, 28092c1f429dSRichard Henderson MMUAccessType access_type, ARMMMUIdx mmu_idx, 2810dbf2a71aSRichard Henderson bool is_secure, V8M_SAttributes *sattrs) 28112c1f429dSRichard Henderson { 28122c1f429dSRichard Henderson /* 28132c1f429dSRichard Henderson * Look up the security attributes for this address. Compare the 28142c1f429dSRichard Henderson * pseudocode SecurityCheck() function. 28152c1f429dSRichard Henderson * We assume the caller has zero-initialized *sattrs. 28162c1f429dSRichard Henderson */ 28172c1f429dSRichard Henderson ARMCPU *cpu = env_archcpu(env); 28182c1f429dSRichard Henderson int r; 28192c1f429dSRichard Henderson bool idau_exempt = false, idau_ns = true, idau_nsc = true; 28202c1f429dSRichard Henderson int idau_region = IREGION_NOTVALID; 28212c1f429dSRichard Henderson uint32_t addr_page_base = address & TARGET_PAGE_MASK; 28222c1f429dSRichard Henderson uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 28232c1f429dSRichard Henderson 28242c1f429dSRichard Henderson if (cpu->idau) { 28252c1f429dSRichard Henderson IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 28262c1f429dSRichard Henderson IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 28272c1f429dSRichard Henderson 28282c1f429dSRichard Henderson iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 28292c1f429dSRichard Henderson &idau_nsc); 28302c1f429dSRichard Henderson } 28312c1f429dSRichard Henderson 28322c1f429dSRichard Henderson if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 28332c1f429dSRichard Henderson /* 0xf0000000..0xffffffff is always S for insn fetches */ 28342c1f429dSRichard Henderson return; 28352c1f429dSRichard Henderson } 28362c1f429dSRichard Henderson 28372c1f429dSRichard Henderson if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 2838dbf2a71aSRichard Henderson sattrs->ns = !is_secure; 28392c1f429dSRichard Henderson return; 28402c1f429dSRichard Henderson } 28412c1f429dSRichard Henderson 28422c1f429dSRichard Henderson if (idau_region != IREGION_NOTVALID) { 28432c1f429dSRichard Henderson sattrs->irvalid = true; 28442c1f429dSRichard Henderson sattrs->iregion = idau_region; 28452c1f429dSRichard Henderson } 28462c1f429dSRichard Henderson 28472c1f429dSRichard Henderson switch (env->sau.ctrl & 3) { 28482c1f429dSRichard Henderson case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 28492c1f429dSRichard Henderson break; 28502c1f429dSRichard Henderson case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 28512c1f429dSRichard Henderson sattrs->ns = true; 28522c1f429dSRichard Henderson break; 28532c1f429dSRichard Henderson default: /* SAU.ENABLE == 1 */ 28542c1f429dSRichard Henderson for (r = 0; r < cpu->sau_sregion; r++) { 28552c1f429dSRichard Henderson if (env->sau.rlar[r] & 1) { 28562c1f429dSRichard Henderson uint32_t base = env->sau.rbar[r] & ~0x1f; 28572c1f429dSRichard Henderson uint32_t limit = env->sau.rlar[r] | 0x1f; 28582c1f429dSRichard Henderson 28592c1f429dSRichard Henderson if (base <= address && limit >= address) { 28602c1f429dSRichard Henderson if (base > addr_page_base || limit < addr_page_limit) { 28612c1f429dSRichard Henderson sattrs->subpage = true; 28622c1f429dSRichard Henderson } 28632c1f429dSRichard Henderson if (sattrs->srvalid) { 28642c1f429dSRichard Henderson /* 28652c1f429dSRichard Henderson * If we hit in more than one region then we must report 28662c1f429dSRichard Henderson * as Secure, not NS-Callable, with no valid region 28672c1f429dSRichard Henderson * number info. 28682c1f429dSRichard Henderson */ 28692c1f429dSRichard Henderson sattrs->ns = false; 28702c1f429dSRichard Henderson sattrs->nsc = false; 28712c1f429dSRichard Henderson sattrs->sregion = 0; 28722c1f429dSRichard Henderson sattrs->srvalid = false; 28732c1f429dSRichard Henderson break; 28742c1f429dSRichard Henderson } else { 28752c1f429dSRichard Henderson if (env->sau.rlar[r] & 2) { 28762c1f429dSRichard Henderson sattrs->nsc = true; 28772c1f429dSRichard Henderson } else { 28782c1f429dSRichard Henderson sattrs->ns = true; 28792c1f429dSRichard Henderson } 28802c1f429dSRichard Henderson sattrs->srvalid = true; 28812c1f429dSRichard Henderson sattrs->sregion = r; 28822c1f429dSRichard Henderson } 28832c1f429dSRichard Henderson } else { 28842c1f429dSRichard Henderson /* 28852c1f429dSRichard Henderson * Address not in this region. We must check whether the 28862c1f429dSRichard Henderson * region covers addresses in the same page as our address. 28872c1f429dSRichard Henderson * In that case we must not report a size that covers the 28882c1f429dSRichard Henderson * whole page for a subsequent hit against a different MPU 28892c1f429dSRichard Henderson * region or the background region, because it would result 28902c1f429dSRichard Henderson * in incorrect TLB hits for subsequent accesses to 28912c1f429dSRichard Henderson * addresses that are in this MPU region. 28922c1f429dSRichard Henderson */ 28932c1f429dSRichard Henderson if (limit >= base && 28942c1f429dSRichard Henderson ranges_overlap(base, limit - base + 1, 28952c1f429dSRichard Henderson addr_page_base, 28962c1f429dSRichard Henderson TARGET_PAGE_SIZE)) { 28972c1f429dSRichard Henderson sattrs->subpage = true; 28982c1f429dSRichard Henderson } 28992c1f429dSRichard Henderson } 29002c1f429dSRichard Henderson } 29012c1f429dSRichard Henderson } 29022c1f429dSRichard Henderson break; 29032c1f429dSRichard Henderson } 29042c1f429dSRichard Henderson 29052c1f429dSRichard Henderson /* 29062c1f429dSRichard Henderson * The IDAU will override the SAU lookup results if it specifies 29072c1f429dSRichard Henderson * higher security than the SAU does. 29082c1f429dSRichard Henderson */ 29092c1f429dSRichard Henderson if (!idau_ns) { 29102c1f429dSRichard Henderson if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 29112c1f429dSRichard Henderson sattrs->ns = false; 29122c1f429dSRichard Henderson sattrs->nsc = idau_nsc; 29132c1f429dSRichard Henderson } 29142c1f429dSRichard Henderson } 29152c1f429dSRichard Henderson } 29162c1f429dSRichard Henderson 2917a5637becSPeter Maydell static bool get_phys_addr_pmsav8(CPUARMState *env, 2918a5637becSPeter Maydell S1Translate *ptw, 2919a5637becSPeter Maydell uint32_t address, 2920a5637becSPeter Maydell MMUAccessType access_type, 2921a5637becSPeter Maydell GetPhysAddrResult *result, 2922730d5c31SRichard Henderson ARMMMUFaultInfo *fi) 2923730d5c31SRichard Henderson { 2924730d5c31SRichard Henderson V8M_SAttributes sattrs = {}; 2925a5637becSPeter Maydell ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 2926a5637becSPeter Maydell bool secure = arm_space_is_secure(ptw->in_space); 2927730d5c31SRichard Henderson bool ret; 2928730d5c31SRichard Henderson 2929730d5c31SRichard Henderson if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 2930dbf2a71aSRichard Henderson v8m_security_lookup(env, address, access_type, mmu_idx, 2931dbf2a71aSRichard Henderson secure, &sattrs); 2932730d5c31SRichard Henderson if (access_type == MMU_INST_FETCH) { 2933730d5c31SRichard Henderson /* 2934730d5c31SRichard Henderson * Instruction fetches always use the MMU bank and the 2935730d5c31SRichard Henderson * transaction attribute determined by the fetch address, 2936730d5c31SRichard Henderson * regardless of CPU state. This is painful for QEMU 2937730d5c31SRichard Henderson * to handle, because it would mean we need to encode 2938730d5c31SRichard Henderson * into the mmu_idx not just the (user, negpri) information 2939730d5c31SRichard Henderson * for the current security state but also that for the 2940730d5c31SRichard Henderson * other security state, which would balloon the number 2941730d5c31SRichard Henderson * of mmu_idx values needed alarmingly. 2942730d5c31SRichard Henderson * Fortunately we can avoid this because it's not actually 2943730d5c31SRichard Henderson * possible to arbitrarily execute code from memory with 2944730d5c31SRichard Henderson * the wrong security attribute: it will always generate 2945730d5c31SRichard Henderson * an exception of some kind or another, apart from the 2946730d5c31SRichard Henderson * special case of an NS CPU executing an SG instruction 2947730d5c31SRichard Henderson * in S&NSC memory. So we always just fail the translation 2948730d5c31SRichard Henderson * here and sort things out in the exception handler 2949730d5c31SRichard Henderson * (including possibly emulating an SG instruction). 2950730d5c31SRichard Henderson */ 2951730d5c31SRichard Henderson if (sattrs.ns != !secure) { 2952730d5c31SRichard Henderson if (sattrs.nsc) { 2953730d5c31SRichard Henderson fi->type = ARMFault_QEMU_NSCExec; 2954730d5c31SRichard Henderson } else { 2955730d5c31SRichard Henderson fi->type = ARMFault_QEMU_SFault; 2956730d5c31SRichard Henderson } 29577fa7ea8fSRichard Henderson result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; 29587fa7ea8fSRichard Henderson result->f.phys_addr = address; 29597fa7ea8fSRichard Henderson result->f.prot = 0; 2960730d5c31SRichard Henderson return true; 2961730d5c31SRichard Henderson } 2962730d5c31SRichard Henderson } else { 2963730d5c31SRichard Henderson /* 2964730d5c31SRichard Henderson * For data accesses we always use the MMU bank indicated 2965730d5c31SRichard Henderson * by the current CPU state, but the security attributes 2966730d5c31SRichard Henderson * might downgrade a secure access to nonsecure. 2967730d5c31SRichard Henderson */ 2968730d5c31SRichard Henderson if (sattrs.ns) { 29697fa7ea8fSRichard Henderson result->f.attrs.secure = false; 297090c66293SRichard Henderson result->f.attrs.space = ARMSS_NonSecure; 2971730d5c31SRichard Henderson } else if (!secure) { 2972730d5c31SRichard Henderson /* 2973730d5c31SRichard Henderson * NS access to S memory must fault. 2974730d5c31SRichard Henderson * Architecturally we should first check whether the 2975730d5c31SRichard Henderson * MPU information for this address indicates that we 2976730d5c31SRichard Henderson * are doing an unaligned access to Device memory, which 2977730d5c31SRichard Henderson * should generate a UsageFault instead. QEMU does not 2978730d5c31SRichard Henderson * currently check for that kind of unaligned access though. 2979730d5c31SRichard Henderson * If we added it we would need to do so as a special case 2980730d5c31SRichard Henderson * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 2981730d5c31SRichard Henderson */ 2982730d5c31SRichard Henderson fi->type = ARMFault_QEMU_SFault; 29837fa7ea8fSRichard Henderson result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; 29847fa7ea8fSRichard Henderson result->f.phys_addr = address; 29857fa7ea8fSRichard Henderson result->f.prot = 0; 2986730d5c31SRichard Henderson return true; 2987730d5c31SRichard Henderson } 2988730d5c31SRichard Henderson } 2989730d5c31SRichard Henderson } 2990730d5c31SRichard Henderson 2991e9fb7090SRichard Henderson ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure, 2992652c750eSRichard Henderson result, fi, NULL); 2993652c750eSRichard Henderson if (sattrs.subpage) { 29947fa7ea8fSRichard Henderson result->f.lg_page_size = 0; 2995652c750eSRichard Henderson } 2996730d5c31SRichard Henderson return ret; 2997730d5c31SRichard Henderson } 2998730d5c31SRichard Henderson 2999966f4bb7SRichard Henderson /* 3000966f4bb7SRichard Henderson * Translate from the 4-bit stage 2 representation of 3001966f4bb7SRichard Henderson * memory attributes (without cache-allocation hints) to 3002966f4bb7SRichard Henderson * the 8-bit representation of the stage 1 MAIR registers 3003966f4bb7SRichard Henderson * (which includes allocation hints). 3004966f4bb7SRichard Henderson * 3005966f4bb7SRichard Henderson * ref: shared/translation/attrs/S2AttrDecode() 3006966f4bb7SRichard Henderson * .../S2ConvertAttrsHints() 3007966f4bb7SRichard Henderson */ 3008ac76c2e5SRichard Henderson static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs) 3009966f4bb7SRichard Henderson { 3010966f4bb7SRichard Henderson uint8_t hiattr = extract32(s2attrs, 2, 2); 3011966f4bb7SRichard Henderson uint8_t loattr = extract32(s2attrs, 0, 2); 3012966f4bb7SRichard Henderson uint8_t hihint = 0, lohint = 0; 3013966f4bb7SRichard Henderson 3014966f4bb7SRichard Henderson if (hiattr != 0) { /* normal memory */ 3015ac76c2e5SRichard Henderson if (hcr & HCR_CD) { /* cache disabled */ 3016966f4bb7SRichard Henderson hiattr = loattr = 1; /* non-cacheable */ 3017966f4bb7SRichard Henderson } else { 3018966f4bb7SRichard Henderson if (hiattr != 1) { /* Write-through or write-back */ 3019966f4bb7SRichard Henderson hihint = 3; /* RW allocate */ 3020966f4bb7SRichard Henderson } 3021966f4bb7SRichard Henderson if (loattr != 1) { /* Write-through or write-back */ 3022966f4bb7SRichard Henderson lohint = 3; /* RW allocate */ 3023966f4bb7SRichard Henderson } 3024966f4bb7SRichard Henderson } 3025966f4bb7SRichard Henderson } 3026966f4bb7SRichard Henderson 3027966f4bb7SRichard Henderson return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 3028966f4bb7SRichard Henderson } 3029966f4bb7SRichard Henderson 3030966f4bb7SRichard Henderson /* 3031966f4bb7SRichard Henderson * Combine either inner or outer cacheability attributes for normal 3032966f4bb7SRichard Henderson * memory, according to table D4-42 and pseudocode procedure 3033966f4bb7SRichard Henderson * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 3034966f4bb7SRichard Henderson * 3035966f4bb7SRichard Henderson * NB: only stage 1 includes allocation hints (RW bits), leading to 3036966f4bb7SRichard Henderson * some asymmetry. 3037966f4bb7SRichard Henderson */ 3038966f4bb7SRichard Henderson static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 3039966f4bb7SRichard Henderson { 3040966f4bb7SRichard Henderson if (s1 == 4 || s2 == 4) { 3041966f4bb7SRichard Henderson /* non-cacheable has precedence */ 3042966f4bb7SRichard Henderson return 4; 3043966f4bb7SRichard Henderson } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 3044966f4bb7SRichard Henderson /* stage 1 write-through takes precedence */ 3045966f4bb7SRichard Henderson return s1; 3046966f4bb7SRichard Henderson } else if (extract32(s2, 2, 2) == 2) { 3047966f4bb7SRichard Henderson /* stage 2 write-through takes precedence, but the allocation hint 3048966f4bb7SRichard Henderson * is still taken from stage 1 3049966f4bb7SRichard Henderson */ 3050966f4bb7SRichard Henderson return (2 << 2) | extract32(s1, 0, 2); 3051966f4bb7SRichard Henderson } else { /* write-back */ 3052966f4bb7SRichard Henderson return s1; 3053966f4bb7SRichard Henderson } 3054966f4bb7SRichard Henderson } 3055966f4bb7SRichard Henderson 3056966f4bb7SRichard Henderson /* 3057966f4bb7SRichard Henderson * Combine the memory type and cacheability attributes of 3058966f4bb7SRichard Henderson * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the 3059966f4bb7SRichard Henderson * combined attributes in MAIR_EL1 format. 3060966f4bb7SRichard Henderson */ 3061ac76c2e5SRichard Henderson static uint8_t combined_attrs_nofwb(uint64_t hcr, 3062966f4bb7SRichard Henderson ARMCacheAttrs s1, ARMCacheAttrs s2) 3063966f4bb7SRichard Henderson { 3064966f4bb7SRichard Henderson uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs; 3065966f4bb7SRichard Henderson 3066faa1451eSTobias Röhmel if (s2.is_s2_format) { 3067ac76c2e5SRichard Henderson s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs); 3068faa1451eSTobias Röhmel } else { 3069faa1451eSTobias Röhmel s2_mair_attrs = s2.attrs; 3070faa1451eSTobias Röhmel } 3071966f4bb7SRichard Henderson 3072966f4bb7SRichard Henderson s1lo = extract32(s1.attrs, 0, 4); 3073966f4bb7SRichard Henderson s2lo = extract32(s2_mair_attrs, 0, 4); 3074966f4bb7SRichard Henderson s1hi = extract32(s1.attrs, 4, 4); 3075966f4bb7SRichard Henderson s2hi = extract32(s2_mair_attrs, 4, 4); 3076966f4bb7SRichard Henderson 3077966f4bb7SRichard Henderson /* Combine memory type and cacheability attributes */ 3078966f4bb7SRichard Henderson if (s1hi == 0 || s2hi == 0) { 3079966f4bb7SRichard Henderson /* Device has precedence over normal */ 3080966f4bb7SRichard Henderson if (s1lo == 0 || s2lo == 0) { 3081966f4bb7SRichard Henderson /* nGnRnE has precedence over anything */ 3082966f4bb7SRichard Henderson ret_attrs = 0; 3083966f4bb7SRichard Henderson } else if (s1lo == 4 || s2lo == 4) { 3084966f4bb7SRichard Henderson /* non-Reordering has precedence over Reordering */ 3085966f4bb7SRichard Henderson ret_attrs = 4; /* nGnRE */ 3086966f4bb7SRichard Henderson } else if (s1lo == 8 || s2lo == 8) { 3087966f4bb7SRichard Henderson /* non-Gathering has precedence over Gathering */ 3088966f4bb7SRichard Henderson ret_attrs = 8; /* nGRE */ 3089966f4bb7SRichard Henderson } else { 3090966f4bb7SRichard Henderson ret_attrs = 0xc; /* GRE */ 3091966f4bb7SRichard Henderson } 3092966f4bb7SRichard Henderson } else { /* Normal memory */ 3093966f4bb7SRichard Henderson /* Outer/inner cacheability combine independently */ 3094966f4bb7SRichard Henderson ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 3095966f4bb7SRichard Henderson | combine_cacheattr_nibble(s1lo, s2lo); 3096966f4bb7SRichard Henderson } 3097966f4bb7SRichard Henderson return ret_attrs; 3098966f4bb7SRichard Henderson } 3099966f4bb7SRichard Henderson 3100966f4bb7SRichard Henderson static uint8_t force_cacheattr_nibble_wb(uint8_t attr) 3101966f4bb7SRichard Henderson { 3102966f4bb7SRichard Henderson /* 3103966f4bb7SRichard Henderson * Given the 4 bits specifying the outer or inner cacheability 3104966f4bb7SRichard Henderson * in MAIR format, return a value specifying Normal Write-Back, 3105966f4bb7SRichard Henderson * with the allocation and transient hints taken from the input 3106966f4bb7SRichard Henderson * if the input specified some kind of cacheable attribute. 3107966f4bb7SRichard Henderson */ 3108966f4bb7SRichard Henderson if (attr == 0 || attr == 4) { 3109966f4bb7SRichard Henderson /* 3110966f4bb7SRichard Henderson * 0 == an UNPREDICTABLE encoding 3111966f4bb7SRichard Henderson * 4 == Non-cacheable 3112966f4bb7SRichard Henderson * Either way, force Write-Back RW allocate non-transient 3113966f4bb7SRichard Henderson */ 3114966f4bb7SRichard Henderson return 0xf; 3115966f4bb7SRichard Henderson } 3116966f4bb7SRichard Henderson /* Change WriteThrough to WriteBack, keep allocation and transient hints */ 3117966f4bb7SRichard Henderson return attr | 4; 3118966f4bb7SRichard Henderson } 3119966f4bb7SRichard Henderson 3120966f4bb7SRichard Henderson /* 3121966f4bb7SRichard Henderson * Combine the memory type and cacheability attributes of 3122966f4bb7SRichard Henderson * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the 3123966f4bb7SRichard Henderson * combined attributes in MAIR_EL1 format. 3124966f4bb7SRichard Henderson */ 312572cef09cSRichard Henderson static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2) 3126966f4bb7SRichard Henderson { 3127faa1451eSTobias Röhmel assert(s2.is_s2_format && !s1.is_s2_format); 3128faa1451eSTobias Röhmel 3129966f4bb7SRichard Henderson switch (s2.attrs) { 3130966f4bb7SRichard Henderson case 7: 3131966f4bb7SRichard Henderson /* Use stage 1 attributes */ 3132966f4bb7SRichard Henderson return s1.attrs; 3133966f4bb7SRichard Henderson case 6: 3134966f4bb7SRichard Henderson /* 3135966f4bb7SRichard Henderson * Force Normal Write-Back. Note that if S1 is Normal cacheable 3136966f4bb7SRichard Henderson * then we take the allocation hints from it; otherwise it is 3137966f4bb7SRichard Henderson * RW allocate, non-transient. 3138966f4bb7SRichard Henderson */ 3139966f4bb7SRichard Henderson if ((s1.attrs & 0xf0) == 0) { 3140966f4bb7SRichard Henderson /* S1 is Device */ 3141966f4bb7SRichard Henderson return 0xff; 3142966f4bb7SRichard Henderson } 3143966f4bb7SRichard Henderson /* Need to check the Inner and Outer nibbles separately */ 3144966f4bb7SRichard Henderson return force_cacheattr_nibble_wb(s1.attrs & 0xf) | 3145966f4bb7SRichard Henderson force_cacheattr_nibble_wb(s1.attrs >> 4) << 4; 3146966f4bb7SRichard Henderson case 5: 3147966f4bb7SRichard Henderson /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */ 3148966f4bb7SRichard Henderson if ((s1.attrs & 0xf0) == 0) { 3149966f4bb7SRichard Henderson return s1.attrs; 3150966f4bb7SRichard Henderson } 3151966f4bb7SRichard Henderson return 0x44; 3152966f4bb7SRichard Henderson case 0 ... 3: 3153966f4bb7SRichard Henderson /* Force Device, of subtype specified by S2 */ 3154966f4bb7SRichard Henderson return s2.attrs << 2; 3155966f4bb7SRichard Henderson default: 3156966f4bb7SRichard Henderson /* 3157966f4bb7SRichard Henderson * RESERVED values (including RES0 descriptor bit [5] being nonzero); 3158966f4bb7SRichard Henderson * arbitrarily force Device. 3159966f4bb7SRichard Henderson */ 3160966f4bb7SRichard Henderson return 0; 3161966f4bb7SRichard Henderson } 3162966f4bb7SRichard Henderson } 3163966f4bb7SRichard Henderson 3164966f4bb7SRichard Henderson /* 3165966f4bb7SRichard Henderson * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 3166966f4bb7SRichard Henderson * and CombineS1S2Desc() 3167966f4bb7SRichard Henderson * 3168966f4bb7SRichard Henderson * @env: CPUARMState 3169966f4bb7SRichard Henderson * @s1: Attributes from stage 1 walk 3170966f4bb7SRichard Henderson * @s2: Attributes from stage 2 walk 3171966f4bb7SRichard Henderson */ 3172ac76c2e5SRichard Henderson static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, 3173966f4bb7SRichard Henderson ARMCacheAttrs s1, ARMCacheAttrs s2) 3174966f4bb7SRichard Henderson { 3175966f4bb7SRichard Henderson ARMCacheAttrs ret; 3176966f4bb7SRichard Henderson bool tagged = false; 3177966f4bb7SRichard Henderson 3178faa1451eSTobias Röhmel assert(!s1.is_s2_format); 3179966f4bb7SRichard Henderson ret.is_s2_format = false; 3180966f4bb7SRichard Henderson 3181966f4bb7SRichard Henderson if (s1.attrs == 0xf0) { 3182966f4bb7SRichard Henderson tagged = true; 3183966f4bb7SRichard Henderson s1.attrs = 0xff; 3184966f4bb7SRichard Henderson } 3185966f4bb7SRichard Henderson 3186966f4bb7SRichard Henderson /* Combine shareability attributes (table D4-43) */ 3187966f4bb7SRichard Henderson if (s1.shareability == 2 || s2.shareability == 2) { 3188966f4bb7SRichard Henderson /* if either are outer-shareable, the result is outer-shareable */ 3189966f4bb7SRichard Henderson ret.shareability = 2; 3190966f4bb7SRichard Henderson } else if (s1.shareability == 3 || s2.shareability == 3) { 3191966f4bb7SRichard Henderson /* if either are inner-shareable, the result is inner-shareable */ 3192966f4bb7SRichard Henderson ret.shareability = 3; 3193966f4bb7SRichard Henderson } else { 3194966f4bb7SRichard Henderson /* both non-shareable */ 3195966f4bb7SRichard Henderson ret.shareability = 0; 3196966f4bb7SRichard Henderson } 3197966f4bb7SRichard Henderson 3198966f4bb7SRichard Henderson /* Combine memory type and cacheability attributes */ 3199ac76c2e5SRichard Henderson if (hcr & HCR_FWB) { 320072cef09cSRichard Henderson ret.attrs = combined_attrs_fwb(s1, s2); 3201966f4bb7SRichard Henderson } else { 3202ac76c2e5SRichard Henderson ret.attrs = combined_attrs_nofwb(hcr, s1, s2); 3203966f4bb7SRichard Henderson } 3204966f4bb7SRichard Henderson 3205966f4bb7SRichard Henderson /* 3206966f4bb7SRichard Henderson * Any location for which the resultant memory type is any 3207966f4bb7SRichard Henderson * type of Device memory is always treated as Outer Shareable. 3208966f4bb7SRichard Henderson * Any location for which the resultant memory type is Normal 3209966f4bb7SRichard Henderson * Inner Non-cacheable, Outer Non-cacheable is always treated 3210966f4bb7SRichard Henderson * as Outer Shareable. 3211966f4bb7SRichard Henderson * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC 3212966f4bb7SRichard Henderson */ 3213966f4bb7SRichard Henderson if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) { 3214966f4bb7SRichard Henderson ret.shareability = 2; 3215966f4bb7SRichard Henderson } 3216966f4bb7SRichard Henderson 3217966f4bb7SRichard Henderson /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ 3218966f4bb7SRichard Henderson if (tagged && ret.attrs == 0xff) { 3219966f4bb7SRichard Henderson ret.attrs = 0xf0; 3220966f4bb7SRichard Henderson } 3221966f4bb7SRichard Henderson 3222966f4bb7SRichard Henderson return ret; 3223966f4bb7SRichard Henderson } 3224966f4bb7SRichard Henderson 3225448e42fdSRichard Henderson /* 3226448e42fdSRichard Henderson * MMU disabled. S1 addresses within aa64 translation regimes are 3227448e42fdSRichard Henderson * still checked for bounds -- see AArch64.S1DisabledOutput(). 3228448e42fdSRichard Henderson */ 3229a5637becSPeter Maydell static bool get_phys_addr_disabled(CPUARMState *env, 3230a5637becSPeter Maydell S1Translate *ptw, 323167d762e7SArd Biesheuvel vaddr address, 3232448e42fdSRichard Henderson MMUAccessType access_type, 3233448e42fdSRichard Henderson GetPhysAddrResult *result, 3234448e42fdSRichard Henderson ARMMMUFaultInfo *fi) 3235448e42fdSRichard Henderson { 3236a5637becSPeter Maydell ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 32375b74f9b4SRichard Henderson uint8_t memattr = 0x00; /* Device nGnRnE */ 323846f38c97SRichard Henderson uint8_t shareability = 0; /* non-shareable */ 3239a1ce3084SRichard Henderson int r_el; 3240448e42fdSRichard Henderson 3241a1ce3084SRichard Henderson switch (mmu_idx) { 3242a1ce3084SRichard Henderson case ARMMMUIdx_Stage2: 3243a1ce3084SRichard Henderson case ARMMMUIdx_Stage2_S: 3244a1ce3084SRichard Henderson case ARMMMUIdx_Phys_S: 3245bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_NS: 3246bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_Root: 3247bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_Realm: 3248a1ce3084SRichard Henderson break; 32495b74f9b4SRichard Henderson 3250a1ce3084SRichard Henderson default: 3251a1ce3084SRichard Henderson r_el = regime_el(env, mmu_idx); 3252448e42fdSRichard Henderson if (arm_el_is_aa64(env, r_el)) { 3253448e42fdSRichard Henderson int pamax = arm_pamax(env_archcpu(env)); 3254448e42fdSRichard Henderson uint64_t tcr = env->cp15.tcr_el[r_el]; 3255448e42fdSRichard Henderson int addrtop, tbi; 3256448e42fdSRichard Henderson 3257448e42fdSRichard Henderson tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 3258448e42fdSRichard Henderson if (access_type == MMU_INST_FETCH) { 3259448e42fdSRichard Henderson tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 3260448e42fdSRichard Henderson } 3261448e42fdSRichard Henderson tbi = (tbi >> extract64(address, 55, 1)) & 1; 3262448e42fdSRichard Henderson addrtop = (tbi ? 55 : 63); 3263448e42fdSRichard Henderson 3264448e42fdSRichard Henderson if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 3265448e42fdSRichard Henderson fi->type = ARMFault_AddressSize; 3266448e42fdSRichard Henderson fi->level = 0; 3267448e42fdSRichard Henderson fi->stage2 = false; 3268448e42fdSRichard Henderson return 1; 3269448e42fdSRichard Henderson } 3270448e42fdSRichard Henderson 3271448e42fdSRichard Henderson /* 3272448e42fdSRichard Henderson * When TBI is disabled, we've just validated that all of the 3273448e42fdSRichard Henderson * bits above PAMax are zero, so logically we only need to 3274448e42fdSRichard Henderson * clear the top byte for TBI. But it's clearer to follow 3275448e42fdSRichard Henderson * the pseudocode set of addrdesc.paddress. 3276448e42fdSRichard Henderson */ 3277448e42fdSRichard Henderson address = extract64(address, 0, 52); 3278448e42fdSRichard Henderson } 3279448e42fdSRichard Henderson 3280448e42fdSRichard Henderson /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ 32815b74f9b4SRichard Henderson if (r_el == 1) { 32822d12bb96SPeter Maydell uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 3283448e42fdSRichard Henderson if (hcr & HCR_DC) { 3284448e42fdSRichard Henderson if (hcr & HCR_DCT) { 3285448e42fdSRichard Henderson memattr = 0xf0; /* Tagged, Normal, WB, RWA */ 3286448e42fdSRichard Henderson } else { 3287448e42fdSRichard Henderson memattr = 0xff; /* Normal, WB, RWA */ 3288448e42fdSRichard Henderson } 32895b74f9b4SRichard Henderson } 32905b74f9b4SRichard Henderson } 32913d9ca962SPeter Maydell if (memattr == 0) { 32923d9ca962SPeter Maydell if (access_type == MMU_INST_FETCH) { 3293448e42fdSRichard Henderson if (regime_sctlr(env, mmu_idx) & SCTLR_I) { 3294448e42fdSRichard Henderson memattr = 0xee; /* Normal, WT, RA, NT */ 3295448e42fdSRichard Henderson } else { 3296448e42fdSRichard Henderson memattr = 0x44; /* Normal, NC, No */ 3297448e42fdSRichard Henderson } 32983d9ca962SPeter Maydell } 329946f38c97SRichard Henderson shareability = 2; /* outer shareable */ 3300448e42fdSRichard Henderson } 33015b74f9b4SRichard Henderson result->cacheattrs.is_s2_format = false; 3302a1ce3084SRichard Henderson break; 33035b74f9b4SRichard Henderson } 33045b74f9b4SRichard Henderson 33057fa7ea8fSRichard Henderson result->f.phys_addr = address; 33067fa7ea8fSRichard Henderson result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 33077fa7ea8fSRichard Henderson result->f.lg_page_size = TARGET_PAGE_BITS; 33085b74f9b4SRichard Henderson result->cacheattrs.shareability = shareability; 3309448e42fdSRichard Henderson result->cacheattrs.attrs = memattr; 33106b72c542SRichard Henderson return false; 3311448e42fdSRichard Henderson } 3312448e42fdSRichard Henderson 33133f5a74c5SRichard Henderson static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, 331467d762e7SArd Biesheuvel vaddr address, 331521e5a287SRichard Henderson MMUAccessType access_type, MemOp memop, 33164a358556SRichard Henderson GetPhysAddrResult *result, 3317def8aa5bSRichard Henderson ARMMMUFaultInfo *fi) 33188ae08860SRichard Henderson { 33198ae08860SRichard Henderson hwaddr ipa; 3320c8d6c286SRichard Henderson int s1_prot, s1_lgpgsz; 3321eeb9578cSPeter Maydell ARMSecuritySpace in_space = ptw->in_space; 33224c09abeaSPeter Maydell bool ret, ipa_secure, s1_guarded; 3323de05a709SRichard Henderson ARMCacheAttrs cacheattrs1; 332490c66293SRichard Henderson ARMSecuritySpace ipa_space; 3325ac76c2e5SRichard Henderson uint64_t hcr; 33268ae08860SRichard Henderson 332721e5a287SRichard Henderson ret = get_phys_addr_nogpc(env, ptw, address, access_type, 332821e5a287SRichard Henderson memop, result, fi); 33298ae08860SRichard Henderson 333026ba00cfSPeter Maydell /* If S1 fails, return early. */ 333126ba00cfSPeter Maydell if (ret) { 33328ae08860SRichard Henderson return ret; 33338ae08860SRichard Henderson } 33348ae08860SRichard Henderson 33357fa7ea8fSRichard Henderson ipa = result->f.phys_addr; 33367fa7ea8fSRichard Henderson ipa_secure = result->f.attrs.secure; 333790c66293SRichard Henderson ipa_space = result->f.attrs.space; 33388ae08860SRichard Henderson 33397c19b2d6SRichard Henderson ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; 3340fcc0b041SPeter Maydell ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 334190c66293SRichard Henderson ptw->in_space = ipa_space; 3342fcc0b041SPeter Maydell ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx); 33438ae08860SRichard Henderson 3344de05a709SRichard Henderson /* 3345de05a709SRichard Henderson * S1 is done, now do S2 translation. 33463f5a74c5SRichard Henderson * Save the stage1 results so that we may merge prot and cacheattrs later. 3347de05a709SRichard Henderson */ 33487fa7ea8fSRichard Henderson s1_prot = result->f.prot; 3349c8d6c286SRichard Henderson s1_lgpgsz = result->f.lg_page_size; 33504c09abeaSPeter Maydell s1_guarded = result->f.extra.arm.guarded; 3351de05a709SRichard Henderson cacheattrs1 = result->cacheattrs; 3352de05a709SRichard Henderson memset(result, 0, sizeof(*result)); 3353de05a709SRichard Henderson 335421e5a287SRichard Henderson ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, 335521e5a287SRichard Henderson memop, result, fi); 33568ae08860SRichard Henderson fi->s2addr = ipa; 3357de05a709SRichard Henderson 33588ae08860SRichard Henderson /* Combine the S1 and S2 perms. */ 33597fa7ea8fSRichard Henderson result->f.prot &= s1_prot; 33608ae08860SRichard Henderson 33618ae08860SRichard Henderson /* If S2 fails, return early. */ 33628ae08860SRichard Henderson if (ret) { 33638ae08860SRichard Henderson return ret; 33648ae08860SRichard Henderson } 33658ae08860SRichard Henderson 3366c8d6c286SRichard Henderson /* 33679e65f4e6SPeter Maydell * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE, 33689e65f4e6SPeter Maydell * this means "don't put this in the TLB"; in this case, return a 33699e65f4e6SPeter Maydell * result with lg_page_size == 0 to achieve that. Otherwise, 33709e65f4e6SPeter Maydell * use the maximum of the S1 & S2 page size, so that invalidation 33719e65f4e6SPeter Maydell * of pages > TARGET_PAGE_SIZE works correctly. (This works even though 33729e65f4e6SPeter Maydell * we know the combined result permissions etc only cover the minimum 33739e65f4e6SPeter Maydell * of the S1 and S2 page size, because we know that the common TLB code 33749e65f4e6SPeter Maydell * never actually creates TLB entries bigger than TARGET_PAGE_SIZE, 33759e65f4e6SPeter Maydell * and passing a larger page size value only affects invalidations.) 3376c8d6c286SRichard Henderson */ 33779e65f4e6SPeter Maydell if (result->f.lg_page_size < TARGET_PAGE_BITS || 33789e65f4e6SPeter Maydell s1_lgpgsz < TARGET_PAGE_BITS) { 33799e65f4e6SPeter Maydell result->f.lg_page_size = 0; 33809e65f4e6SPeter Maydell } else if (result->f.lg_page_size < s1_lgpgsz) { 3381c8d6c286SRichard Henderson result->f.lg_page_size = s1_lgpgsz; 3382c8d6c286SRichard Henderson } 3383c8d6c286SRichard Henderson 33848ae08860SRichard Henderson /* Combine the S1 and S2 cache attributes. */ 33852d12bb96SPeter Maydell hcr = arm_hcr_el2_eff_secstate(env, in_space); 3386ac76c2e5SRichard Henderson if (hcr & HCR_DC) { 33878ae08860SRichard Henderson /* 33888ae08860SRichard Henderson * HCR.DC forces the first stage attributes to 33898ae08860SRichard Henderson * Normal Non-Shareable, 33908ae08860SRichard Henderson * Inner Write-Back Read-Allocate Write-Allocate, 33918ae08860SRichard Henderson * Outer Write-Back Read-Allocate Write-Allocate. 33928ae08860SRichard Henderson * Do not overwrite Tagged within attrs. 33938ae08860SRichard Henderson */ 3394de05a709SRichard Henderson if (cacheattrs1.attrs != 0xf0) { 3395de05a709SRichard Henderson cacheattrs1.attrs = 0xff; 33968ae08860SRichard Henderson } 3397de05a709SRichard Henderson cacheattrs1.shareability = 0; 33988ae08860SRichard Henderson } 3399ac76c2e5SRichard Henderson result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1, 3400de05a709SRichard Henderson result->cacheattrs); 34018ae08860SRichard Henderson 34024c09abeaSPeter Maydell /* No BTI GP information in stage 2, we just use the S1 value */ 34034c09abeaSPeter Maydell result->f.extra.arm.guarded = s1_guarded; 34044c09abeaSPeter Maydell 34059b5ba97aSRichard Henderson /* 34069b5ba97aSRichard Henderson * Check if IPA translates to secure or non-secure PA space. 34079b5ba97aSRichard Henderson * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. 34089b5ba97aSRichard Henderson */ 3409eeb9578cSPeter Maydell if (in_space == ARMSS_Secure) { 34107fa7ea8fSRichard Henderson result->f.attrs.secure = 3411eeb9578cSPeter Maydell !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) 34129b5ba97aSRichard Henderson && (ipa_secure 3413eeb9578cSPeter Maydell || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))); 3414eeb9578cSPeter Maydell result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); 3415eeb9578cSPeter Maydell } 34169b5ba97aSRichard Henderson 34176b72c542SRichard Henderson return false; 34183f5a74c5SRichard Henderson } 34193f5a74c5SRichard Henderson 342046f38c97SRichard Henderson static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, 342167d762e7SArd Biesheuvel vaddr address, 3422c6cd9f9fSRichard Henderson MMUAccessType access_type, MemOp memop, 34233f5a74c5SRichard Henderson GetPhysAddrResult *result, 34243f5a74c5SRichard Henderson ARMMMUFaultInfo *fi) 34253f5a74c5SRichard Henderson { 34263f5a74c5SRichard Henderson ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 342748da29e4SRichard Henderson ARMMMUIdx s1_mmu_idx; 34283f5a74c5SRichard Henderson 3429cead7fa4SRichard Henderson /* 343090c66293SRichard Henderson * The page table entries may downgrade Secure to NonSecure, but 343190c66293SRichard Henderson * cannot upgrade a NonSecure translation regime's attributes 343290c66293SRichard Henderson * to Secure or Realm. 3433cead7fa4SRichard Henderson */ 343490c66293SRichard Henderson result->f.attrs.space = ptw->in_space; 3435cdbae5e7SPeter Maydell result->f.attrs.secure = arm_space_is_secure(ptw->in_space); 3436cead7fa4SRichard Henderson 343748da29e4SRichard Henderson switch (mmu_idx) { 343848da29e4SRichard Henderson case ARMMMUIdx_Phys_S: 343948da29e4SRichard Henderson case ARMMMUIdx_Phys_NS: 3440bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_Root: 3441bb5cc2c8SRichard Henderson case ARMMMUIdx_Phys_Realm: 344248da29e4SRichard Henderson /* Checking Phys early avoids special casing later vs regime_el. */ 3443a5637becSPeter Maydell return get_phys_addr_disabled(env, ptw, address, access_type, 3444a5637becSPeter Maydell result, fi); 344548da29e4SRichard Henderson 344648da29e4SRichard Henderson case ARMMMUIdx_Stage1_E0: 344748da29e4SRichard Henderson case ARMMMUIdx_Stage1_E1: 344848da29e4SRichard Henderson case ARMMMUIdx_Stage1_E1_PAN: 3449cdbae5e7SPeter Maydell /* 3450cdbae5e7SPeter Maydell * First stage lookup uses second stage for ptw; only 3451cdbae5e7SPeter Maydell * Secure has both S and NS IPA and starts with Stage2_S. 3452cdbae5e7SPeter Maydell */ 3453cdbae5e7SPeter Maydell ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ? 3454cdbae5e7SPeter Maydell ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 345548da29e4SRichard Henderson break; 345648da29e4SRichard Henderson 3457fcc0b041SPeter Maydell case ARMMMUIdx_Stage2: 3458fcc0b041SPeter Maydell case ARMMMUIdx_Stage2_S: 3459fcc0b041SPeter Maydell /* 3460fcc0b041SPeter Maydell * Second stage lookup uses physical for ptw; whether this is S or 3461fcc0b041SPeter Maydell * NS may depend on the SW/NSW bits if this is a stage 2 lookup for 3462fcc0b041SPeter Maydell * the Secure EL2&0 regime. 3463fcc0b041SPeter Maydell */ 3464fcc0b041SPeter Maydell ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx); 3465fcc0b041SPeter Maydell break; 3466fcc0b041SPeter Maydell 346748da29e4SRichard Henderson case ARMMMUIdx_E10_0: 346848da29e4SRichard Henderson s1_mmu_idx = ARMMMUIdx_Stage1_E0; 346948da29e4SRichard Henderson goto do_twostage; 347048da29e4SRichard Henderson case ARMMMUIdx_E10_1: 347148da29e4SRichard Henderson s1_mmu_idx = ARMMMUIdx_Stage1_E1; 347248da29e4SRichard Henderson goto do_twostage; 347348da29e4SRichard Henderson case ARMMMUIdx_E10_1_PAN: 347448da29e4SRichard Henderson s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN; 347548da29e4SRichard Henderson do_twostage: 34768ae08860SRichard Henderson /* 34773f5a74c5SRichard Henderson * Call ourselves recursively to do the stage 1 and then stage 2 34783f5a74c5SRichard Henderson * translations if mmu_idx is a two-stage regime, and EL2 present. 34793f5a74c5SRichard Henderson * Otherwise, a stage1+stage2 translation is just stage 1. 34808ae08860SRichard Henderson */ 34813f5a74c5SRichard Henderson ptw->in_mmu_idx = mmu_idx = s1_mmu_idx; 348226ba00cfSPeter Maydell if (arm_feature(env, ARM_FEATURE_EL2) && 3483d1289140SPeter Maydell !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) { 34843f5a74c5SRichard Henderson return get_phys_addr_twostage(env, ptw, address, access_type, 348521e5a287SRichard Henderson memop, result, fi); 34868ae08860SRichard Henderson } 348748da29e4SRichard Henderson /* fall through */ 348848da29e4SRichard Henderson 348948da29e4SRichard Henderson default: 3490fcc0b041SPeter Maydell /* Single stage uses physical for ptw. */ 349190c66293SRichard Henderson ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space); 349248da29e4SRichard Henderson break; 34938ae08860SRichard Henderson } 34948ae08860SRichard Henderson 34957fa7ea8fSRichard Henderson result->f.attrs.user = regime_is_user(env, mmu_idx); 34968ae08860SRichard Henderson 34978ae08860SRichard Henderson /* 34988ae08860SRichard Henderson * Fast Context Switch Extension. This doesn't exist at all in v8. 34998ae08860SRichard Henderson * In v7 and earlier it affects all stage 1 translations. 35008ae08860SRichard Henderson */ 35018ae08860SRichard Henderson if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 35028ae08860SRichard Henderson && !arm_feature(env, ARM_FEATURE_V8)) { 35038ae08860SRichard Henderson if (regime_el(env, mmu_idx) == 3) { 35048ae08860SRichard Henderson address += env->cp15.fcseidr_s; 35058ae08860SRichard Henderson } else { 35068ae08860SRichard Henderson address += env->cp15.fcseidr_ns; 35078ae08860SRichard Henderson } 35088ae08860SRichard Henderson } 35098ae08860SRichard Henderson 35108ae08860SRichard Henderson if (arm_feature(env, ARM_FEATURE_PMSA)) { 35118ae08860SRichard Henderson bool ret; 35127fa7ea8fSRichard Henderson result->f.lg_page_size = TARGET_PAGE_BITS; 35138ae08860SRichard Henderson 35148ae08860SRichard Henderson if (arm_feature(env, ARM_FEATURE_V8)) { 35158ae08860SRichard Henderson /* PMSAv8 */ 3516a5637becSPeter Maydell ret = get_phys_addr_pmsav8(env, ptw, address, access_type, 3517a5637becSPeter Maydell result, fi); 35188ae08860SRichard Henderson } else if (arm_feature(env, ARM_FEATURE_V7)) { 35198ae08860SRichard Henderson /* PMSAv7 */ 3520a5637becSPeter Maydell ret = get_phys_addr_pmsav7(env, ptw, address, access_type, 3521a5637becSPeter Maydell result, fi); 35228ae08860SRichard Henderson } else { 35238ae08860SRichard Henderson /* Pre-v7 MPU */ 3524a5637becSPeter Maydell ret = get_phys_addr_pmsav5(env, ptw, address, access_type, 3525a5637becSPeter Maydell result, fi); 35268ae08860SRichard Henderson } 35278ae08860SRichard Henderson qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 35288ae08860SRichard Henderson " mmu_idx %u -> %s (prot %c%c%c)\n", 35298ae08860SRichard Henderson access_type == MMU_DATA_LOAD ? "reading" : 35308ae08860SRichard Henderson (access_type == MMU_DATA_STORE ? "writing" : "execute"), 35318ae08860SRichard Henderson (uint32_t)address, mmu_idx, 35328ae08860SRichard Henderson ret ? "Miss" : "Hit", 35337fa7ea8fSRichard Henderson result->f.prot & PAGE_READ ? 'r' : '-', 35347fa7ea8fSRichard Henderson result->f.prot & PAGE_WRITE ? 'w' : '-', 35357fa7ea8fSRichard Henderson result->f.prot & PAGE_EXEC ? 'x' : '-'); 35368ae08860SRichard Henderson 35378ae08860SRichard Henderson return ret; 35388ae08860SRichard Henderson } 35398ae08860SRichard Henderson 35408ae08860SRichard Henderson /* Definitely a real MMU, not an MPU */ 35418ae08860SRichard Henderson 3542d1289140SPeter Maydell if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { 3543a5637becSPeter Maydell return get_phys_addr_disabled(env, ptw, address, access_type, 3544a5637becSPeter Maydell result, fi); 35458ae08860SRichard Henderson } 35466d2654ffSRichard Henderson 35478ae08860SRichard Henderson if (regime_using_lpae_format(env, mmu_idx)) { 3548c053f40bSRichard Henderson return get_phys_addr_lpae(env, ptw, address, access_type, 3549c053f40bSRichard Henderson memop, result, fi); 35506f2d9d74STimofey Kutergin } else if (arm_feature(env, ARM_FEATURE_V7) || 35516f2d9d74STimofey Kutergin regime_sctlr(env, mmu_idx) & SCTLR_XP) { 35524a358556SRichard Henderson return get_phys_addr_v6(env, ptw, address, access_type, result, fi); 35538ae08860SRichard Henderson } else { 35544a358556SRichard Henderson return get_phys_addr_v5(env, ptw, address, access_type, result, fi); 35558ae08860SRichard Henderson } 35568ae08860SRichard Henderson } 355723971205SRichard Henderson 355846f38c97SRichard Henderson static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, 355967d762e7SArd Biesheuvel vaddr address, 35605458670bSRichard Henderson MMUAccessType access_type, MemOp memop, 356146f38c97SRichard Henderson GetPhysAddrResult *result, 356246f38c97SRichard Henderson ARMMMUFaultInfo *fi) 356346f38c97SRichard Henderson { 3564c6cd9f9fSRichard Henderson if (get_phys_addr_nogpc(env, ptw, address, access_type, 3565c6cd9f9fSRichard Henderson memop, result, fi)) { 356646f38c97SRichard Henderson return true; 356746f38c97SRichard Henderson } 356846f38c97SRichard Henderson if (!granule_protection_check(env, result->f.phys_addr, 356946f38c97SRichard Henderson result->f.attrs.space, fi)) { 357046f38c97SRichard Henderson fi->type = ARMFault_GPCFOnOutput; 357146f38c97SRichard Henderson return true; 357246f38c97SRichard Henderson } 357346f38c97SRichard Henderson return false; 357446f38c97SRichard Henderson } 357546f38c97SRichard Henderson 357667d762e7SArd Biesheuvel bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address, 357729b4d7dbSRichard Henderson MMUAccessType access_type, MemOp memop, 3578e1ee56ecSJean-Philippe Brucker ARMMMUIdx mmu_idx, ARMSecuritySpace space, 3579f1269a98SJean-Philippe Brucker GetPhysAddrResult *result, 35804a358556SRichard Henderson ARMMMUFaultInfo *fi) 35814a358556SRichard Henderson { 35824a358556SRichard Henderson S1Translate ptw = { 35834a358556SRichard Henderson .in_mmu_idx = mmu_idx, 3584e1ee56ecSJean-Philippe Brucker .in_space = space, 35854a358556SRichard Henderson }; 3586c6cd9f9fSRichard Henderson return get_phys_addr_nogpc(env, &ptw, address, access_type, 3587c6cd9f9fSRichard Henderson memop, result, fi); 35884a358556SRichard Henderson } 35894a358556SRichard Henderson 359067d762e7SArd Biesheuvel bool get_phys_addr(CPUARMState *env, vaddr address, 3591ec2c9337SRichard Henderson MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, 3592def8aa5bSRichard Henderson GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 3593def8aa5bSRichard Henderson { 359490c66293SRichard Henderson S1Translate ptw = { 359590c66293SRichard Henderson .in_mmu_idx = mmu_idx, 359690c66293SRichard Henderson }; 359790c66293SRichard Henderson ARMSecuritySpace ss; 359803bea66eSRichard Henderson 359903bea66eSRichard Henderson switch (mmu_idx) { 360003bea66eSRichard Henderson case ARMMMUIdx_E10_0: 360103bea66eSRichard Henderson case ARMMMUIdx_E10_1: 360203bea66eSRichard Henderson case ARMMMUIdx_E10_1_PAN: 360303bea66eSRichard Henderson case ARMMMUIdx_E20_0: 360403bea66eSRichard Henderson case ARMMMUIdx_E20_2: 360503bea66eSRichard Henderson case ARMMMUIdx_E20_2_PAN: 360603bea66eSRichard Henderson case ARMMMUIdx_Stage1_E0: 360703bea66eSRichard Henderson case ARMMMUIdx_Stage1_E1: 360803bea66eSRichard Henderson case ARMMMUIdx_Stage1_E1_PAN: 360903bea66eSRichard Henderson case ARMMMUIdx_E2: 36104c2c0474SPeter Maydell if (arm_aa32_secure_pl1_0(env)) { 36114c2c0474SPeter Maydell ss = ARMSS_Secure; 36124c2c0474SPeter Maydell } else { 361390c66293SRichard Henderson ss = arm_security_space_below_el3(env); 36144c2c0474SPeter Maydell } 3615d902ae75SRichard Henderson break; 361603bea66eSRichard Henderson case ARMMMUIdx_Stage2: 361790c66293SRichard Henderson /* 361890c66293SRichard Henderson * For Secure EL2, we need this index to be NonSecure; 361990c66293SRichard Henderson * otherwise this will already be NonSecure or Realm. 362090c66293SRichard Henderson */ 362190c66293SRichard Henderson ss = arm_security_space_below_el3(env); 362290c66293SRichard Henderson if (ss == ARMSS_Secure) { 362390c66293SRichard Henderson ss = ARMSS_NonSecure; 362490c66293SRichard Henderson } 362590c66293SRichard Henderson break; 3626a1ce3084SRichard Henderson case ARMMMUIdx_Phys_NS: 362703bea66eSRichard Henderson case ARMMMUIdx_MPrivNegPri: 362803bea66eSRichard Henderson case ARMMMUIdx_MUserNegPri: 362903bea66eSRichard Henderson case ARMMMUIdx_MPriv: 363003bea66eSRichard Henderson case ARMMMUIdx_MUser: 363190c66293SRichard Henderson ss = ARMSS_NonSecure; 363203bea66eSRichard Henderson break; 363303bea66eSRichard Henderson case ARMMMUIdx_Stage2_S: 3634a1ce3084SRichard Henderson case ARMMMUIdx_Phys_S: 363503bea66eSRichard Henderson case ARMMMUIdx_MSPrivNegPri: 363603bea66eSRichard Henderson case ARMMMUIdx_MSUserNegPri: 363703bea66eSRichard Henderson case ARMMMUIdx_MSPriv: 363803bea66eSRichard Henderson case ARMMMUIdx_MSUser: 363990c66293SRichard Henderson ss = ARMSS_Secure; 364090c66293SRichard Henderson break; 364190c66293SRichard Henderson case ARMMMUIdx_E3: 364290c66293SRichard Henderson if (arm_feature(env, ARM_FEATURE_AARCH64) && 364390c66293SRichard Henderson cpu_isar_feature(aa64_rme, env_archcpu(env))) { 364490c66293SRichard Henderson ss = ARMSS_Root; 364590c66293SRichard Henderson } else { 364690c66293SRichard Henderson ss = ARMSS_Secure; 364790c66293SRichard Henderson } 364890c66293SRichard Henderson break; 364990c66293SRichard Henderson case ARMMMUIdx_Phys_Root: 365090c66293SRichard Henderson ss = ARMSS_Root; 365190c66293SRichard Henderson break; 365290c66293SRichard Henderson case ARMMMUIdx_Phys_Realm: 365390c66293SRichard Henderson ss = ARMSS_Realm; 365403bea66eSRichard Henderson break; 365503bea66eSRichard Henderson default: 365603bea66eSRichard Henderson g_assert_not_reached(); 365703bea66eSRichard Henderson } 365890c66293SRichard Henderson 365990c66293SRichard Henderson ptw.in_space = ss; 36605458670bSRichard Henderson return get_phys_addr_gpc(env, &ptw, address, access_type, 36615458670bSRichard Henderson memop, result, fi); 3662def8aa5bSRichard Henderson } 3663def8aa5bSRichard Henderson 366423971205SRichard Henderson hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 366523971205SRichard Henderson MemTxAttrs *attrs) 366623971205SRichard Henderson { 366723971205SRichard Henderson ARMCPU *cpu = ARM_CPU(cs); 366823971205SRichard Henderson CPUARMState *env = &cpu->env; 366990c66293SRichard Henderson ARMMMUIdx mmu_idx = arm_mmu_idx(env); 367090c66293SRichard Henderson ARMSecuritySpace ss = arm_security_space(env); 36714a358556SRichard Henderson S1Translate ptw = { 367290c66293SRichard Henderson .in_mmu_idx = mmu_idx, 367390c66293SRichard Henderson .in_space = ss, 36744a358556SRichard Henderson .in_debug = true, 36754a358556SRichard Henderson }; 3676de05a709SRichard Henderson GetPhysAddrResult res = {}; 367723971205SRichard Henderson ARMMMUFaultInfo fi = {}; 3678de05a709SRichard Henderson bool ret; 367923971205SRichard Henderson 36805458670bSRichard Henderson ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi); 36817fa7ea8fSRichard Henderson *attrs = res.f.attrs; 368223971205SRichard Henderson 368323971205SRichard Henderson if (ret) { 368423971205SRichard Henderson return -1; 368523971205SRichard Henderson } 36867fa7ea8fSRichard Henderson return res.f.phys_addr; 368723971205SRichard Henderson } 3688