1 /* 2 * ARM page table walking. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/log.h" 11 #include "qemu/range.h" 12 #include "qemu/main-loop.h" 13 #include "exec/page-protection.h" 14 #include "exec/target_page.h" 15 #include "exec/tlb-flags.h" 16 #include "accel/tcg/probe.h" 17 #include "cpu.h" 18 #include "internals.h" 19 #include "cpu-features.h" 20 #include "idau.h" 21 22 typedef struct S1Translate { 23 /* 24 * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk. 25 * Together with in_space, specifies the architectural translation regime. 26 */ 27 ARMMMUIdx in_mmu_idx; 28 /* 29 * in_ptw_idx: specifies which mmuidx to use for the actual 30 * page table descriptor load operations. This will be one of the 31 * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes. 32 * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, 33 * this field is updated accordingly. 34 */ 35 ARMMMUIdx in_ptw_idx; 36 /* 37 * in_space: the security space for this walk. This plus 38 * the in_mmu_idx specify the architectural translation regime. 39 * 40 * Note that the security space for the in_ptw_idx may be different 41 * from that for the in_mmu_idx. We do not need to explicitly track 42 * the in_ptw_idx security space because: 43 * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx 44 * itself specifies the security space 45 * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security 46 * space used for ptw reads is the same as that of the security 47 * space of the stage 1 translation for all cases except where 48 * stage 1 is Secure; in that case the only possibilities for 49 * the ptw read are Secure and NonSecure, and the in_ptw_idx 50 * value being Stage2 vs Stage2_S distinguishes those. 51 */ 52 ARMSecuritySpace in_space; 53 /* 54 * Like in_space, except this may be "downgraded" to NonSecure 55 * by an NSTable bit. 56 */ 57 ARMSecuritySpace cur_space; 58 /* 59 * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug 60 * accesses will not update the guest page table access flags 61 * and will not change the state of the softmmu TLBs. 62 */ 63 bool in_debug; 64 /* 65 * in_at: is this AccessType_AT? 66 * This is also set for debug, because at heart that is also 67 * an address translation, and simplifies a test. 68 */ 69 bool in_at; 70 /* 71 * If this is stage 2 of a stage 1+2 page table walk, then this must 72 * be true if stage 1 is an EL0 access; otherwise this is ignored. 73 * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}. 74 */ 75 bool in_s1_is_el0; 76 /* 77 * The set of PAGE_* bits to be use in the permission check. 78 * This is normally directly related to the access_type, but 79 * may be suppressed for debug or AT insns. 80 */ 81 uint8_t in_prot_check; 82 /* Cached EffectiveHCR_EL2_NVx() bit */ 83 bool in_nv1; 84 bool out_rw; 85 bool out_be; 86 ARMSecuritySpace out_space; 87 hwaddr out_virt; 88 hwaddr out_phys; 89 void *out_host; 90 } S1Translate; 91 92 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, 93 vaddr address, 94 MMUAccessType access_type, MemOp memop, 95 GetPhysAddrResult *result, 96 ARMMMUFaultInfo *fi); 97 98 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, 99 vaddr address, 100 MMUAccessType access_type, MemOp memop, 101 GetPhysAddrResult *result, 102 ARMMMUFaultInfo *fi); 103 104 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 105 int user_rw, int prot_rw, int xn, int pxn, 106 ARMSecuritySpace in_pa, ARMSecuritySpace out_pa); 107 108 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */ 109 static const uint8_t pamax_map[] = { 110 [0] = 32, 111 [1] = 36, 112 [2] = 40, 113 [3] = 42, 114 [4] = 44, 115 [5] = 48, 116 [6] = 52, 117 }; 118 119 uint8_t round_down_to_parange_index(uint8_t bit_size) 120 { 121 for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) { 122 if (pamax_map[i] <= bit_size) { 123 return i; 124 } 125 } 126 g_assert_not_reached(); 127 } 128 129 uint8_t round_down_to_parange_bit_size(uint8_t bit_size) 130 { 131 return pamax_map[round_down_to_parange_index(bit_size)]; 132 } 133 134 /* 135 * The cpu-specific constant value of PAMax; also used by hw/arm/virt. 136 * Note that machvirt_init calls this on a CPU that is inited but not realized! 137 */ 138 unsigned int arm_pamax(ARMCPU *cpu) 139 { 140 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 141 unsigned int parange = 142 FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE); 143 144 /* 145 * id_aa64mmfr0 is a read-only register so values outside of the 146 * supported mappings can be considered an implementation error. 147 */ 148 assert(parange < ARRAY_SIZE(pamax_map)); 149 return pamax_map[parange]; 150 } 151 152 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 153 /* v7 or v8 with LPAE */ 154 return 40; 155 } 156 /* Anything else */ 157 return 32; 158 } 159 160 /* 161 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index 162 */ 163 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 164 { 165 switch (mmu_idx) { 166 case ARMMMUIdx_E10_0: 167 return ARMMMUIdx_Stage1_E0; 168 case ARMMMUIdx_E10_1: 169 return ARMMMUIdx_Stage1_E1; 170 case ARMMMUIdx_E10_1_PAN: 171 return ARMMMUIdx_Stage1_E1_PAN; 172 case ARMMMUIdx_E10_0_GCS: 173 return ARMMMUIdx_Stage1_E0_GCS; 174 case ARMMMUIdx_E10_1_GCS: 175 return ARMMMUIdx_Stage1_E1_GCS; 176 default: 177 return mmu_idx; 178 } 179 } 180 181 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 182 { 183 return stage_1_mmu_idx(arm_mmu_idx(env)); 184 } 185 186 /* 187 * Return where we should do ptw loads from for a stage 2 walk. 188 * This depends on whether the address we are looking up is a 189 * Secure IPA or a NonSecure IPA, which we know from whether this is 190 * Stage2 or Stage2_S. 191 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits. 192 */ 193 static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx) 194 { 195 bool s2walk_secure; 196 197 /* 198 * We're OK to check the current state of the CPU here because 199 * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit 200 * changes. 201 * (2) there's no way to do a lookup that cares about Stage 2 for a 202 * different security state to the current one for AArch64, and AArch32 203 * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do 204 * an NS stage 1+2 lookup while the NS bit is 0.) 205 */ 206 if (!arm_el_is_aa64(env, 3)) { 207 return ARMMMUIdx_Phys_NS; 208 } 209 210 switch (arm_security_space_below_el3(env)) { 211 case ARMSS_NonSecure: 212 return ARMMMUIdx_Phys_NS; 213 case ARMSS_Realm: 214 return ARMMMUIdx_Phys_Realm; 215 case ARMSS_Secure: 216 if (stage2idx == ARMMMUIdx_Stage2_S) { 217 s2walk_secure = !(env->cp15.vstcr_el2 & R_VSTCR_SW_MASK); 218 } else { 219 s2walk_secure = !(env->cp15.vtcr_el2 & R_VTCR_NSW_MASK); 220 } 221 return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; 222 default: 223 g_assert_not_reached(); 224 } 225 } 226 227 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) 228 { 229 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 230 } 231 232 /* Return the TTBR associated with this translation regime */ 233 static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) 234 { 235 if (mmu_idx == ARMMMUIdx_Stage2) { 236 return env->cp15.vttbr_el2; 237 } 238 if (mmu_idx == ARMMMUIdx_Stage2_S) { 239 return env->cp15.vsttbr_el2; 240 } 241 if (ttbrn == 0) { 242 return env->cp15.ttbr0_el[regime_el(mmu_idx)]; 243 } else { 244 return env->cp15.ttbr1_el[regime_el(mmu_idx)]; 245 } 246 } 247 248 /* Return true if the specified stage of address translation is disabled */ 249 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, 250 ARMSecuritySpace space) 251 { 252 uint64_t hcr_el2; 253 254 if (arm_feature(env, ARM_FEATURE_M)) { 255 bool is_secure = arm_space_is_secure(space); 256 switch (env->v7m.mpu_ctrl[is_secure] & 257 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 258 case R_V7M_MPU_CTRL_ENABLE_MASK: 259 /* Enabled, but not for HardFault and NMI */ 260 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 261 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 262 /* Enabled for all cases */ 263 return false; 264 case 0: 265 default: 266 /* 267 * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 268 * we warned about that in armv7m_nvic.c when the guest set it. 269 */ 270 return true; 271 } 272 } 273 274 275 switch (mmu_idx) { 276 case ARMMMUIdx_Stage2: 277 case ARMMMUIdx_Stage2_S: 278 /* HCR.DC means HCR.VM behaves as 1 */ 279 hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 280 return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; 281 282 case ARMMMUIdx_E10_0: 283 case ARMMMUIdx_E10_0_GCS: 284 case ARMMMUIdx_E10_1: 285 case ARMMMUIdx_E10_1_PAN: 286 case ARMMMUIdx_E10_1_GCS: 287 /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */ 288 hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 289 if (hcr_el2 & HCR_TGE) { 290 return true; 291 } 292 break; 293 294 case ARMMMUIdx_Stage1_E0: 295 case ARMMMUIdx_Stage1_E0_GCS: 296 case ARMMMUIdx_Stage1_E1: 297 case ARMMMUIdx_Stage1_E1_PAN: 298 case ARMMMUIdx_Stage1_E1_GCS: 299 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 300 hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 301 if (hcr_el2 & HCR_DC) { 302 return true; 303 } 304 break; 305 306 case ARMMMUIdx_E20_0: 307 case ARMMMUIdx_E20_0_GCS: 308 case ARMMMUIdx_E20_2: 309 case ARMMMUIdx_E20_2_PAN: 310 case ARMMMUIdx_E20_2_GCS: 311 case ARMMMUIdx_E2: 312 case ARMMMUIdx_E2_GCS: 313 case ARMMMUIdx_E3: 314 case ARMMMUIdx_E3_GCS: 315 case ARMMMUIdx_E30_0: 316 case ARMMMUIdx_E30_3_PAN: 317 break; 318 319 case ARMMMUIdx_Phys_S: 320 case ARMMMUIdx_Phys_NS: 321 case ARMMMUIdx_Phys_Root: 322 case ARMMMUIdx_Phys_Realm: 323 /* No translation for physical address spaces. */ 324 return true; 325 326 default: 327 g_assert_not_reached(); 328 } 329 330 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 331 } 332 333 static bool granule_protection_check(CPUARMState *env, uint64_t paddress, 334 ARMSecuritySpace pspace, 335 ARMSecuritySpace ss, 336 ARMMMUFaultInfo *fi) 337 { 338 MemTxAttrs attrs = { 339 .secure = true, 340 .space = ARMSS_Root, 341 }; 342 ARMCPU *cpu = env_archcpu(env); 343 uint64_t gpccr = env->cp15.gpccr_el3; 344 unsigned pps, pgs, l0gptsz, level = 0; 345 uint64_t tableaddr, pps_mask, align, entry, index; 346 AddressSpace *as; 347 MemTxResult result; 348 int gpi; 349 350 if (!FIELD_EX64(gpccr, GPCCR, GPC)) { 351 return true; 352 } 353 354 /* 355 * GPC Priority 1 (R_GMGRR): 356 * R_JWCSM: If the configuration of GPCCR_EL3 is invalid, 357 * the access fails as GPT walk fault at level 0. 358 */ 359 360 /* 361 * Configuration of PPS to a value exceeding the implemented 362 * physical address size is invalid. 363 */ 364 pps = FIELD_EX64(gpccr, GPCCR, PPS); 365 if (pps > FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE)) { 366 goto fault_walk; 367 } 368 pps = pamax_map[pps]; 369 pps_mask = MAKE_64BIT_MASK(0, pps); 370 371 switch (FIELD_EX64(gpccr, GPCCR, SH)) { 372 case 0b10: /* outer shareable */ 373 break; 374 case 0b00: /* non-shareable */ 375 case 0b11: /* inner shareable */ 376 /* Inner and Outer non-cacheable requires Outer shareable. */ 377 if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 && 378 FIELD_EX64(gpccr, GPCCR, IRGN) == 0) { 379 goto fault_walk; 380 } 381 break; 382 default: /* reserved */ 383 goto fault_walk; 384 } 385 386 switch (FIELD_EX64(gpccr, GPCCR, PGS)) { 387 case 0b00: /* 4KB */ 388 pgs = 12; 389 break; 390 case 0b01: /* 64KB */ 391 pgs = 16; 392 break; 393 case 0b10: /* 16KB */ 394 pgs = 14; 395 break; 396 default: /* reserved */ 397 goto fault_walk; 398 } 399 400 /* Note this field is read-only and fixed at reset. */ 401 l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ); 402 403 /* 404 * GPC Priority 2: Access to Secure, NonSecure or Realm is prevented 405 * by one of the GPCCR_EL3 address space disable bits (R_TCWMD). 406 * All of these bits are checked vs aa64_rme_gpc2 in gpccr_write. 407 */ 408 { 409 static const uint8_t disable_masks[4] = { 410 [ARMSS_Secure] = R_GPCCR_SPAD_MASK, 411 [ARMSS_NonSecure] = R_GPCCR_NSPAD_MASK, 412 [ARMSS_Root] = 0, 413 [ARMSS_Realm] = R_GPCCR_RLPAD_MASK, 414 }; 415 416 if (gpccr & disable_masks[pspace]) { 417 goto fault_fail; 418 } 419 } 420 421 /* 422 * GPC Priority 3: Secure, Realm or Root address exceeds PPS. 423 * R_CPDSB: A NonSecure physical address input exceeding PPS 424 * does not experience any fault. 425 * R_PBPSH: Other address spaces have fault suppressed by APPSAA. 426 */ 427 if (paddress & ~pps_mask) { 428 if (pspace == ARMSS_NonSecure || FIELD_EX64(gpccr, GPCCR, APPSAA)) { 429 return true; 430 } 431 goto fault_fail; 432 } 433 434 /* GPC Priority 4: the base address of GPTBR_EL3 exceeds PPS. */ 435 tableaddr = env->cp15.gptbr_el3 << 12; 436 if (tableaddr & ~pps_mask) { 437 goto fault_size; 438 } 439 440 /* 441 * BADDR is aligned per a function of PPS and L0GPTSZ. 442 * These bits of GPTBR_EL3 are RES0, but are not a configuration error, 443 * unlike the RES0 bits of the GPT entries (R_XNKFZ). 444 */ 445 align = MAX(pps - l0gptsz + 3, 12); 446 align = MAKE_64BIT_MASK(0, align); 447 tableaddr &= ~align; 448 449 as = arm_addressspace(env_cpu(env), attrs); 450 451 /* Level 0 lookup. */ 452 index = extract64(paddress, l0gptsz, pps - l0gptsz); 453 tableaddr += index * 8; 454 entry = address_space_ldq_le(as, tableaddr, attrs, &result); 455 if (result != MEMTX_OK) { 456 goto fault_eabt; 457 } 458 459 switch (extract32(entry, 0, 4)) { 460 case 1: /* block descriptor */ 461 if (entry >> 8) { 462 goto fault_walk; /* RES0 bits not 0 */ 463 } 464 gpi = extract32(entry, 4, 4); 465 goto found; 466 case 3: /* table descriptor */ 467 tableaddr = entry & ~0xf; 468 align = MAX(l0gptsz - pgs - 1, 12); 469 align = MAKE_64BIT_MASK(0, align); 470 if (tableaddr & (~pps_mask | align)) { 471 goto fault_walk; /* RES0 bits not 0 */ 472 } 473 break; 474 default: /* invalid */ 475 goto fault_walk; 476 } 477 478 /* Level 1 lookup */ 479 level = 1; 480 index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4); 481 tableaddr += index * 8; 482 entry = address_space_ldq_le(as, tableaddr, attrs, &result); 483 if (result != MEMTX_OK) { 484 goto fault_eabt; 485 } 486 487 switch (extract32(entry, 0, 4)) { 488 case 1: /* contiguous descriptor */ 489 if (entry >> 10) { 490 goto fault_walk; /* RES0 bits not 0 */ 491 } 492 /* 493 * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE, 494 * and because we cannot invalidate by pa, and thus will always 495 * flush entire tlbs, we don't actually care about the range here 496 * and can simply extract the GPI as the result. 497 */ 498 if (extract32(entry, 8, 2) == 0) { 499 goto fault_walk; /* reserved contig */ 500 } 501 gpi = extract32(entry, 4, 4); 502 break; 503 default: 504 index = extract64(paddress, pgs, 4); 505 gpi = extract64(entry, index * 4, 4); 506 break; 507 } 508 509 found: 510 switch (gpi) { 511 case 0b0000: /* no access */ 512 break; 513 case 0b1111: /* all access */ 514 return true; 515 case 0b1000: /* secure */ 516 if (!cpu_isar_feature(aa64_sel2, cpu)) { 517 goto fault_walk; 518 } 519 /* fall through */ 520 case 0b1001: /* non-secure */ 521 case 0b1010: /* root */ 522 case 0b1011: /* realm */ 523 if (pspace == (gpi & 3)) { 524 return true; 525 } 526 break; 527 case 0b1101: /* non-secure only */ 528 /* aa64_rme_gpc2 was checked in gpccr_write */ 529 if (FIELD_EX64(gpccr, GPCCR, NSO)) { 530 return (pspace == ARMSS_NonSecure && 531 (ss == ARMSS_NonSecure || ss == ARMSS_Root)); 532 } 533 goto fault_walk; 534 default: 535 goto fault_walk; /* reserved */ 536 } 537 538 fault_fail: 539 fi->gpcf = GPCF_Fail; 540 goto fault_common; 541 fault_eabt: 542 fi->gpcf = GPCF_EABT; 543 goto fault_common; 544 fault_size: 545 fi->gpcf = GPCF_AddressSize; 546 goto fault_common; 547 fault_walk: 548 fi->gpcf = GPCF_Walk; 549 fault_common: 550 fi->level = level; 551 fi->paddr = paddress; 552 fi->paddr_space = pspace; 553 return false; 554 } 555 556 static bool S1_attrs_are_device(uint8_t attrs) 557 { 558 /* 559 * This slightly under-decodes the MAIR_ELx field: 560 * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE; 561 * 0b0000dd1x is UNPREDICTABLE. 562 */ 563 return (attrs & 0xf0) == 0; 564 } 565 566 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) 567 { 568 /* 569 * For an S1 page table walk, the stage 1 attributes are always 570 * some form of "this is Normal memory". The combined S1+S2 571 * attributes are therefore only Device if stage 2 specifies Device. 572 * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00, 573 * ie when cacheattrs.attrs bits [3:2] are 0b00. 574 * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie 575 * when cacheattrs.attrs bit [2] is 0. 576 */ 577 if (hcr & HCR_FWB) { 578 return (attrs & 0x4) == 0; 579 } else { 580 return (attrs & 0xc) == 0; 581 } 582 } 583 584 static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space, 585 ARMMMUIdx s2_mmu_idx) 586 { 587 /* 588 * Return the security space to use for stage 2 when doing 589 * the S1 page table descriptor load. 590 */ 591 if (regime_is_stage2(s2_mmu_idx)) { 592 /* 593 * The security space for ptw reads is almost always the same 594 * as that of the security space of the stage 1 translation. 595 * The only exception is when stage 1 is Secure; in that case 596 * the ptw read might be to the Secure or the NonSecure space 597 * (but never Realm or Root), and the s2_mmu_idx tells us which. 598 * Root translations are always single-stage. 599 */ 600 if (s1_space == ARMSS_Secure) { 601 return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S); 602 } else { 603 assert(s2_mmu_idx != ARMMMUIdx_Stage2_S); 604 assert(s1_space != ARMSS_Root); 605 return s1_space; 606 } 607 } else { 608 /* ptw loads are from phys: the mmu idx itself says which space */ 609 return arm_phys_to_space(s2_mmu_idx); 610 } 611 } 612 613 static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx) 614 { 615 /* 616 * For stage 2 faults in Secure EL22, S1NS indicates 617 * whether the faulting IPA is in the Secure or NonSecure 618 * IPA space. For all other kinds of fault, it is false. 619 */ 620 return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx) 621 && s2_mmu_idx == ARMMMUIdx_Stage2_S; 622 } 623 624 /* Translate a S1 pagetable walk through S2 if needed. */ 625 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, 626 hwaddr addr, ARMMMUFaultInfo *fi) 627 { 628 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 629 ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; 630 uint8_t pte_attrs; 631 632 ptw->out_virt = addr; 633 634 if (unlikely(ptw->in_debug)) { 635 /* 636 * From gdbstub, do not use softmmu so that we don't modify the 637 * state of the cpu at all, including softmmu tlb contents. 638 */ 639 ARMSecuritySpace s2_space 640 = S2_security_space(ptw->cur_space, s2_mmu_idx); 641 S1Translate s2ptw = { 642 .in_mmu_idx = s2_mmu_idx, 643 .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), 644 .in_space = s2_space, 645 .in_debug = true, 646 .in_prot_check = PAGE_READ, 647 }; 648 GetPhysAddrResult s2 = { }; 649 650 if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) { 651 goto fail; 652 } 653 654 ptw->out_phys = s2.f.phys_addr; 655 pte_attrs = s2.cacheattrs.attrs; 656 ptw->out_host = NULL; 657 ptw->out_rw = false; 658 ptw->out_space = s2.f.attrs.space; 659 } else { 660 #ifdef CONFIG_TCG 661 CPUTLBEntryFull *full; 662 int flags; 663 664 env->tlb_fi = fi; 665 flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD, 666 arm_to_core_mmu_idx(s2_mmu_idx), 667 &ptw->out_host, &full); 668 env->tlb_fi = NULL; 669 670 if (unlikely(flags & TLB_INVALID_MASK)) { 671 goto fail; 672 } 673 ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 674 ptw->out_rw = full->prot & PAGE_WRITE; 675 pte_attrs = full->extra.arm.pte_attrs; 676 ptw->out_space = full->attrs.space; 677 #else 678 g_assert_not_reached(); 679 #endif 680 } 681 682 if (regime_is_stage2(s2_mmu_idx)) { 683 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->cur_space); 684 685 if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) { 686 /* 687 * PTW set and S1 walk touched S2 Device memory: 688 * generate Permission fault. 689 */ 690 fi->type = ARMFault_Permission; 691 fi->s2addr = addr; 692 fi->stage2 = true; 693 fi->s1ptw = true; 694 fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx); 695 return false; 696 } 697 } 698 699 ptw->out_be = regime_translation_big_endian(env, mmu_idx); 700 return true; 701 702 fail: 703 assert(fi->type != ARMFault_None); 704 if (fi->type == ARMFault_GPCFOnOutput) { 705 fi->type = ARMFault_GPCFOnWalk; 706 } 707 fi->s2addr = addr; 708 fi->stage2 = regime_is_stage2(s2_mmu_idx); 709 fi->s1ptw = fi->stage2; 710 fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx); 711 return false; 712 } 713 714 /* All loads done in the course of a page table walk go through here. */ 715 static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, 716 ARMMMUFaultInfo *fi) 717 { 718 CPUState *cs = env_cpu(env); 719 void *host = ptw->out_host; 720 uint32_t data; 721 722 if (likely(host)) { 723 /* Page tables are in RAM, and we have the host address. */ 724 data = qatomic_read((uint32_t *)host); 725 if (ptw->out_be) { 726 data = be32_to_cpu(data); 727 } else { 728 data = le32_to_cpu(data); 729 } 730 } else { 731 /* Page tables are in MMIO. */ 732 MemTxAttrs attrs = { 733 .space = ptw->out_space, 734 .secure = arm_space_is_secure(ptw->out_space), 735 }; 736 AddressSpace *as = arm_addressspace(cs, attrs); 737 MemTxResult result = MEMTX_OK; 738 739 if (ptw->out_be) { 740 data = address_space_ldl_be(as, ptw->out_phys, attrs, &result); 741 } else { 742 data = address_space_ldl_le(as, ptw->out_phys, attrs, &result); 743 } 744 if (unlikely(result != MEMTX_OK)) { 745 fi->type = ARMFault_SyncExternalOnWalk; 746 fi->ea = arm_extabort_type(result); 747 return 0; 748 } 749 } 750 return data; 751 } 752 753 static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, 754 ARMMMUFaultInfo *fi) 755 { 756 CPUState *cs = env_cpu(env); 757 void *host = ptw->out_host; 758 uint64_t data; 759 760 if (likely(host)) { 761 /* Page tables are in RAM, and we have the host address. */ 762 #ifdef CONFIG_ATOMIC64 763 data = qatomic_read__nocheck((uint64_t *)host); 764 if (ptw->out_be) { 765 data = be64_to_cpu(data); 766 } else { 767 data = le64_to_cpu(data); 768 } 769 #else 770 if (ptw->out_be) { 771 data = ldq_be_p(host); 772 } else { 773 data = ldq_le_p(host); 774 } 775 #endif 776 } else { 777 /* Page tables are in MMIO. */ 778 MemTxAttrs attrs = { 779 .space = ptw->out_space, 780 .secure = arm_space_is_secure(ptw->out_space), 781 }; 782 AddressSpace *as = arm_addressspace(cs, attrs); 783 MemTxResult result = MEMTX_OK; 784 785 if (ptw->out_be) { 786 data = address_space_ldq_be(as, ptw->out_phys, attrs, &result); 787 } else { 788 data = address_space_ldq_le(as, ptw->out_phys, attrs, &result); 789 } 790 if (unlikely(result != MEMTX_OK)) { 791 fi->type = ARMFault_SyncExternalOnWalk; 792 fi->ea = arm_extabort_type(result); 793 return 0; 794 } 795 } 796 return data; 797 } 798 799 static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, 800 uint64_t new_val, S1Translate *ptw, 801 ARMMMUFaultInfo *fi) 802 { 803 #if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG) 804 uint64_t cur_val; 805 void *host = ptw->out_host; 806 807 if (unlikely(!host)) { 808 /* Page table in MMIO Memory Region */ 809 CPUState *cs = env_cpu(env); 810 MemTxAttrs attrs = { 811 .space = ptw->out_space, 812 .secure = arm_space_is_secure(ptw->out_space), 813 }; 814 AddressSpace *as = arm_addressspace(cs, attrs); 815 MemTxResult result = MEMTX_OK; 816 bool need_lock = !bql_locked(); 817 818 if (need_lock) { 819 bql_lock(); 820 } 821 if (ptw->out_be) { 822 cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result); 823 if (unlikely(result != MEMTX_OK)) { 824 fi->type = ARMFault_SyncExternalOnWalk; 825 fi->ea = arm_extabort_type(result); 826 if (need_lock) { 827 bql_unlock(); 828 } 829 return old_val; 830 } 831 if (cur_val == old_val) { 832 address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result); 833 if (unlikely(result != MEMTX_OK)) { 834 fi->type = ARMFault_SyncExternalOnWalk; 835 fi->ea = arm_extabort_type(result); 836 if (need_lock) { 837 bql_unlock(); 838 } 839 return old_val; 840 } 841 cur_val = new_val; 842 } 843 } else { 844 cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result); 845 if (unlikely(result != MEMTX_OK)) { 846 fi->type = ARMFault_SyncExternalOnWalk; 847 fi->ea = arm_extabort_type(result); 848 if (need_lock) { 849 bql_unlock(); 850 } 851 return old_val; 852 } 853 if (cur_val == old_val) { 854 address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result); 855 if (unlikely(result != MEMTX_OK)) { 856 fi->type = ARMFault_SyncExternalOnWalk; 857 fi->ea = arm_extabort_type(result); 858 if (need_lock) { 859 bql_unlock(); 860 } 861 return old_val; 862 } 863 cur_val = new_val; 864 } 865 } 866 if (need_lock) { 867 bql_unlock(); 868 } 869 return cur_val; 870 } 871 872 /* 873 * Raising a stage2 Protection fault for an atomic update to a read-only 874 * page is delayed until it is certain that there is a change to make. 875 */ 876 if (unlikely(!ptw->out_rw)) { 877 int flags; 878 879 env->tlb_fi = fi; 880 flags = probe_access_full_mmu(env, ptw->out_virt, 0, 881 MMU_DATA_STORE, 882 arm_to_core_mmu_idx(ptw->in_ptw_idx), 883 NULL, NULL); 884 env->tlb_fi = NULL; 885 886 if (unlikely(flags & TLB_INVALID_MASK)) { 887 /* 888 * We know this must be a stage 2 fault because the granule 889 * protection table does not separately track read and write 890 * permission, so all GPC faults are caught in S1_ptw_translate(): 891 * we only get here for "readable but not writeable". 892 */ 893 assert(fi->type != ARMFault_None); 894 fi->s2addr = ptw->out_virt; 895 fi->stage2 = true; 896 fi->s1ptw = true; 897 fi->s1ns = fault_s1ns(ptw->cur_space, ptw->in_ptw_idx); 898 return 0; 899 } 900 901 /* In case CAS mismatches and we loop, remember writability. */ 902 ptw->out_rw = true; 903 } 904 905 if (ptw->out_be) { 906 old_val = cpu_to_be64(old_val); 907 new_val = cpu_to_be64(new_val); 908 cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); 909 cur_val = be64_to_cpu(cur_val); 910 } else { 911 old_val = cpu_to_le64(old_val); 912 new_val = cpu_to_le64(new_val); 913 cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val); 914 cur_val = le64_to_cpu(cur_val); 915 } 916 return cur_val; 917 #else 918 /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */ 919 g_assert_not_reached(); 920 #endif 921 } 922 923 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 924 uint32_t *table, uint32_t address) 925 { 926 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 927 uint64_t tcr = regime_tcr(env, mmu_idx); 928 int maskshift = extract32(tcr, 0, 3); 929 uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift); 930 uint32_t base_mask; 931 932 if (address & mask) { 933 if (tcr & TTBCR_PD1) { 934 /* Translation table walk disabled for TTBR1 */ 935 return false; 936 } 937 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 938 } else { 939 if (tcr & TTBCR_PD0) { 940 /* Translation table walk disabled for TTBR0 */ 941 return false; 942 } 943 base_mask = ~((uint32_t)0x3fffu >> maskshift); 944 *table = regime_ttbr(env, mmu_idx, 0) & base_mask; 945 } 946 *table |= (address >> 18) & 0x3ffc; 947 return true; 948 } 949 950 /* 951 * Translate section/page access permissions to page R/W protection flags 952 * @env: CPUARMState 953 * @mmu_idx: MMU index indicating required translation regime 954 * @ap: The 3-bit access permissions (AP[2:0]) 955 * @domain_prot: The 2-bit domain access permissions 956 * @is_user: TRUE if accessing from PL0 957 */ 958 static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx, 959 int ap, int domain_prot, bool is_user) 960 { 961 if (domain_prot == 3) { 962 return PAGE_READ | PAGE_WRITE; 963 } 964 965 switch (ap) { 966 case 0: 967 if (arm_feature(env, ARM_FEATURE_V7)) { 968 return 0; 969 } 970 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 971 case SCTLR_S: 972 return is_user ? 0 : PAGE_READ; 973 case SCTLR_R: 974 return PAGE_READ; 975 default: 976 return 0; 977 } 978 case 1: 979 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 980 case 2: 981 if (is_user) { 982 return PAGE_READ; 983 } else { 984 return PAGE_READ | PAGE_WRITE; 985 } 986 case 3: 987 return PAGE_READ | PAGE_WRITE; 988 case 4: /* Reserved. */ 989 return 0; 990 case 5: 991 return is_user ? 0 : PAGE_READ; 992 case 6: 993 return PAGE_READ; 994 case 7: 995 if (!arm_feature(env, ARM_FEATURE_V6K)) { 996 return 0; 997 } 998 return PAGE_READ; 999 default: 1000 g_assert_not_reached(); 1001 } 1002 } 1003 1004 /* 1005 * Translate section/page access permissions to page R/W protection flags 1006 * @env: CPUARMState 1007 * @mmu_idx: MMU index indicating required translation regime 1008 * @ap: The 3-bit access permissions (AP[2:0]) 1009 * @domain_prot: The 2-bit domain access permissions 1010 */ 1011 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 1012 int ap, int domain_prot) 1013 { 1014 return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1015 regime_is_user(mmu_idx)); 1016 } 1017 1018 /* 1019 * Translate section/page access permissions to page R/W protection flags. 1020 * @ap: The 2-bit simple AP (AP[2:1]) 1021 * @is_user: TRUE if accessing from PL0 1022 */ 1023 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 1024 { 1025 switch (ap) { 1026 case 0: 1027 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 1028 case 1: 1029 return PAGE_READ | PAGE_WRITE; 1030 case 2: 1031 return is_user ? 0 : PAGE_READ; 1032 case 3: 1033 return PAGE_READ; 1034 default: 1035 g_assert_not_reached(); 1036 } 1037 } 1038 1039 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 1040 { 1041 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(mmu_idx)); 1042 } 1043 1044 static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, 1045 uint32_t address, MMUAccessType access_type, 1046 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1047 { 1048 int level = 1; 1049 uint32_t table; 1050 uint32_t desc; 1051 int type; 1052 int ap; 1053 int domain = 0; 1054 int domain_prot; 1055 hwaddr phys_addr; 1056 uint32_t dacr; 1057 1058 /* Pagetable walk. */ 1059 /* Lookup l1 descriptor. */ 1060 if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) { 1061 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 1062 fi->type = ARMFault_Translation; 1063 goto do_fault; 1064 } 1065 if (!S1_ptw_translate(env, ptw, table, fi)) { 1066 goto do_fault; 1067 } 1068 desc = arm_ldl_ptw(env, ptw, fi); 1069 if (fi->type != ARMFault_None) { 1070 goto do_fault; 1071 } 1072 type = (desc & 3); 1073 domain = (desc >> 5) & 0x0f; 1074 if (regime_el(ptw->in_mmu_idx) == 1) { 1075 dacr = env->cp15.dacr_ns; 1076 } else { 1077 dacr = env->cp15.dacr_s; 1078 } 1079 domain_prot = (dacr >> (domain * 2)) & 3; 1080 if (type == 0) { 1081 /* Section translation fault. */ 1082 fi->type = ARMFault_Translation; 1083 goto do_fault; 1084 } 1085 if (type != 2) { 1086 level = 2; 1087 } 1088 if (domain_prot == 0 || domain_prot == 2) { 1089 fi->type = ARMFault_Domain; 1090 goto do_fault; 1091 } 1092 if (type == 2) { 1093 /* 1Mb section. */ 1094 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 1095 ap = (desc >> 10) & 3; 1096 result->f.lg_page_size = 20; /* 1MB */ 1097 } else { 1098 /* Lookup l2 entry. */ 1099 if (type == 1) { 1100 /* Coarse pagetable. */ 1101 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 1102 } else { 1103 /* Fine pagetable. */ 1104 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 1105 } 1106 if (!S1_ptw_translate(env, ptw, table, fi)) { 1107 goto do_fault; 1108 } 1109 desc = arm_ldl_ptw(env, ptw, fi); 1110 if (fi->type != ARMFault_None) { 1111 goto do_fault; 1112 } 1113 switch (desc & 3) { 1114 case 0: /* Page translation fault. */ 1115 fi->type = ARMFault_Translation; 1116 goto do_fault; 1117 case 1: /* 64k page. */ 1118 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 1119 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 1120 result->f.lg_page_size = 16; 1121 break; 1122 case 2: /* 4k page. */ 1123 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1124 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 1125 result->f.lg_page_size = 12; 1126 break; 1127 case 3: /* 1k page, or ARMv6 "extended small (4k) page" */ 1128 if (type == 1) { 1129 /* ARMv6 extended small page format */ 1130 if (arm_feature(env, ARM_FEATURE_V6)) { 1131 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1132 result->f.lg_page_size = 12; 1133 } else { 1134 /* 1135 * UNPREDICTABLE in ARMv5; we choose to take a 1136 * page translation fault. 1137 */ 1138 fi->type = ARMFault_Translation; 1139 goto do_fault; 1140 } 1141 } else { 1142 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 1143 result->f.lg_page_size = 10; 1144 } 1145 ap = (desc >> 4) & 3; 1146 break; 1147 default: 1148 /* Never happens, but compiler isn't smart enough to tell. */ 1149 g_assert_not_reached(); 1150 } 1151 } 1152 result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot); 1153 result->f.prot |= result->f.prot ? PAGE_EXEC : 0; 1154 if (ptw->in_prot_check & ~result->f.prot) { 1155 /* Access permission fault. */ 1156 fi->type = ARMFault_Permission; 1157 goto do_fault; 1158 } 1159 result->f.phys_addr = phys_addr; 1160 return false; 1161 do_fault: 1162 fi->domain = domain; 1163 fi->level = level; 1164 return true; 1165 } 1166 1167 static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, 1168 uint32_t address, MMUAccessType access_type, 1169 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1170 { 1171 ARMCPU *cpu = env_archcpu(env); 1172 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 1173 int level = 1; 1174 uint32_t table; 1175 uint32_t desc; 1176 uint32_t xn; 1177 uint32_t pxn = 0; 1178 int type; 1179 int ap; 1180 int domain = 0; 1181 int domain_prot; 1182 hwaddr phys_addr; 1183 uint32_t dacr; 1184 bool ns; 1185 ARMSecuritySpace out_space; 1186 1187 /* Pagetable walk. */ 1188 /* Lookup l1 descriptor. */ 1189 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 1190 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 1191 fi->type = ARMFault_Translation; 1192 goto do_fault; 1193 } 1194 if (!S1_ptw_translate(env, ptw, table, fi)) { 1195 goto do_fault; 1196 } 1197 desc = arm_ldl_ptw(env, ptw, fi); 1198 if (fi->type != ARMFault_None) { 1199 goto do_fault; 1200 } 1201 type = (desc & 3); 1202 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { 1203 /* Section translation fault, or attempt to use the encoding 1204 * which is Reserved on implementations without PXN. 1205 */ 1206 fi->type = ARMFault_Translation; 1207 goto do_fault; 1208 } 1209 if ((type == 1) || !(desc & (1 << 18))) { 1210 /* Page or Section. */ 1211 domain = (desc >> 5) & 0x0f; 1212 } 1213 if (regime_el(mmu_idx) == 1) { 1214 dacr = env->cp15.dacr_ns; 1215 } else { 1216 dacr = env->cp15.dacr_s; 1217 } 1218 if (type == 1) { 1219 level = 2; 1220 } 1221 domain_prot = (dacr >> (domain * 2)) & 3; 1222 if (domain_prot == 0 || domain_prot == 2) { 1223 /* Section or Page domain fault */ 1224 fi->type = ARMFault_Domain; 1225 goto do_fault; 1226 } 1227 if (type != 1) { 1228 if (desc & (1 << 18)) { 1229 /* Supersection. */ 1230 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 1231 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 1232 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 1233 result->f.lg_page_size = 24; /* 16MB */ 1234 } else { 1235 /* Section. */ 1236 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 1237 result->f.lg_page_size = 20; /* 1MB */ 1238 } 1239 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 1240 xn = desc & (1 << 4); 1241 pxn = desc & 1; 1242 ns = extract32(desc, 19, 1); 1243 } else { 1244 if (cpu_isar_feature(aa32_pxn, cpu)) { 1245 pxn = (desc >> 2) & 1; 1246 } 1247 ns = extract32(desc, 3, 1); 1248 /* Lookup l2 entry. */ 1249 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 1250 if (!S1_ptw_translate(env, ptw, table, fi)) { 1251 goto do_fault; 1252 } 1253 desc = arm_ldl_ptw(env, ptw, fi); 1254 if (fi->type != ARMFault_None) { 1255 goto do_fault; 1256 } 1257 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 1258 switch (desc & 3) { 1259 case 0: /* Page translation fault. */ 1260 fi->type = ARMFault_Translation; 1261 goto do_fault; 1262 case 1: /* 64k page. */ 1263 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 1264 xn = desc & (1 << 15); 1265 result->f.lg_page_size = 16; 1266 break; 1267 case 2: case 3: /* 4k page. */ 1268 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1269 xn = desc & 1; 1270 result->f.lg_page_size = 12; 1271 break; 1272 default: 1273 /* Never happens, but compiler isn't smart enough to tell. */ 1274 g_assert_not_reached(); 1275 } 1276 } 1277 out_space = ptw->cur_space; 1278 if (ns) { 1279 /* 1280 * The NS bit will (as required by the architecture) have no effect if 1281 * the CPU doesn't support TZ or this is a non-secure translation 1282 * regime, because the output space will already be non-secure. 1283 */ 1284 out_space = ARMSS_NonSecure; 1285 } 1286 if (domain_prot == 3) { 1287 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 1288 } else { 1289 int user_rw, prot_rw; 1290 1291 if (arm_feature(env, ARM_FEATURE_V6K) && 1292 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 1293 /* The simplified model uses AP[0] as an access control bit. */ 1294 if ((ap & 1) == 0) { 1295 /* Access flag fault. */ 1296 fi->type = ARMFault_AccessFlag; 1297 goto do_fault; 1298 } 1299 prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 1300 user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1); 1301 } else { 1302 prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 1303 user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1); 1304 } 1305 1306 result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw, 1307 xn, pxn, ptw->in_space, out_space); 1308 if (ptw->in_prot_check & ~result->f.prot) { 1309 /* Access permission fault. */ 1310 fi->type = ARMFault_Permission; 1311 goto do_fault; 1312 } 1313 } 1314 result->f.attrs.space = out_space; 1315 result->f.attrs.secure = arm_space_is_secure(out_space); 1316 result->f.phys_addr = phys_addr; 1317 return false; 1318 do_fault: 1319 fi->domain = domain; 1320 fi->level = level; 1321 return true; 1322 } 1323 1324 /* 1325 * Translate S2 section/page access permissions to protection flags 1326 * @env: CPUARMState 1327 * @s2ap: The 2-bit stage2 access permissions (S2AP) 1328 * @xn: XN (execute-never) bits 1329 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 1330 */ 1331 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) 1332 { 1333 int prot = 0; 1334 1335 if (s2ap & 1) { 1336 prot |= PAGE_READ; 1337 } 1338 if (s2ap & 2) { 1339 prot |= PAGE_WRITE; 1340 } 1341 1342 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { 1343 switch (xn) { 1344 case 0: 1345 prot |= PAGE_EXEC; 1346 break; 1347 case 1: 1348 if (s1_is_el0) { 1349 prot |= PAGE_EXEC; 1350 } 1351 break; 1352 case 2: 1353 break; 1354 case 3: 1355 if (!s1_is_el0) { 1356 prot |= PAGE_EXEC; 1357 } 1358 break; 1359 default: 1360 g_assert_not_reached(); 1361 } 1362 } else { 1363 if (!extract32(xn, 1, 1)) { 1364 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 1365 prot |= PAGE_EXEC; 1366 } 1367 } 1368 } 1369 return prot; 1370 } 1371 1372 static int get_S2prot_indirect(CPUARMState *env, GetPhysAddrResult *result, 1373 int pi_index, int po_index, bool s1_is_el0) 1374 { 1375 /* Last index is (priv, unpriv, ttw) */ 1376 static const uint8_t perm_table[16][3] = { 1377 /* 0 */ { 0, 0, 0 }, /* no access */ 1378 /* 1 */ { 0, 0, 0 }, /* reserved */ 1379 /* 2 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, 1380 /* 3 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, 1381 /* 4 */ { PAGE_WRITE, PAGE_WRITE, 0 }, 1382 /* 5 */ { 0, 0, 0 }, /* reserved */ 1383 /* 6 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, 1384 /* 7 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, 1385 /* 8 */ { PAGE_READ, PAGE_READ, PAGE_READ }, 1386 /* 9 */ { PAGE_READ, PAGE_READ | PAGE_EXEC, PAGE_READ }, 1387 /* A */ { PAGE_READ | PAGE_EXEC, PAGE_READ, PAGE_READ }, 1388 /* B */ { PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_EXEC, PAGE_READ }, 1389 /* C */ { PAGE_READ | PAGE_WRITE, 1390 PAGE_READ | PAGE_WRITE, 1391 PAGE_READ | PAGE_WRITE }, 1392 /* D */ { PAGE_READ | PAGE_WRITE, 1393 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 1394 PAGE_READ | PAGE_WRITE }, 1395 /* E */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC, 1396 PAGE_READ | PAGE_WRITE, 1397 PAGE_READ | PAGE_WRITE }, 1398 /* F */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC, 1399 PAGE_READ | PAGE_WRITE | PAGE_EXEC, 1400 PAGE_READ | PAGE_WRITE }, 1401 }; 1402 1403 uint64_t pir = (env->cp15.scr_el3 & SCR_PIEN ? env->cp15.s2pir_el2 : 0); 1404 int s2pi = extract64(pir, pi_index * 4, 4); 1405 1406 result->f.prot = perm_table[s2pi][2]; 1407 return perm_table[s2pi][s1_is_el0]; 1408 } 1409 1410 /* 1411 * Translate section/page access permissions to protection flags 1412 * @env: CPUARMState 1413 * @mmu_idx: MMU index indicating required translation regime 1414 * @is_aa64: TRUE if AArch64 1415 * @user_rw: Translated AP for user access 1416 * @prot_rw: Translated AP for privileged access 1417 * @xn: XN (execute-never) bit 1418 * @pxn: PXN (privileged execute-never) bit 1419 * @in_pa: The original input pa space 1420 * @out_pa: The output pa space, modified by NSTable, NS, and NSE 1421 */ 1422 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 1423 int user_rw, int prot_rw, int xn, int pxn, 1424 ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) 1425 { 1426 ARMCPU *cpu = env_archcpu(env); 1427 bool is_user = regime_is_user(mmu_idx); 1428 bool have_wxn; 1429 int wxn = 0; 1430 1431 assert(!regime_is_stage2(mmu_idx)); 1432 1433 if (is_user) { 1434 prot_rw = user_rw; 1435 } else { 1436 /* 1437 * PAN controls can forbid data accesses but don't affect insn fetch. 1438 * Plain PAN forbids data accesses if EL0 has data permissions; 1439 * PAN3 forbids data accesses if EL0 has either data or exec perms. 1440 * Note that for AArch64 the 'user can exec' case is exactly !xn. 1441 * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0 1442 * do not affect EPAN. 1443 */ 1444 if (user_rw && regime_is_pan(mmu_idx)) { 1445 prot_rw = 0; 1446 } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 && 1447 regime_is_pan(mmu_idx) && 1448 (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) { 1449 prot_rw = 0; 1450 } 1451 } 1452 1453 if (in_pa != out_pa) { 1454 switch (in_pa) { 1455 case ARMSS_Root: 1456 /* 1457 * R_ZWRVD: permission fault for insn fetched from non-Root, 1458 * I_WWBFB: SIF has no effect in EL3. 1459 */ 1460 return prot_rw; 1461 case ARMSS_Realm: 1462 /* 1463 * R_PKTDS: permission fault for insn fetched from non-Realm, 1464 * for Realm EL2 or EL2&0. The corresponding fault for EL1&0 1465 * happens during any stage2 translation. 1466 */ 1467 switch (mmu_idx) { 1468 case ARMMMUIdx_E2: 1469 case ARMMMUIdx_E20_0: 1470 case ARMMMUIdx_E20_2: 1471 case ARMMMUIdx_E20_2_PAN: 1472 return prot_rw; 1473 default: 1474 break; 1475 } 1476 break; 1477 case ARMSS_Secure: 1478 if (env->cp15.scr_el3 & SCR_SIF) { 1479 return prot_rw; 1480 } 1481 break; 1482 default: 1483 /* Input NonSecure must have output NonSecure. */ 1484 g_assert_not_reached(); 1485 } 1486 } 1487 1488 /* TODO have_wxn should be replaced with 1489 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 1490 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 1491 * compatible processors have EL2, which is required for [U]WXN. 1492 */ 1493 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 1494 1495 if (have_wxn) { 1496 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 1497 } 1498 1499 if (is_aa64) { 1500 if (regime_has_2_ranges(mmu_idx) && !is_user) { 1501 xn = pxn || (user_rw & PAGE_WRITE); 1502 } 1503 } else if (arm_feature(env, ARM_FEATURE_V7)) { 1504 switch (regime_el(mmu_idx)) { 1505 case 1: 1506 case 3: 1507 if (is_user) { 1508 xn = xn || !(user_rw & PAGE_READ); 1509 } else { 1510 int uwxn = 0; 1511 if (have_wxn) { 1512 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 1513 } 1514 xn = xn || !(prot_rw & PAGE_READ) || pxn || 1515 (uwxn && (user_rw & PAGE_WRITE)); 1516 } 1517 break; 1518 case 2: 1519 break; 1520 } 1521 } else { 1522 xn = wxn = 0; 1523 } 1524 1525 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 1526 return prot_rw; 1527 } 1528 return prot_rw | PAGE_EXEC; 1529 } 1530 1531 /* Extra page permission bits, during get_S1prot_indirect only. */ 1532 #define PAGE_GCS (1 << 3) 1533 #define PAGE_WXN (1 << 4) 1534 #define PAGE_OVERLAY (1 << 5) 1535 QEMU_BUILD_BUG_ON(PAGE_RWX & (PAGE_GCS | PAGE_WXN | PAGE_OVERLAY)); 1536 1537 static int get_S1prot_indirect(CPUARMState *env, S1Translate *ptw, 1538 ARMMMUIdx mmu_idx, int pi_index, int po_index, 1539 ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) 1540 { 1541 static const uint8_t perm_table[16] = { 1542 /* 0 */ PAGE_OVERLAY, /* no access */ 1543 /* 1 */ PAGE_OVERLAY | PAGE_READ, 1544 /* 2 */ PAGE_OVERLAY | PAGE_EXEC, 1545 /* 3 */ PAGE_OVERLAY | PAGE_READ | PAGE_EXEC, 1546 /* 4 */ PAGE_OVERLAY, /* reserved */ 1547 /* 5 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE, 1548 /* 6 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_WXN, 1549 /* 7 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC, 1550 /* 8 */ PAGE_READ, 1551 /* 9 */ PAGE_READ | PAGE_GCS, 1552 /* A */ PAGE_READ | PAGE_EXEC, 1553 /* B */ 0, /* reserved */ 1554 /* C */ PAGE_READ | PAGE_WRITE, 1555 /* D */ 0, /* reserved */ 1556 /* E */ PAGE_READ | PAGE_WRITE | PAGE_EXEC, 1557 /* F */ 0, /* reserved */ 1558 }; 1559 1560 uint32_t el = regime_el(mmu_idx); 1561 uint64_t pir = env->cp15.pir_el[el]; 1562 uint64_t pire0 = 0; 1563 int perm; 1564 1565 if (el < 3) { 1566 if (arm_feature(env, ARM_FEATURE_EL3) 1567 && !(env->cp15.scr_el3 & SCR_PIEN)) { 1568 pir = 0; 1569 } else if (el == 2) { 1570 pire0 = env->cp15.pire0_el2; 1571 } else if (!ptw->in_nv1) { 1572 pire0 = env->cp15.pir_el[0]; 1573 } 1574 } 1575 perm = perm_table[extract64(pir, pi_index * 4, 4)]; 1576 1577 if (regime_has_2_ranges(mmu_idx)) { 1578 int p_perm = perm; 1579 int u_perm = perm_table[extract64(pire0, pi_index * 4, 4)]; 1580 1581 if ((p_perm & (PAGE_EXEC | PAGE_GCS)) && 1582 (u_perm & (PAGE_WRITE | PAGE_GCS))) { 1583 p_perm &= ~(PAGE_RWX | PAGE_GCS); 1584 u_perm &= ~(PAGE_RWX | PAGE_GCS); 1585 } 1586 if ((u_perm & (PAGE_RWX | PAGE_GCS)) && regime_is_pan(mmu_idx)) { 1587 p_perm &= ~(PAGE_READ | PAGE_WRITE); 1588 } 1589 perm = regime_is_user(mmu_idx) ? u_perm : p_perm; 1590 } 1591 1592 if (in_pa != out_pa) { 1593 switch (in_pa) { 1594 case ARMSS_Root: 1595 /* 1596 * R_ZWRVD: permission fault for insn fetched from non-Root, 1597 * I_WWBFB: SIF has no effect in EL3. 1598 */ 1599 perm &= ~(PAGE_EXEC | PAGE_GCS); 1600 break; 1601 case ARMSS_Realm: 1602 /* 1603 * R_PKTDS: permission fault for insn fetched from non-Realm, 1604 * for Realm EL2 or EL2&0. The corresponding fault for EL1&0 1605 * happens during any stage2 translation. 1606 */ 1607 if (el == 2) { 1608 perm &= ~(PAGE_EXEC | PAGE_GCS); 1609 } 1610 break; 1611 case ARMSS_Secure: 1612 if (env->cp15.scr_el3 & SCR_SIF) { 1613 perm &= ~(PAGE_EXEC | PAGE_GCS); 1614 } 1615 break; 1616 default: 1617 /* Input NonSecure must have output NonSecure. */ 1618 g_assert_not_reached(); 1619 } 1620 } 1621 1622 if (regime_is_gcs(mmu_idx)) { 1623 /* 1624 * Note that the one s1perms.gcs bit controls both read and write 1625 * access via AccessType_GCS. See AArch64.S1CheckPermissions. 1626 */ 1627 perm = (perm & PAGE_GCS ? PAGE_READ | PAGE_WRITE : 0); 1628 } else if (perm & PAGE_WXN) { 1629 perm &= ~PAGE_EXEC; 1630 } 1631 1632 return perm & PAGE_RWX; 1633 } 1634 1635 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 1636 ARMMMUIdx mmu_idx) 1637 { 1638 uint64_t tcr = regime_tcr(env, mmu_idx); 1639 uint32_t el = regime_el(mmu_idx); 1640 int select, tsz; 1641 bool epd, hpd; 1642 1643 assert(mmu_idx != ARMMMUIdx_Stage2_S); 1644 1645 if (mmu_idx == ARMMMUIdx_Stage2) { 1646 /* VTCR */ 1647 bool sext = extract32(tcr, 4, 1); 1648 bool sign = extract32(tcr, 3, 1); 1649 1650 /* 1651 * If the sign-extend bit is not the same as t0sz[3], the result 1652 * is unpredictable. Flag this as a guest error. 1653 */ 1654 if (sign != sext) { 1655 qemu_log_mask(LOG_GUEST_ERROR, 1656 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 1657 } 1658 tsz = sextract32(tcr, 0, 4) + 8; 1659 select = 0; 1660 epd = false; 1661 /* 1662 * Stage2 does not have hierarchical permissions. 1663 * Thus disabling them makes things easier during ptw. 1664 */ 1665 hpd = true; 1666 } else if (el == 2) { 1667 /* HTCR */ 1668 tsz = extract32(tcr, 0, 3); 1669 select = 0; 1670 hpd = extract64(tcr, 24, 1); 1671 epd = false; 1672 } else { 1673 int t0sz = extract32(tcr, 0, 3); 1674 int t1sz = extract32(tcr, 16, 3); 1675 1676 if (t1sz == 0) { 1677 select = va > (0xffffffffu >> t0sz); 1678 } else { 1679 /* Note that we will detect errors later. */ 1680 select = va >= ~(0xffffffffu >> t1sz); 1681 } 1682 if (!select) { 1683 tsz = t0sz; 1684 epd = extract32(tcr, 7, 1); 1685 hpd = extract64(tcr, 41, 1); 1686 } else { 1687 tsz = t1sz; 1688 epd = extract32(tcr, 23, 1); 1689 hpd = extract64(tcr, 42, 1); 1690 } 1691 /* For aarch32, hpd0 is not enabled without t2e as well. */ 1692 hpd &= extract32(tcr, 6, 1); 1693 } 1694 1695 return (ARMVAParameters) { 1696 .tsz = tsz, 1697 .select = select, 1698 .epd = epd, 1699 .hpd = hpd, 1700 }; 1701 } 1702 1703 /* 1704 * check_s2_mmu_setup 1705 * @cpu: ARMCPU 1706 * @is_aa64: True if the translation regime is in AArch64 state 1707 * @tcr: VTCR_EL2 or VSTCR_EL2 1708 * @ds: Effective value of TCR.DS. 1709 * @iasize: Bitsize of IPAs 1710 * @stride: Page-table stride (See the ARM ARM) 1711 * 1712 * Decode the starting level of the S2 lookup, returning INT_MIN if 1713 * the configuration is invalid. 1714 */ 1715 static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, 1716 bool ds, int iasize, int stride) 1717 { 1718 int sl0, sl2, startlevel, granulebits, levels; 1719 int s1_min_iasize, s1_max_iasize; 1720 1721 sl0 = extract32(tcr, 6, 2); 1722 if (is_aa64) { 1723 /* 1724 * AArch64.S2InvalidSL: Interpretation of SL depends on the page size, 1725 * so interleave AArch64.S2StartLevel. 1726 */ 1727 switch (stride) { 1728 case 9: /* 4KB */ 1729 /* SL2 is RES0 unless DS=1 & 4KB granule. */ 1730 sl2 = extract64(tcr, 33, 1); 1731 if (ds && sl2) { 1732 if (sl0 != 0) { 1733 goto fail; 1734 } 1735 startlevel = -1; 1736 } else { 1737 startlevel = 2 - sl0; 1738 switch (sl0) { 1739 case 2: 1740 if (arm_pamax(cpu) < 44) { 1741 goto fail; 1742 } 1743 break; 1744 case 3: 1745 if (!cpu_isar_feature(aa64_st, cpu)) { 1746 goto fail; 1747 } 1748 startlevel = 3; 1749 break; 1750 } 1751 } 1752 break; 1753 case 11: /* 16KB */ 1754 switch (sl0) { 1755 case 2: 1756 if (arm_pamax(cpu) < 42) { 1757 goto fail; 1758 } 1759 break; 1760 case 3: 1761 if (!ds) { 1762 goto fail; 1763 } 1764 break; 1765 } 1766 startlevel = 3 - sl0; 1767 break; 1768 case 13: /* 64KB */ 1769 switch (sl0) { 1770 case 2: 1771 if (arm_pamax(cpu) < 44) { 1772 goto fail; 1773 } 1774 break; 1775 case 3: 1776 goto fail; 1777 } 1778 startlevel = 3 - sl0; 1779 break; 1780 default: 1781 g_assert_not_reached(); 1782 } 1783 } else { 1784 /* 1785 * Things are simpler for AArch32 EL2, with only 4k pages. 1786 * There is no separate S2InvalidSL function, but AArch32.S2Walk 1787 * begins with walkparms.sl0 in {'1x'}. 1788 */ 1789 assert(stride == 9); 1790 if (sl0 >= 2) { 1791 goto fail; 1792 } 1793 startlevel = 2 - sl0; 1794 } 1795 1796 /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */ 1797 levels = 3 - startlevel; 1798 granulebits = stride + 3; 1799 1800 s1_min_iasize = levels * stride + granulebits + 1; 1801 s1_max_iasize = s1_min_iasize + (stride - 1) + 4; 1802 1803 if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) { 1804 return startlevel; 1805 } 1806 1807 fail: 1808 return INT_MIN; 1809 } 1810 1811 static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds, 1812 ARMGranuleSize gran, int level) 1813 { 1814 /* 1815 * See pseudocode AArch46.BlockDescSupported(): block descriptors 1816 * are not valid at all levels, depending on the page size. 1817 */ 1818 switch (gran) { 1819 case Gran4K: 1820 return (level == 0 && ds) || level == 1 || level == 2; 1821 case Gran16K: 1822 return (level == 1 && ds) || level == 2; 1823 case Gran64K: 1824 return (level == 1 && arm_pamax(cpu) == 52) || level == 2; 1825 default: 1826 g_assert_not_reached(); 1827 } 1828 } 1829 1830 /** 1831 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format 1832 * 1833 * Returns false if the translation was successful. Otherwise, phys_ptr, 1834 * attrs, prot and page_size may not be filled in, and the populated fsr 1835 * value provides information on why the translation aborted, in the format 1836 * of a long-format DFSR/IFSR fault register, with the following caveat: 1837 * the WnR bit is never set (the caller must do this). 1838 * 1839 * @env: CPUARMState 1840 * @ptw: Current and next stage parameters for the walk. 1841 * @address: virtual address to get physical address for 1842 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH 1843 * @memop: memory operation feeding this access, or 0 for none 1844 * @result: set on translation success, 1845 * @fi: set to fault info if the translation fails 1846 */ 1847 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, 1848 uint64_t address, 1849 MMUAccessType access_type, MemOp memop, 1850 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1851 { 1852 ARMCPU *cpu = env_archcpu(env); 1853 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 1854 int32_t level; 1855 ARMVAParameters param; 1856 uint64_t ttbr; 1857 hwaddr descaddr, indexmask, indexmask_grainsize; 1858 uint32_t tableattrs; 1859 uint64_t page_size; 1860 uint64_t attrs; 1861 int32_t stride; 1862 int addrsize, inputsize, outputsize; 1863 uint64_t tcr = regime_tcr(env, mmu_idx); 1864 int ap, prot; 1865 uint32_t el = regime_el(mmu_idx); 1866 uint64_t descaddrmask; 1867 bool aarch64 = arm_el_is_aa64(env, el); 1868 uint64_t descriptor, new_descriptor; 1869 ARMSecuritySpace out_space; 1870 bool device; 1871 1872 /* TODO: This code does not support shareability levels. */ 1873 if (aarch64) { 1874 int ps; 1875 1876 param = aa64_va_parameters(env, address, mmu_idx, 1877 access_type != MMU_INST_FETCH, 1878 !arm_el_is_aa64(env, 1)); 1879 level = 0; 1880 1881 /* 1882 * Cache NV1 before we adjust ptw->in_space for NSTable. 1883 * Note that this is only relevant for EL1&0, and that 1884 * computing it would assert for ARMSS_Root. 1885 */ 1886 if (el == 1) { 1887 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 1888 ptw->in_nv1 = (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1); 1889 } 1890 1891 /* 1892 * If TxSZ is programmed to a value larger than the maximum, 1893 * or smaller than the effective minimum, it is IMPLEMENTATION 1894 * DEFINED whether we behave as if the field were programmed 1895 * within bounds, or if a level 0 Translation fault is generated. 1896 * 1897 * With FEAT_LVA, fault on less than minimum becomes required, 1898 * so our choice is to always raise the fault. 1899 */ 1900 if (param.tsz_oob) { 1901 goto do_translation_fault; 1902 } 1903 1904 addrsize = 64 - 8 * param.tbi; 1905 inputsize = 64 - param.tsz; 1906 1907 /* 1908 * Bound PS by PARANGE to find the effective output address size. 1909 * ID_AA64MMFR0 is a read-only register so values outside of the 1910 * supported mappings can be considered an implementation error. 1911 */ 1912 ps = FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE); 1913 ps = MIN(ps, param.ps); 1914 assert(ps < ARRAY_SIZE(pamax_map)); 1915 outputsize = pamax_map[ps]; 1916 1917 /* 1918 * With LPA2, the effective output address (OA) size is at most 48 bits 1919 * unless TCR.DS == 1 1920 */ 1921 if (!param.ds && param.gran != Gran64K) { 1922 outputsize = MIN(outputsize, 48); 1923 } 1924 } else { 1925 param = aa32_va_parameters(env, address, mmu_idx); 1926 level = 1; 1927 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 1928 inputsize = addrsize - param.tsz; 1929 outputsize = 40; 1930 } 1931 1932 /* 1933 * We determined the region when collecting the parameters, but we 1934 * have not yet validated that the address is valid for the region. 1935 * Extract the top bits and verify that they all match select. 1936 * 1937 * For aa32, if inputsize == addrsize, then we have selected the 1938 * region by exclusion in aa32_va_parameters and there is no more 1939 * validation to do here. 1940 */ 1941 if (inputsize < addrsize) { 1942 uint64_t top_bits = sextract64(address, inputsize, 1943 addrsize - inputsize); 1944 if (-top_bits != param.select) { 1945 /* The gap between the two regions is a Translation fault */ 1946 goto do_translation_fault; 1947 } 1948 } 1949 1950 stride = arm_granule_bits(param.gran) - 3; 1951 1952 /* 1953 * Note that QEMU ignores shareability and cacheability attributes, 1954 * so we don't need to do anything with the SH, ORGN, IRGN fields 1955 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 1956 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 1957 * implement any ASID-like capability so we can ignore it (instead 1958 * we will always flush the TLB any time the ASID is changed). 1959 */ 1960 ttbr = regime_ttbr(env, mmu_idx, param.select); 1961 1962 /* 1963 * Here we should have set up all the parameters for the translation: 1964 * inputsize, ttbr, epd, stride, tbi 1965 */ 1966 1967 if (param.epd) { 1968 /* 1969 * Translation table walk disabled => Translation fault on TLB miss 1970 * Note: This is always 0 on 64-bit EL2 and EL3. 1971 */ 1972 goto do_translation_fault; 1973 } 1974 1975 if (!regime_is_stage2(mmu_idx)) { 1976 /* 1977 * The starting level depends on the virtual address size (which can 1978 * be up to 48 bits) and the translation granule size. It indicates 1979 * the number of strides (stride bits at a time) needed to 1980 * consume the bits of the input address. In the pseudocode this is: 1981 * level = 4 - RoundUp((inputsize - grainsize) / stride) 1982 * where their 'inputsize' is our 'inputsize', 'grainsize' is 1983 * our 'stride + 3' and 'stride' is our 'stride'. 1984 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 1985 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 1986 * = 4 - (inputsize - 4) / stride; 1987 */ 1988 level = 4 - (inputsize - 4) / stride; 1989 } else { 1990 int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds, 1991 inputsize, stride); 1992 if (startlevel == INT_MIN) { 1993 level = 0; 1994 goto do_translation_fault; 1995 } 1996 level = startlevel; 1997 } 1998 1999 indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3); 2000 indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level))); 2001 2002 /* Now we can extract the actual base address from the TTBR */ 2003 descaddr = extract64(ttbr, 0, 48); 2004 2005 /* 2006 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR. 2007 * 2008 * Otherwise, if the base address is out of range, raise AddressSizeFault. 2009 * In the pseudocode, this is !IsZero(baseregister<47:outputsize>), 2010 * but we've just cleared the bits above 47, so simplify the test. 2011 */ 2012 if (outputsize > 48) { 2013 descaddr |= extract64(ttbr, 2, 4) << 48; 2014 } else if (descaddr >> outputsize) { 2015 level = 0; 2016 fi->type = ARMFault_AddressSize; 2017 goto do_fault; 2018 } 2019 2020 /* 2021 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 2022 * and also to mask out CnP (bit 0) which could validly be non-zero. 2023 */ 2024 descaddr &= ~indexmask; 2025 2026 /* 2027 * For AArch32, the address field in the descriptor goes up to bit 39 2028 * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0 2029 * or an AddressSize fault is raised. So for v8 we extract those SBZ 2030 * bits as part of the address, which will be checked via outputsize. 2031 * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2; 2032 * the highest bits of a 52-bit output are placed elsewhere. 2033 */ 2034 if (param.ds) { 2035 descaddrmask = MAKE_64BIT_MASK(0, 50); 2036 } else if (arm_feature(env, ARM_FEATURE_V8)) { 2037 descaddrmask = MAKE_64BIT_MASK(0, 48); 2038 } else { 2039 descaddrmask = MAKE_64BIT_MASK(0, 40); 2040 } 2041 descaddrmask &= ~indexmask_grainsize; 2042 tableattrs = 0; 2043 2044 next_level: 2045 descaddr |= (address >> (stride * (4 - level))) & indexmask; 2046 descaddr &= ~7ULL; 2047 2048 /* 2049 * Process the NSTable bit from the previous level. This changes 2050 * the table address space and the output space from Secure to 2051 * NonSecure. With RME, the EL3 translation regime does not change 2052 * from Root to NonSecure. 2053 */ 2054 if (ptw->cur_space == ARMSS_Secure 2055 && !regime_is_stage2(mmu_idx) 2056 && extract32(tableattrs, 4, 1)) { 2057 /* 2058 * Stage2_S -> Stage2 or Phys_S -> Phys_NS 2059 * Assert the relative order of the secure/non-secure indexes. 2060 */ 2061 QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS); 2062 QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2); 2063 ptw->in_ptw_idx += 1; 2064 ptw->cur_space = ARMSS_NonSecure; 2065 } 2066 2067 if (!S1_ptw_translate(env, ptw, descaddr, fi)) { 2068 goto do_fault; 2069 } 2070 descriptor = arm_ldq_ptw(env, ptw, fi); 2071 if (fi->type != ARMFault_None) { 2072 goto do_fault; 2073 } 2074 new_descriptor = descriptor; 2075 2076 restart_atomic_update: 2077 if (!(descriptor & 1) || 2078 (!(descriptor & 2) && 2079 !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) { 2080 /* Invalid, or a block descriptor at an invalid level */ 2081 goto do_translation_fault; 2082 } 2083 2084 descaddr = descriptor & descaddrmask; 2085 2086 /* 2087 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12] 2088 * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of 2089 * descaddr are in [9:8]. Otherwise, if descaddr is out of range, 2090 * raise AddressSizeFault. 2091 */ 2092 if (outputsize > 48) { 2093 if (param.ds) { 2094 descaddr |= extract64(descriptor, 8, 2) << 50; 2095 } else { 2096 descaddr |= extract64(descriptor, 12, 4) << 48; 2097 } 2098 } else if (descaddr >> outputsize) { 2099 fi->type = ARMFault_AddressSize; 2100 goto do_fault; 2101 } 2102 2103 if ((descriptor & 2) && (level < 3)) { 2104 /* 2105 * Table entry. The top five bits are attributes which may 2106 * propagate down through lower levels of the table (and 2107 * which are all arranged so that 0 means "no effect", so 2108 * we can gather them up by ORing in the bits at each level). 2109 */ 2110 tableattrs |= extract64(descriptor, 59, 5); 2111 level++; 2112 indexmask = indexmask_grainsize; 2113 goto next_level; 2114 } 2115 2116 /* 2117 * Block entry at level 1 or 2, or page entry at level 3. 2118 * These are basically the same thing, although the number 2119 * of bits we pull in from the vaddr varies. Note that although 2120 * descaddrmask masks enough of the low bits of the descriptor 2121 * to give a correct page or table address, the address field 2122 * in a block descriptor is smaller; so we need to explicitly 2123 * clear the lower bits here before ORing in the low vaddr bits. 2124 * 2125 * Afterward, descaddr is the final physical address. 2126 */ 2127 page_size = (1ULL << ((stride * (4 - level)) + 3)); 2128 descaddr &= ~(hwaddr)(page_size - 1); 2129 descaddr |= (address & (page_size - 1)); 2130 2131 /* 2132 * For AccessType_AT, DB is not updated (AArch64.SetDirtyFlag), 2133 * and it is IMPLEMENTATION DEFINED whether AF is updated 2134 * (AArch64.SetAccessFlag; qemu chooses to not update). 2135 */ 2136 if (likely(!ptw->in_at)) { 2137 /* 2138 * Access flag. 2139 * If HA is enabled, prepare to update the descriptor below. 2140 * Otherwise, pass the access fault on to software. 2141 */ 2142 if (!(descriptor & (1 << 10))) { 2143 if (param.ha) { 2144 new_descriptor |= 1 << 10; /* AF */ 2145 } else { 2146 fi->type = ARMFault_AccessFlag; 2147 goto do_fault; 2148 } 2149 } 2150 2151 /* 2152 * Dirty Bit. 2153 * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP 2154 * bit for writeback. The actual write protection test may still be 2155 * overridden by tableattrs, to be merged below. 2156 */ 2157 if (param.hd 2158 && extract64(descriptor, 51, 1) /* DBM */ 2159 && access_type == MMU_DATA_STORE) { 2160 if (regime_is_stage2(mmu_idx)) { 2161 new_descriptor |= 1ull << 7; /* set S2AP[1] */ 2162 } else { 2163 new_descriptor &= ~(1ull << 7); /* clear AP[2] */ 2164 } 2165 } 2166 } 2167 2168 /* 2169 * Extract attributes from the (modified) descriptor, and apply 2170 * table descriptors. Stage 2 table descriptors do not include 2171 * any attribute fields. HPD disables all the table attributes 2172 * except NSTable (which we have already handled). 2173 */ 2174 attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14)); 2175 if (!param.hpd) { 2176 attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ 2177 /* 2178 * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 2179 * means "force PL1 access only", which means forcing AP[1] to 0. 2180 */ 2181 attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */ 2182 attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */ 2183 } 2184 2185 ap = extract32(attrs, 6, 2); 2186 out_space = ptw->cur_space; 2187 if (regime_is_stage2(mmu_idx)) { 2188 if (param.pie) { 2189 int pi = extract64(attrs, 6, 1) 2190 | (extract64(attrs, 51, 1) << 1) 2191 | (extract64(attrs, 53, 2) << 2); 2192 int po = extract64(attrs, 60, 3); 2193 prot = get_S2prot_indirect(env, result, pi, po, ptw->in_s1_is_el0); 2194 } else { 2195 int xn = extract64(attrs, 53, 2); 2196 prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); 2197 /* Install TTW permissions in f.prot. */ 2198 result->f.prot = prot & (PAGE_READ | PAGE_WRITE); 2199 } 2200 /* 2201 * R_GYNXY: For stage2 in Realm security state, bit 55 is NS. 2202 * The bit remains ignored for other security states. 2203 * R_YMCSL: Executing an insn fetched from non-Realm causes 2204 * a stage2 permission fault. 2205 */ 2206 if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) { 2207 out_space = ARMSS_NonSecure; 2208 prot &= ~PAGE_EXEC; 2209 } 2210 result->s2prot = prot; 2211 2212 result->cacheattrs.is_s2_format = true; 2213 result->cacheattrs.attrs = extract32(attrs, 2, 4); 2214 /* 2215 * Security state does not really affect HCR_EL2.FWB; 2216 * we only need to filter FWB for aa32 or other FEAT. 2217 */ 2218 device = S2_attrs_are_device(arm_hcr_el2_eff(env), 2219 result->cacheattrs.attrs); 2220 } else { 2221 int nse, ns = extract32(attrs, 5, 1); 2222 uint8_t attrindx; 2223 uint64_t mair; 2224 2225 switch (out_space) { 2226 case ARMSS_Root: 2227 /* 2228 * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime. 2229 * R_XTYPW: NSE and NS together select the output pa space. 2230 */ 2231 nse = extract32(attrs, 11, 1); 2232 out_space = (nse << 1) | ns; 2233 if (out_space == ARMSS_Secure && 2234 !cpu_isar_feature(aa64_sel2, cpu)) { 2235 out_space = ARMSS_NonSecure; 2236 } 2237 break; 2238 case ARMSS_Secure: 2239 if (ns) { 2240 out_space = ARMSS_NonSecure; 2241 } 2242 break; 2243 case ARMSS_Realm: 2244 switch (mmu_idx) { 2245 case ARMMMUIdx_Stage1_E0: 2246 case ARMMMUIdx_Stage1_E1: 2247 case ARMMMUIdx_Stage1_E1_PAN: 2248 /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */ 2249 break; 2250 case ARMMMUIdx_E2: 2251 case ARMMMUIdx_E20_0: 2252 case ARMMMUIdx_E20_2: 2253 case ARMMMUIdx_E20_2_PAN: 2254 /* 2255 * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1, 2256 * NS changes the output to non-secure space. 2257 */ 2258 if (ns) { 2259 out_space = ARMSS_NonSecure; 2260 } 2261 break; 2262 default: 2263 g_assert_not_reached(); 2264 } 2265 break; 2266 case ARMSS_NonSecure: 2267 /* R_QRMFF: For NonSecure state, the NS bit is RES0. */ 2268 break; 2269 default: 2270 g_assert_not_reached(); 2271 } 2272 2273 if (param.pie) { 2274 int pi = extract64(attrs, 6, 1) 2275 | (extract64(attrs, 51, 1) << 1) 2276 | (extract64(attrs, 53, 2) << 2); 2277 int po = extract64(attrs, 60, 3); 2278 /* 2279 * Note that we modified ptw->in_space earlier for NSTable, but 2280 * result->f.attrs retains a copy of the original security space. 2281 */ 2282 prot = get_S1prot_indirect(env, ptw, mmu_idx, pi, po, 2283 result->f.attrs.space, out_space); 2284 } else if (regime_is_gcs(mmu_idx)) { 2285 /* 2286 * While one must use indirect permissions to successfully 2287 * use GCS instructions, AArch64.S1DirectBasePermissions 2288 * faithfully supplies s1perms.gcs = 0, Just In Case. 2289 */ 2290 prot = 0; 2291 } else { 2292 int xn = extract64(attrs, 54, 1); 2293 int pxn = extract64(attrs, 53, 1); 2294 int user_rw, prot_rw; 2295 2296 if (el == 1 && ptw->in_nv1) { 2297 /* 2298 * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, 2299 * the block/page descriptor bit 54 holds PXN, 2300 * 53 is RES0, and the effective value of UXN is 0. 2301 * Similarly for bits 59 and 60 in table descriptors 2302 * (which we have already folded into bits 53 and 54 of attrs). 2303 * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. 2304 * Similarly, APTable[0] from the table descriptor is treated 2305 * as 0; we already folded this into AP[1] and squashing 2306 * that to 0 does the right thing. 2307 */ 2308 pxn = xn; 2309 xn = 0; 2310 ap &= ~1; 2311 } 2312 2313 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 2314 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 2315 prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw, 2316 xn, pxn, ptw->in_space, out_space); 2317 } 2318 result->f.prot = prot; 2319 2320 /* Index into MAIR registers for cache attributes */ 2321 attrindx = extract32(attrs, 2, 3); 2322 mair = (param.aie && extract64(attrs, 59, 1) 2323 ? env->cp15.mair2_el[el] 2324 : env->cp15.mair_el[el]); 2325 result->cacheattrs.is_s2_format = false; 2326 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); 2327 2328 /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ 2329 if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { 2330 result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ 2331 } 2332 device = S1_attrs_are_device(result->cacheattrs.attrs); 2333 } 2334 2335 /* 2336 * Enable alignment checks on Device memory. 2337 * 2338 * Per R_XCHFJ, the correct ordering for alignment, permission, 2339 * and stage 2 faults is: 2340 * - Alignment fault caused by the memory type 2341 * - Permission fault 2342 * - A stage 2 fault on the memory access 2343 * Perform the alignment check now, so that we recognize it in 2344 * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent 2345 * softmmu tlb hit will also check the alignment; clear along the 2346 * non-device path so that tlb_fill_flags is consistent in the 2347 * event of restart_atomic_update. 2348 * 2349 * In v7, for a CPU without the Virtualization Extensions this 2350 * access is UNPREDICTABLE; we choose to make it take the alignment 2351 * fault as is required for a v7VE CPU. (QEMU doesn't emulate any 2352 * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.) 2353 */ 2354 if (device) { 2355 unsigned a_bits = memop_tlb_alignment_bits(memop, true); 2356 if (address & ((1 << a_bits) - 1)) { 2357 fi->type = ARMFault_Alignment; 2358 goto do_fault; 2359 } 2360 result->f.tlb_fill_flags = TLB_CHECK_ALIGNED; 2361 } else { 2362 result->f.tlb_fill_flags = 0; 2363 } 2364 2365 if (ptw->in_prot_check & ~prot) { 2366 fi->type = ARMFault_Permission; 2367 goto do_fault; 2368 } 2369 2370 /* S1PIE and S2PIE both have a bit for software dirty page tracking. */ 2371 if (access_type == MMU_DATA_STORE && param.pie) { 2372 /* 2373 * For S1PIE, bit 7 is nDirty and both HA and HD are checked. 2374 * For S2PIE, bit 7 is Dirty and only HD is checked. 2375 */ 2376 bool bit7 = extract64(attrs, 7, 1); 2377 if (regime_is_stage2(mmu_idx) 2378 ? !bit7 && !param.hd 2379 : bit7 && !(param.ha && param.hd)) { 2380 fi->type = ARMFault_Permission; 2381 fi->dirtybit = true; 2382 goto do_fault; 2383 } 2384 } 2385 2386 /* If FEAT_HAFDBS has made changes, update the PTE. */ 2387 if (new_descriptor != descriptor) { 2388 new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi); 2389 if (fi->type != ARMFault_None) { 2390 goto do_fault; 2391 } 2392 /* 2393 * I_YZSVV says that if the in-memory descriptor has changed, 2394 * then we must use the information in that new value 2395 * (which might include a different output address, different 2396 * attributes, or generate a fault). 2397 * Restart the handling of the descriptor value from scratch. 2398 */ 2399 if (new_descriptor != descriptor) { 2400 descriptor = new_descriptor; 2401 goto restart_atomic_update; 2402 } 2403 } 2404 2405 result->f.attrs.space = out_space; 2406 result->f.attrs.secure = arm_space_is_secure(out_space); 2407 2408 /* 2409 * For FEAT_LPA2 and effective DS, the SH field in the attributes 2410 * was re-purposed for output address bits. The SH attribute in 2411 * that case comes from TCR_ELx, which we extracted earlier. 2412 */ 2413 if (param.ds) { 2414 result->cacheattrs.shareability = param.sh; 2415 } else { 2416 result->cacheattrs.shareability = extract32(attrs, 8, 2); 2417 } 2418 2419 result->f.phys_addr = descaddr; 2420 result->f.lg_page_size = ctz64(page_size); 2421 return false; 2422 2423 do_translation_fault: 2424 fi->type = ARMFault_Translation; 2425 do_fault: 2426 if (fi->s1ptw) { 2427 /* Retain the existing stage 2 fi->level */ 2428 assert(fi->stage2); 2429 } else { 2430 fi->level = level; 2431 fi->stage2 = regime_is_stage2(mmu_idx); 2432 } 2433 fi->s1ns = fault_s1ns(ptw->cur_space, mmu_idx); 2434 return true; 2435 } 2436 2437 static bool get_phys_addr_pmsav5(CPUARMState *env, 2438 S1Translate *ptw, 2439 uint32_t address, 2440 MMUAccessType access_type, 2441 GetPhysAddrResult *result, 2442 ARMMMUFaultInfo *fi) 2443 { 2444 int n; 2445 uint32_t mask; 2446 uint32_t base; 2447 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 2448 bool is_user = regime_is_user(mmu_idx); 2449 2450 if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { 2451 /* MPU disabled. */ 2452 result->f.phys_addr = address; 2453 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 2454 return false; 2455 } 2456 2457 result->f.phys_addr = address; 2458 for (n = 7; n >= 0; n--) { 2459 base = env->cp15.c6_region[n]; 2460 if ((base & 1) == 0) { 2461 continue; 2462 } 2463 mask = 1 << ((base >> 1) & 0x1f); 2464 /* Keep this shift separate from the above to avoid an 2465 (undefined) << 32. */ 2466 mask = (mask << 1) - 1; 2467 if (((base ^ address) & ~mask) == 0) { 2468 break; 2469 } 2470 } 2471 if (n < 0) { 2472 fi->type = ARMFault_Background; 2473 return true; 2474 } 2475 2476 if (access_type == MMU_INST_FETCH) { 2477 mask = env->cp15.pmsav5_insn_ap; 2478 } else { 2479 mask = env->cp15.pmsav5_data_ap; 2480 } 2481 mask = (mask >> (n * 4)) & 0xf; 2482 switch (mask) { 2483 case 0: 2484 fi->type = ARMFault_Permission; 2485 fi->level = 1; 2486 return true; 2487 case 1: 2488 if (is_user) { 2489 fi->type = ARMFault_Permission; 2490 fi->level = 1; 2491 return true; 2492 } 2493 result->f.prot = PAGE_READ | PAGE_WRITE; 2494 break; 2495 case 2: 2496 result->f.prot = PAGE_READ; 2497 if (!is_user) { 2498 result->f.prot |= PAGE_WRITE; 2499 } 2500 break; 2501 case 3: 2502 result->f.prot = PAGE_READ | PAGE_WRITE; 2503 break; 2504 case 5: 2505 if (is_user) { 2506 fi->type = ARMFault_Permission; 2507 fi->level = 1; 2508 return true; 2509 } 2510 result->f.prot = PAGE_READ; 2511 break; 2512 case 6: 2513 result->f.prot = PAGE_READ; 2514 break; 2515 default: 2516 /* Bad permission. */ 2517 fi->type = ARMFault_Permission; 2518 fi->level = 1; 2519 return true; 2520 } 2521 result->f.prot |= PAGE_EXEC; 2522 return false; 2523 } 2524 2525 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx, 2526 int32_t address, uint8_t *prot) 2527 { 2528 if (!arm_feature(env, ARM_FEATURE_M)) { 2529 *prot = PAGE_READ | PAGE_WRITE; 2530 switch (address) { 2531 case 0xF0000000 ... 0xFFFFFFFF: 2532 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 2533 /* hivecs execing is ok */ 2534 *prot |= PAGE_EXEC; 2535 } 2536 break; 2537 case 0x00000000 ... 0x7FFFFFFF: 2538 *prot |= PAGE_EXEC; 2539 break; 2540 } 2541 } else { 2542 /* Default system address map for M profile cores. 2543 * The architecture specifies which regions are execute-never; 2544 * at the MPU level no other checks are defined. 2545 */ 2546 switch (address) { 2547 case 0x00000000 ... 0x1fffffff: /* ROM */ 2548 case 0x20000000 ... 0x3fffffff: /* SRAM */ 2549 case 0x60000000 ... 0x7fffffff: /* RAM */ 2550 case 0x80000000 ... 0x9fffffff: /* RAM */ 2551 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 2552 break; 2553 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 2554 case 0xa0000000 ... 0xbfffffff: /* Device */ 2555 case 0xc0000000 ... 0xdfffffff: /* Device */ 2556 case 0xe0000000 ... 0xffffffff: /* System */ 2557 *prot = PAGE_READ | PAGE_WRITE; 2558 break; 2559 default: 2560 g_assert_not_reached(); 2561 } 2562 } 2563 } 2564 2565 static bool m_is_ppb_region(CPUARMState *env, uint32_t address) 2566 { 2567 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 2568 return arm_feature(env, ARM_FEATURE_M) && 2569 extract32(address, 20, 12) == 0xe00; 2570 } 2571 2572 static bool m_is_system_region(CPUARMState *env, uint32_t address) 2573 { 2574 /* 2575 * True if address is in the M profile system region 2576 * 0xe0000000 - 0xffffffff 2577 */ 2578 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 2579 } 2580 2581 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, 2582 bool is_secure, bool is_user) 2583 { 2584 /* 2585 * Return true if we should use the default memory map as a 2586 * "background" region if there are no hits against any MPU regions. 2587 */ 2588 CPUARMState *env = &cpu->env; 2589 2590 if (is_user) { 2591 return false; 2592 } 2593 2594 if (arm_feature(env, ARM_FEATURE_M)) { 2595 return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 2596 } 2597 2598 if (mmu_idx == ARMMMUIdx_Stage2) { 2599 return false; 2600 } 2601 2602 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 2603 } 2604 2605 static bool get_phys_addr_pmsav7(CPUARMState *env, 2606 S1Translate *ptw, 2607 uint32_t address, 2608 MMUAccessType access_type, 2609 GetPhysAddrResult *result, 2610 ARMMMUFaultInfo *fi) 2611 { 2612 ARMCPU *cpu = env_archcpu(env); 2613 int n; 2614 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 2615 bool is_user = regime_is_user(mmu_idx); 2616 bool secure = arm_space_is_secure(ptw->in_space); 2617 2618 result->f.phys_addr = address; 2619 result->f.lg_page_size = TARGET_PAGE_BITS; 2620 result->f.prot = 0; 2621 2622 if (regime_translation_disabled(env, mmu_idx, ptw->in_space) || 2623 m_is_ppb_region(env, address)) { 2624 /* 2625 * MPU disabled or M profile PPB access: use default memory map. 2626 * The other case which uses the default memory map in the 2627 * v7M ARM ARM pseudocode is exception vector reads from the vector 2628 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 2629 * which always does a direct read using address_space_ldl(), rather 2630 * than going via this function, so we don't need to check that here. 2631 */ 2632 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); 2633 } else { /* MPU enabled */ 2634 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 2635 /* region search */ 2636 uint32_t base = env->pmsav7.drbar[n]; 2637 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 2638 uint32_t rmask; 2639 bool srdis = false; 2640 2641 if (!(env->pmsav7.drsr[n] & 0x1)) { 2642 continue; 2643 } 2644 2645 if (!rsize) { 2646 qemu_log_mask(LOG_GUEST_ERROR, 2647 "DRSR[%d]: Rsize field cannot be 0\n", n); 2648 continue; 2649 } 2650 rsize++; 2651 rmask = (1ull << rsize) - 1; 2652 2653 if (base & rmask) { 2654 qemu_log_mask(LOG_GUEST_ERROR, 2655 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 2656 "to DRSR region size, mask = 0x%" PRIx32 "\n", 2657 n, base, rmask); 2658 continue; 2659 } 2660 2661 if (address < base || address > base + rmask) { 2662 /* 2663 * Address not in this region. We must check whether the 2664 * region covers addresses in the same page as our address. 2665 * In that case we must not report a size that covers the 2666 * whole page for a subsequent hit against a different MPU 2667 * region or the background region, because it would result in 2668 * incorrect TLB hits for subsequent accesses to addresses that 2669 * are in this MPU region. 2670 */ 2671 if (ranges_overlap(base, rmask, 2672 address & TARGET_PAGE_MASK, 2673 TARGET_PAGE_SIZE)) { 2674 result->f.lg_page_size = 0; 2675 } 2676 continue; 2677 } 2678 2679 /* Region matched */ 2680 2681 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 2682 int i, snd; 2683 uint32_t srdis_mask; 2684 2685 rsize -= 3; /* sub region size (power of 2) */ 2686 snd = ((address - base) >> rsize) & 0x7; 2687 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 2688 2689 srdis_mask = srdis ? 0x3 : 0x0; 2690 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 2691 /* 2692 * This will check in groups of 2, 4 and then 8, whether 2693 * the subregion bits are consistent. rsize is incremented 2694 * back up to give the region size, considering consistent 2695 * adjacent subregions as one region. Stop testing if rsize 2696 * is already big enough for an entire QEMU page. 2697 */ 2698 int snd_rounded = snd & ~(i - 1); 2699 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 2700 snd_rounded + 8, i); 2701 if (srdis_mask ^ srdis_multi) { 2702 break; 2703 } 2704 srdis_mask = (srdis_mask << i) | srdis_mask; 2705 rsize++; 2706 } 2707 } 2708 if (srdis) { 2709 continue; 2710 } 2711 if (rsize < TARGET_PAGE_BITS) { 2712 result->f.lg_page_size = rsize; 2713 } 2714 break; 2715 } 2716 2717 if (n == -1) { /* no hits */ 2718 if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { 2719 /* background fault */ 2720 fi->type = ARMFault_Background; 2721 return true; 2722 } 2723 get_phys_addr_pmsav7_default(env, mmu_idx, address, 2724 &result->f.prot); 2725 } else { /* a MPU hit! */ 2726 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 2727 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 2728 2729 if (m_is_system_region(env, address)) { 2730 /* System space is always execute never */ 2731 xn = 1; 2732 } 2733 2734 if (is_user) { /* User mode AP bit decoding */ 2735 switch (ap) { 2736 case 0: 2737 case 1: 2738 case 5: 2739 break; /* no access */ 2740 case 3: 2741 result->f.prot |= PAGE_WRITE; 2742 /* fall through */ 2743 case 2: 2744 case 6: 2745 result->f.prot |= PAGE_READ | PAGE_EXEC; 2746 break; 2747 case 7: 2748 /* for v7M, same as 6; for R profile a reserved value */ 2749 if (arm_feature(env, ARM_FEATURE_M)) { 2750 result->f.prot |= PAGE_READ | PAGE_EXEC; 2751 break; 2752 } 2753 /* fall through */ 2754 default: 2755 qemu_log_mask(LOG_GUEST_ERROR, 2756 "DRACR[%d]: Bad value for AP bits: 0x%" 2757 PRIx32 "\n", n, ap); 2758 } 2759 } else { /* Priv. mode AP bits decoding */ 2760 switch (ap) { 2761 case 0: 2762 break; /* no access */ 2763 case 1: 2764 case 2: 2765 case 3: 2766 result->f.prot |= PAGE_WRITE; 2767 /* fall through */ 2768 case 5: 2769 case 6: 2770 result->f.prot |= PAGE_READ | PAGE_EXEC; 2771 break; 2772 case 7: 2773 /* for v7M, same as 6; for R profile a reserved value */ 2774 if (arm_feature(env, ARM_FEATURE_M)) { 2775 result->f.prot |= PAGE_READ | PAGE_EXEC; 2776 break; 2777 } 2778 /* fall through */ 2779 default: 2780 qemu_log_mask(LOG_GUEST_ERROR, 2781 "DRACR[%d]: Bad value for AP bits: 0x%" 2782 PRIx32 "\n", n, ap); 2783 } 2784 } 2785 2786 /* execute never */ 2787 if (xn) { 2788 result->f.prot &= ~PAGE_EXEC; 2789 } 2790 } 2791 } 2792 2793 fi->type = ARMFault_Permission; 2794 fi->level = 1; 2795 return (ptw->in_prot_check & ~result->f.prot) != 0; 2796 } 2797 2798 static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx, 2799 uint32_t secure) 2800 { 2801 if (regime_el(mmu_idx) == 2) { 2802 return env->pmsav8.hprbar; 2803 } else { 2804 return env->pmsav8.rbar[secure]; 2805 } 2806 } 2807 2808 static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx, 2809 uint32_t secure) 2810 { 2811 if (regime_el(mmu_idx) == 2) { 2812 return env->pmsav8.hprlar; 2813 } else { 2814 return env->pmsav8.rlar[secure]; 2815 } 2816 } 2817 2818 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 2819 MMUAccessType access_type, unsigned prot_check, 2820 ARMMMUIdx mmu_idx, bool secure, 2821 GetPhysAddrResult *result, 2822 ARMMMUFaultInfo *fi, uint32_t *mregion) 2823 { 2824 /* 2825 * Perform a PMSAv8 MPU lookup (without also doing the SAU check 2826 * that a full phys-to-virt translation does). 2827 * mregion is (if not NULL) set to the region number which matched, 2828 * or -1 if no region number is returned (MPU off, address did not 2829 * hit a region, address hit in multiple regions). 2830 * If the region hit doesn't cover the entire TARGET_PAGE the address 2831 * is within, then we set the result page_size to 1 to force the 2832 * memory system to use a subpage. 2833 */ 2834 ARMCPU *cpu = env_archcpu(env); 2835 bool is_user = regime_is_user(mmu_idx); 2836 int n; 2837 int matchregion = -1; 2838 bool hit = false; 2839 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 2840 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 2841 int region_counter; 2842 2843 if (regime_el(mmu_idx) == 2) { 2844 region_counter = cpu->pmsav8r_hdregion; 2845 } else { 2846 region_counter = cpu->pmsav7_dregion; 2847 } 2848 2849 result->f.lg_page_size = TARGET_PAGE_BITS; 2850 result->f.phys_addr = address; 2851 result->f.prot = 0; 2852 if (mregion) { 2853 *mregion = -1; 2854 } 2855 2856 if (mmu_idx == ARMMMUIdx_Stage2) { 2857 fi->stage2 = true; 2858 } 2859 2860 /* 2861 * Unlike the ARM ARM pseudocode, we don't need to check whether this 2862 * was an exception vector read from the vector table (which is always 2863 * done using the default system address map), because those accesses 2864 * are done in arm_v7m_load_vector(), which always does a direct 2865 * read using address_space_ldl(), rather than going via this function. 2866 */ 2867 if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) { 2868 /* MPU disabled */ 2869 hit = true; 2870 } else if (m_is_ppb_region(env, address)) { 2871 hit = true; 2872 } else { 2873 if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { 2874 hit = true; 2875 } 2876 2877 uint32_t bitmask; 2878 if (arm_feature(env, ARM_FEATURE_M)) { 2879 bitmask = 0x1f; 2880 } else { 2881 bitmask = 0x3f; 2882 fi->level = 0; 2883 } 2884 2885 for (n = region_counter - 1; n >= 0; n--) { 2886 /* region search */ 2887 /* 2888 * Note that the base address is bits [31:x] from the register 2889 * with bits [x-1:0] all zeroes, but the limit address is bits 2890 * [31:x] from the register with bits [x:0] all ones. Where x is 2891 * 5 for Cortex-M and 6 for Cortex-R 2892 */ 2893 uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask; 2894 uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask; 2895 2896 if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) { 2897 /* Region disabled */ 2898 continue; 2899 } 2900 2901 if (address < base || address > limit) { 2902 /* 2903 * Address not in this region. We must check whether the 2904 * region covers addresses in the same page as our address. 2905 * In that case we must not report a size that covers the 2906 * whole page for a subsequent hit against a different MPU 2907 * region or the background region, because it would result in 2908 * incorrect TLB hits for subsequent accesses to addresses that 2909 * are in this MPU region. 2910 */ 2911 if (limit >= base && 2912 ranges_overlap(base, limit - base + 1, 2913 addr_page_base, 2914 TARGET_PAGE_SIZE)) { 2915 result->f.lg_page_size = 0; 2916 } 2917 continue; 2918 } 2919 2920 if (base > addr_page_base || limit < addr_page_limit) { 2921 result->f.lg_page_size = 0; 2922 } 2923 2924 if (matchregion != -1) { 2925 /* 2926 * Multiple regions match -- always a failure (unlike 2927 * PMSAv7 where highest-numbered-region wins) 2928 */ 2929 fi->type = ARMFault_Permission; 2930 if (arm_feature(env, ARM_FEATURE_M)) { 2931 fi->level = 1; 2932 } 2933 return true; 2934 } 2935 2936 matchregion = n; 2937 hit = true; 2938 } 2939 } 2940 2941 if (!hit) { 2942 if (arm_feature(env, ARM_FEATURE_M)) { 2943 fi->type = ARMFault_Background; 2944 } else { 2945 fi->type = ARMFault_Permission; 2946 } 2947 return true; 2948 } 2949 2950 if (matchregion == -1) { 2951 /* hit using the background region */ 2952 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); 2953 } else { 2954 uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion]; 2955 uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion]; 2956 uint32_t ap = extract32(matched_rbar, 1, 2); 2957 uint32_t xn = extract32(matched_rbar, 0, 1); 2958 bool pxn = false; 2959 2960 if (arm_feature(env, ARM_FEATURE_V8_1M)) { 2961 pxn = extract32(matched_rlar, 4, 1); 2962 } 2963 2964 if (m_is_system_region(env, address)) { 2965 /* System space is always execute never */ 2966 xn = 1; 2967 } 2968 2969 if (regime_el(mmu_idx) == 2) { 2970 result->f.prot = simple_ap_to_rw_prot_is_user(ap, 2971 mmu_idx != ARMMMUIdx_E2); 2972 } else { 2973 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 2974 } 2975 2976 if (!arm_feature(env, ARM_FEATURE_M)) { 2977 uint8_t attrindx = extract32(matched_rlar, 1, 3); 2978 uint64_t mair = env->cp15.mair_el[regime_el(mmu_idx)]; 2979 uint8_t sh = extract32(matched_rlar, 3, 2); 2980 2981 if (regime_sctlr(env, mmu_idx) & SCTLR_WXN && 2982 result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) { 2983 xn = 0x1; 2984 } 2985 2986 if ((regime_el(mmu_idx) == 1) && 2987 regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) { 2988 pxn = 0x1; 2989 } 2990 2991 result->cacheattrs.is_s2_format = false; 2992 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); 2993 result->cacheattrs.shareability = sh; 2994 } 2995 2996 if (result->f.prot && !xn && !(pxn && !is_user)) { 2997 result->f.prot |= PAGE_EXEC; 2998 } 2999 3000 if (mregion) { 3001 *mregion = matchregion; 3002 } 3003 } 3004 3005 fi->type = ARMFault_Permission; 3006 if (arm_feature(env, ARM_FEATURE_M)) { 3007 fi->level = 1; 3008 } 3009 return (prot_check & ~result->f.prot) != 0; 3010 } 3011 3012 static bool v8m_is_sau_exempt(CPUARMState *env, 3013 uint32_t address, MMUAccessType access_type) 3014 { 3015 /* 3016 * The architecture specifies that certain address ranges are 3017 * exempt from v8M SAU/IDAU checks. 3018 */ 3019 return 3020 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 3021 (address >= 0xe0000000 && address <= 0xe0002fff) || 3022 (address >= 0xe000e000 && address <= 0xe000efff) || 3023 (address >= 0xe002e000 && address <= 0xe002efff) || 3024 (address >= 0xe0040000 && address <= 0xe0041fff) || 3025 (address >= 0xe00ff000 && address <= 0xe00fffff); 3026 } 3027 3028 void v8m_security_lookup(CPUARMState *env, uint32_t address, 3029 MMUAccessType access_type, ARMMMUIdx mmu_idx, 3030 bool is_secure, V8M_SAttributes *sattrs) 3031 { 3032 /* 3033 * Look up the security attributes for this address. Compare the 3034 * pseudocode SecurityCheck() function. 3035 * We assume the caller has zero-initialized *sattrs. 3036 */ 3037 ARMCPU *cpu = env_archcpu(env); 3038 int r; 3039 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 3040 int idau_region = IREGION_NOTVALID; 3041 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 3042 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 3043 3044 if (cpu->idau) { 3045 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 3046 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 3047 3048 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 3049 &idau_nsc); 3050 } 3051 3052 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 3053 /* 0xf0000000..0xffffffff is always S for insn fetches */ 3054 return; 3055 } 3056 3057 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 3058 sattrs->ns = !is_secure; 3059 return; 3060 } 3061 3062 if (idau_region != IREGION_NOTVALID) { 3063 sattrs->irvalid = true; 3064 sattrs->iregion = idau_region; 3065 } 3066 3067 switch (env->sau.ctrl & 3) { 3068 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 3069 break; 3070 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 3071 sattrs->ns = true; 3072 break; 3073 default: /* SAU.ENABLE == 1 */ 3074 for (r = 0; r < cpu->sau_sregion; r++) { 3075 if (env->sau.rlar[r] & 1) { 3076 uint32_t base = env->sau.rbar[r] & ~0x1f; 3077 uint32_t limit = env->sau.rlar[r] | 0x1f; 3078 3079 if (base <= address && limit >= address) { 3080 if (base > addr_page_base || limit < addr_page_limit) { 3081 sattrs->subpage = true; 3082 } 3083 if (sattrs->srvalid) { 3084 /* 3085 * If we hit in more than one region then we must report 3086 * as Secure, not NS-Callable, with no valid region 3087 * number info. 3088 */ 3089 sattrs->ns = false; 3090 sattrs->nsc = false; 3091 sattrs->sregion = 0; 3092 sattrs->srvalid = false; 3093 break; 3094 } else { 3095 if (env->sau.rlar[r] & 2) { 3096 sattrs->nsc = true; 3097 } else { 3098 sattrs->ns = true; 3099 } 3100 sattrs->srvalid = true; 3101 sattrs->sregion = r; 3102 } 3103 } else { 3104 /* 3105 * Address not in this region. We must check whether the 3106 * region covers addresses in the same page as our address. 3107 * In that case we must not report a size that covers the 3108 * whole page for a subsequent hit against a different MPU 3109 * region or the background region, because it would result 3110 * in incorrect TLB hits for subsequent accesses to 3111 * addresses that are in this MPU region. 3112 */ 3113 if (limit >= base && 3114 ranges_overlap(base, limit - base + 1, 3115 addr_page_base, 3116 TARGET_PAGE_SIZE)) { 3117 sattrs->subpage = true; 3118 } 3119 } 3120 } 3121 } 3122 break; 3123 } 3124 3125 /* 3126 * The IDAU will override the SAU lookup results if it specifies 3127 * higher security than the SAU does. 3128 */ 3129 if (!idau_ns) { 3130 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 3131 sattrs->ns = false; 3132 sattrs->nsc = idau_nsc; 3133 } 3134 } 3135 } 3136 3137 static bool get_phys_addr_pmsav8(CPUARMState *env, 3138 S1Translate *ptw, 3139 uint32_t address, 3140 MMUAccessType access_type, 3141 GetPhysAddrResult *result, 3142 ARMMMUFaultInfo *fi) 3143 { 3144 V8M_SAttributes sattrs = {}; 3145 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 3146 bool secure = arm_space_is_secure(ptw->in_space); 3147 bool ret; 3148 3149 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 3150 v8m_security_lookup(env, address, access_type, mmu_idx, 3151 secure, &sattrs); 3152 if (access_type == MMU_INST_FETCH) { 3153 /* 3154 * Instruction fetches always use the MMU bank and the 3155 * transaction attribute determined by the fetch address, 3156 * regardless of CPU state. This is painful for QEMU 3157 * to handle, because it would mean we need to encode 3158 * into the mmu_idx not just the (user, negpri) information 3159 * for the current security state but also that for the 3160 * other security state, which would balloon the number 3161 * of mmu_idx values needed alarmingly. 3162 * Fortunately we can avoid this because it's not actually 3163 * possible to arbitrarily execute code from memory with 3164 * the wrong security attribute: it will always generate 3165 * an exception of some kind or another, apart from the 3166 * special case of an NS CPU executing an SG instruction 3167 * in S&NSC memory. So we always just fail the translation 3168 * here and sort things out in the exception handler 3169 * (including possibly emulating an SG instruction). 3170 */ 3171 if (sattrs.ns != !secure) { 3172 if (sattrs.nsc) { 3173 fi->type = ARMFault_QEMU_NSCExec; 3174 } else { 3175 fi->type = ARMFault_QEMU_SFault; 3176 } 3177 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; 3178 result->f.phys_addr = address; 3179 result->f.prot = 0; 3180 return true; 3181 } 3182 } else { 3183 /* 3184 * For data accesses we always use the MMU bank indicated 3185 * by the current CPU state, but the security attributes 3186 * might downgrade a secure access to nonsecure. 3187 */ 3188 if (sattrs.ns) { 3189 result->f.attrs.secure = false; 3190 result->f.attrs.space = ARMSS_NonSecure; 3191 } else if (!secure) { 3192 /* 3193 * NS access to S memory must fault. 3194 * Architecturally we should first check whether the 3195 * MPU information for this address indicates that we 3196 * are doing an unaligned access to Device memory, which 3197 * should generate a UsageFault instead. QEMU does not 3198 * currently check for that kind of unaligned access though. 3199 * If we added it we would need to do so as a special case 3200 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 3201 */ 3202 fi->type = ARMFault_QEMU_SFault; 3203 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; 3204 result->f.phys_addr = address; 3205 result->f.prot = 0; 3206 return true; 3207 } 3208 } 3209 } 3210 3211 ret = pmsav8_mpu_lookup(env, address, access_type, ptw->in_prot_check, 3212 mmu_idx, secure, result, fi, NULL); 3213 if (sattrs.subpage) { 3214 result->f.lg_page_size = 0; 3215 } 3216 return ret; 3217 } 3218 3219 /* 3220 * Translate from the 4-bit stage 2 representation of 3221 * memory attributes (without cache-allocation hints) to 3222 * the 8-bit representation of the stage 1 MAIR registers 3223 * (which includes allocation hints). 3224 * 3225 * ref: shared/translation/attrs/S2AttrDecode() 3226 * .../S2ConvertAttrsHints() 3227 */ 3228 static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs) 3229 { 3230 uint8_t hiattr = extract32(s2attrs, 2, 2); 3231 uint8_t loattr = extract32(s2attrs, 0, 2); 3232 uint8_t hihint = 0, lohint = 0; 3233 3234 if (hiattr != 0) { /* normal memory */ 3235 if (hcr & HCR_CD) { /* cache disabled */ 3236 hiattr = loattr = 1; /* non-cacheable */ 3237 } else { 3238 if (hiattr != 1) { /* Write-through or write-back */ 3239 hihint = 3; /* RW allocate */ 3240 } 3241 if (loattr != 1) { /* Write-through or write-back */ 3242 lohint = 3; /* RW allocate */ 3243 } 3244 } 3245 } 3246 3247 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 3248 } 3249 3250 /* 3251 * Combine either inner or outer cacheability attributes for normal 3252 * memory, according to table D4-42 and pseudocode procedure 3253 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 3254 * 3255 * NB: only stage 1 includes allocation hints (RW bits), leading to 3256 * some asymmetry. 3257 */ 3258 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 3259 { 3260 if (s1 == 4 || s2 == 4) { 3261 /* non-cacheable has precedence */ 3262 return 4; 3263 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 3264 /* stage 1 write-through takes precedence */ 3265 return s1; 3266 } else if (extract32(s2, 2, 2) == 2) { 3267 /* stage 2 write-through takes precedence, but the allocation hint 3268 * is still taken from stage 1 3269 */ 3270 return (2 << 2) | extract32(s1, 0, 2); 3271 } else { /* write-back */ 3272 return s1; 3273 } 3274 } 3275 3276 /* 3277 * Combine the memory type and cacheability attributes of 3278 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the 3279 * combined attributes in MAIR_EL1 format. 3280 */ 3281 static uint8_t combined_attrs_nofwb(uint64_t hcr, 3282 ARMCacheAttrs s1, ARMCacheAttrs s2) 3283 { 3284 uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs; 3285 3286 if (s2.is_s2_format) { 3287 s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs); 3288 } else { 3289 s2_mair_attrs = s2.attrs; 3290 } 3291 3292 s1lo = extract32(s1.attrs, 0, 4); 3293 s2lo = extract32(s2_mair_attrs, 0, 4); 3294 s1hi = extract32(s1.attrs, 4, 4); 3295 s2hi = extract32(s2_mair_attrs, 4, 4); 3296 3297 /* Combine memory type and cacheability attributes */ 3298 if (s1hi == 0 || s2hi == 0) { 3299 /* Device has precedence over normal */ 3300 if (s1lo == 0 || s2lo == 0) { 3301 /* nGnRnE has precedence over anything */ 3302 ret_attrs = 0; 3303 } else if (s1lo == 4 || s2lo == 4) { 3304 /* non-Reordering has precedence over Reordering */ 3305 ret_attrs = 4; /* nGnRE */ 3306 } else if (s1lo == 8 || s2lo == 8) { 3307 /* non-Gathering has precedence over Gathering */ 3308 ret_attrs = 8; /* nGRE */ 3309 } else { 3310 ret_attrs = 0xc; /* GRE */ 3311 } 3312 } else { /* Normal memory */ 3313 /* Outer/inner cacheability combine independently */ 3314 ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 3315 | combine_cacheattr_nibble(s1lo, s2lo); 3316 } 3317 return ret_attrs; 3318 } 3319 3320 static uint8_t force_cacheattr_nibble_wb(uint8_t attr) 3321 { 3322 /* 3323 * Given the 4 bits specifying the outer or inner cacheability 3324 * in MAIR format, return a value specifying Normal Write-Back, 3325 * with the allocation and transient hints taken from the input 3326 * if the input specified some kind of cacheable attribute. 3327 */ 3328 if (attr == 0 || attr == 4) { 3329 /* 3330 * 0 == an UNPREDICTABLE encoding 3331 * 4 == Non-cacheable 3332 * Either way, force Write-Back RW allocate non-transient 3333 */ 3334 return 0xf; 3335 } 3336 /* Change WriteThrough to WriteBack, keep allocation and transient hints */ 3337 return attr | 4; 3338 } 3339 3340 /* 3341 * Combine the memory type and cacheability attributes of 3342 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the 3343 * combined attributes in MAIR_EL1 format. 3344 */ 3345 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2) 3346 { 3347 assert(s2.is_s2_format && !s1.is_s2_format); 3348 3349 switch (s2.attrs) { 3350 case 7: 3351 /* Use stage 1 attributes */ 3352 return s1.attrs; 3353 case 6: 3354 /* 3355 * Force Normal Write-Back. Note that if S1 is Normal cacheable 3356 * then we take the allocation hints from it; otherwise it is 3357 * RW allocate, non-transient. 3358 */ 3359 if ((s1.attrs & 0xf0) == 0) { 3360 /* S1 is Device */ 3361 return 0xff; 3362 } 3363 /* Need to check the Inner and Outer nibbles separately */ 3364 return force_cacheattr_nibble_wb(s1.attrs & 0xf) | 3365 force_cacheattr_nibble_wb(s1.attrs >> 4) << 4; 3366 case 5: 3367 /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */ 3368 if ((s1.attrs & 0xf0) == 0) { 3369 return s1.attrs; 3370 } 3371 return 0x44; 3372 case 0 ... 3: 3373 /* Force Device, of subtype specified by S2 */ 3374 return s2.attrs << 2; 3375 default: 3376 /* 3377 * RESERVED values (including RES0 descriptor bit [5] being nonzero); 3378 * arbitrarily force Device. 3379 */ 3380 return 0; 3381 } 3382 } 3383 3384 /* 3385 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 3386 * and CombineS1S2Desc() 3387 * 3388 * @env: CPUARMState 3389 * @s1: Attributes from stage 1 walk 3390 * @s2: Attributes from stage 2 walk 3391 */ 3392 static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, 3393 ARMCacheAttrs s1, ARMCacheAttrs s2) 3394 { 3395 ARMCacheAttrs ret; 3396 bool tagged = false; 3397 3398 assert(!s1.is_s2_format); 3399 ret.is_s2_format = false; 3400 3401 if (s1.attrs == 0xf0) { 3402 tagged = true; 3403 s1.attrs = 0xff; 3404 } 3405 3406 /* Combine shareability attributes (table D4-43) */ 3407 if (s1.shareability == 2 || s2.shareability == 2) { 3408 /* if either are outer-shareable, the result is outer-shareable */ 3409 ret.shareability = 2; 3410 } else if (s1.shareability == 3 || s2.shareability == 3) { 3411 /* if either are inner-shareable, the result is inner-shareable */ 3412 ret.shareability = 3; 3413 } else { 3414 /* both non-shareable */ 3415 ret.shareability = 0; 3416 } 3417 3418 /* Combine memory type and cacheability attributes */ 3419 if (hcr & HCR_FWB) { 3420 ret.attrs = combined_attrs_fwb(s1, s2); 3421 } else { 3422 ret.attrs = combined_attrs_nofwb(hcr, s1, s2); 3423 } 3424 3425 /* 3426 * Any location for which the resultant memory type is any 3427 * type of Device memory is always treated as Outer Shareable. 3428 * Any location for which the resultant memory type is Normal 3429 * Inner Non-cacheable, Outer Non-cacheable is always treated 3430 * as Outer Shareable. 3431 * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC 3432 */ 3433 if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) { 3434 ret.shareability = 2; 3435 } 3436 3437 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ 3438 if (tagged && ret.attrs == 0xff) { 3439 ret.attrs = 0xf0; 3440 } 3441 3442 return ret; 3443 } 3444 3445 /* 3446 * MMU disabled. S1 addresses within aa64 translation regimes are 3447 * still checked for bounds -- see AArch64.S1DisabledOutput(). 3448 */ 3449 static bool get_phys_addr_disabled(CPUARMState *env, 3450 S1Translate *ptw, 3451 vaddr address, 3452 MMUAccessType access_type, 3453 GetPhysAddrResult *result, 3454 ARMMMUFaultInfo *fi) 3455 { 3456 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 3457 uint8_t memattr = 0x00; /* Device nGnRnE */ 3458 uint8_t shareability = 0; /* non-shareable */ 3459 int r_el; 3460 3461 switch (mmu_idx) { 3462 case ARMMMUIdx_Stage2: 3463 case ARMMMUIdx_Stage2_S: 3464 case ARMMMUIdx_Phys_S: 3465 case ARMMMUIdx_Phys_NS: 3466 case ARMMMUIdx_Phys_Root: 3467 case ARMMMUIdx_Phys_Realm: 3468 break; 3469 3470 default: 3471 r_el = regime_el(mmu_idx); 3472 if (arm_el_is_aa64(env, r_el)) { 3473 int pamax = arm_pamax(env_archcpu(env)); 3474 uint64_t tcr = env->cp15.tcr_el[r_el]; 3475 int addrtop, tbi; 3476 3477 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 3478 if (access_type == MMU_INST_FETCH) { 3479 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 3480 } 3481 tbi = (tbi >> extract64(address, 55, 1)) & 1; 3482 addrtop = (tbi ? 55 : 63); 3483 3484 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 3485 fi->type = ARMFault_AddressSize; 3486 fi->level = 0; 3487 fi->stage2 = false; 3488 return 1; 3489 } 3490 3491 /* 3492 * When TBI is disabled, we've just validated that all of the 3493 * bits above PAMax are zero, so logically we only need to 3494 * clear the top byte for TBI. But it's clearer to follow 3495 * the pseudocode set of addrdesc.paddress. 3496 */ 3497 address = extract64(address, 0, 52); 3498 } 3499 3500 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ 3501 if (r_el == 1) { 3502 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 3503 if (hcr & HCR_DC) { 3504 if (hcr & HCR_DCT) { 3505 memattr = 0xf0; /* Tagged, Normal, WB, RWA */ 3506 } else { 3507 memattr = 0xff; /* Normal, WB, RWA */ 3508 } 3509 } 3510 } 3511 if (memattr == 0) { 3512 if (access_type == MMU_INST_FETCH) { 3513 if (regime_sctlr(env, mmu_idx) & SCTLR_I) { 3514 memattr = 0xee; /* Normal, WT, RA, NT */ 3515 } else { 3516 memattr = 0x44; /* Normal, NC, No */ 3517 } 3518 } 3519 shareability = 2; /* outer shareable */ 3520 } 3521 result->cacheattrs.is_s2_format = false; 3522 break; 3523 } 3524 3525 result->f.phys_addr = address; 3526 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 3527 result->f.lg_page_size = TARGET_PAGE_BITS; 3528 result->cacheattrs.shareability = shareability; 3529 result->cacheattrs.attrs = memattr; 3530 return false; 3531 } 3532 3533 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, 3534 vaddr address, 3535 MMUAccessType access_type, MemOp memop, 3536 GetPhysAddrResult *result, 3537 ARMMMUFaultInfo *fi) 3538 { 3539 hwaddr ipa; 3540 int s1_prot, s1_lgpgsz; 3541 ARMSecuritySpace in_space = ptw->in_space; 3542 bool ret, ipa_secure, s1_guarded; 3543 ARMCacheAttrs cacheattrs1; 3544 ARMSecuritySpace ipa_space; 3545 uint64_t hcr; 3546 3547 ret = get_phys_addr_nogpc(env, ptw, address, access_type, 3548 memop, result, fi); 3549 3550 /* If S1 fails, return early. */ 3551 if (ret) { 3552 return ret; 3553 } 3554 3555 ipa = result->f.phys_addr; 3556 ipa_secure = result->f.attrs.secure; 3557 ipa_space = result->f.attrs.space; 3558 3559 ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; 3560 ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 3561 ptw->in_space = ipa_space; 3562 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx); 3563 3564 /* 3565 * S1 is done, now do S2 translation. 3566 * Save the stage1 results so that we may merge prot and cacheattrs later. 3567 */ 3568 s1_prot = result->f.prot; 3569 s1_lgpgsz = result->f.lg_page_size; 3570 s1_guarded = result->f.extra.arm.guarded; 3571 cacheattrs1 = result->cacheattrs; 3572 memset(result, 0, sizeof(*result)); 3573 3574 ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, 3575 memop, result, fi); 3576 fi->s2addr = ipa; 3577 3578 /* Combine the S1 and S2 perms. */ 3579 result->f.prot = s1_prot & result->s2prot; 3580 3581 /* If S2 fails, return early. */ 3582 if (ret) { 3583 return ret; 3584 } 3585 3586 /* 3587 * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE, 3588 * this means "don't put this in the TLB"; in this case, return a 3589 * result with lg_page_size == 0 to achieve that. Otherwise, 3590 * use the maximum of the S1 & S2 page size, so that invalidation 3591 * of pages > TARGET_PAGE_SIZE works correctly. (This works even though 3592 * we know the combined result permissions etc only cover the minimum 3593 * of the S1 and S2 page size, because we know that the common TLB code 3594 * never actually creates TLB entries bigger than TARGET_PAGE_SIZE, 3595 * and passing a larger page size value only affects invalidations.) 3596 */ 3597 if (result->f.lg_page_size < TARGET_PAGE_BITS || 3598 s1_lgpgsz < TARGET_PAGE_BITS) { 3599 result->f.lg_page_size = 0; 3600 } else if (result->f.lg_page_size < s1_lgpgsz) { 3601 result->f.lg_page_size = s1_lgpgsz; 3602 } 3603 3604 /* Combine the S1 and S2 cache attributes. */ 3605 hcr = arm_hcr_el2_eff_secstate(env, in_space); 3606 if (hcr & HCR_DC) { 3607 /* 3608 * HCR.DC forces the first stage attributes to 3609 * Normal Non-Shareable, 3610 * Inner Write-Back Read-Allocate Write-Allocate, 3611 * Outer Write-Back Read-Allocate Write-Allocate. 3612 * Do not overwrite Tagged within attrs. 3613 */ 3614 if (cacheattrs1.attrs != 0xf0) { 3615 cacheattrs1.attrs = 0xff; 3616 } 3617 cacheattrs1.shareability = 0; 3618 } 3619 result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1, 3620 result->cacheattrs); 3621 3622 /* No BTI GP information in stage 2, we just use the S1 value */ 3623 result->f.extra.arm.guarded = s1_guarded; 3624 3625 /* 3626 * Check if IPA translates to secure or non-secure PA space. 3627 * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. 3628 */ 3629 if (in_space == ARMSS_Secure) { 3630 result->f.attrs.secure = 3631 !(env->cp15.vstcr_el2 & (R_VSTCR_SA_MASK | R_VSTCR_SW_MASK)) 3632 && (ipa_secure 3633 || !(env->cp15.vtcr_el2 & (R_VTCR_NSA_MASK | R_VTCR_NSW_MASK))); 3634 result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); 3635 } 3636 3637 return false; 3638 } 3639 3640 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, 3641 vaddr address, 3642 MMUAccessType access_type, MemOp memop, 3643 GetPhysAddrResult *result, 3644 ARMMMUFaultInfo *fi) 3645 { 3646 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 3647 ARMMMUIdx s1_mmu_idx; 3648 3649 /* 3650 * The page table entries may downgrade Secure to NonSecure, but 3651 * cannot upgrade a NonSecure translation regime's attributes 3652 * to Secure or Realm. 3653 */ 3654 ptw->cur_space = ptw->in_space; 3655 result->f.attrs.space = ptw->in_space; 3656 result->f.attrs.secure = arm_space_is_secure(ptw->in_space); 3657 3658 switch (mmu_idx) { 3659 case ARMMMUIdx_Phys_S: 3660 case ARMMMUIdx_Phys_NS: 3661 case ARMMMUIdx_Phys_Root: 3662 case ARMMMUIdx_Phys_Realm: 3663 /* Checking Phys early avoids special casing later vs regime_el. */ 3664 return get_phys_addr_disabled(env, ptw, address, access_type, 3665 result, fi); 3666 3667 case ARMMMUIdx_Stage1_E0: 3668 case ARMMMUIdx_Stage1_E1: 3669 case ARMMMUIdx_Stage1_E1_PAN: 3670 /* 3671 * First stage lookup uses second stage for ptw; only 3672 * Secure has both S and NS IPA and starts with Stage2_S. 3673 */ 3674 ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ? 3675 ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 3676 break; 3677 3678 case ARMMMUIdx_Stage2: 3679 case ARMMMUIdx_Stage2_S: 3680 /* 3681 * Second stage lookup uses physical for ptw; whether this is S or 3682 * NS may depend on the SW/NSW bits if this is a stage 2 lookup for 3683 * the Secure EL2&0 regime. 3684 */ 3685 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx); 3686 break; 3687 3688 case ARMMMUIdx_E10_0: 3689 s1_mmu_idx = ARMMMUIdx_Stage1_E0; 3690 goto do_twostage; 3691 case ARMMMUIdx_E10_1: 3692 s1_mmu_idx = ARMMMUIdx_Stage1_E1; 3693 goto do_twostage; 3694 case ARMMMUIdx_E10_1_PAN: 3695 s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN; 3696 do_twostage: 3697 /* 3698 * Call ourselves recursively to do the stage 1 and then stage 2 3699 * translations if mmu_idx is a two-stage regime, and EL2 present. 3700 * Otherwise, a stage1+stage2 translation is just stage 1. 3701 */ 3702 ptw->in_mmu_idx = mmu_idx = s1_mmu_idx; 3703 if (arm_feature(env, ARM_FEATURE_EL2) && 3704 !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) { 3705 return get_phys_addr_twostage(env, ptw, address, access_type, 3706 memop, result, fi); 3707 } 3708 /* fall through */ 3709 3710 default: 3711 /* Single stage uses physical for ptw. */ 3712 ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space); 3713 break; 3714 } 3715 3716 result->f.attrs.user = regime_is_user(mmu_idx); 3717 3718 /* 3719 * Fast Context Switch Extension. This doesn't exist at all in v8. 3720 * In v7 and earlier it affects all stage 1 translations. 3721 */ 3722 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 3723 && !arm_feature(env, ARM_FEATURE_V8)) { 3724 if (regime_el(mmu_idx) == 3) { 3725 address += env->cp15.fcseidr_s; 3726 } else { 3727 address += env->cp15.fcseidr_ns; 3728 } 3729 } 3730 3731 if (arm_feature(env, ARM_FEATURE_PMSA)) { 3732 bool ret; 3733 result->f.lg_page_size = TARGET_PAGE_BITS; 3734 3735 if (arm_feature(env, ARM_FEATURE_V8)) { 3736 /* PMSAv8 */ 3737 ret = get_phys_addr_pmsav8(env, ptw, address, access_type, 3738 result, fi); 3739 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3740 /* PMSAv7 */ 3741 ret = get_phys_addr_pmsav7(env, ptw, address, access_type, 3742 result, fi); 3743 } else { 3744 /* Pre-v7 MPU */ 3745 ret = get_phys_addr_pmsav5(env, ptw, address, access_type, 3746 result, fi); 3747 } 3748 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 3749 " mmu_idx %u -> %s (prot %c%c%c)\n", 3750 access_type == MMU_DATA_LOAD ? "reading" : 3751 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 3752 (uint32_t)address, mmu_idx, 3753 ret ? "Miss" : "Hit", 3754 result->f.prot & PAGE_READ ? 'r' : '-', 3755 result->f.prot & PAGE_WRITE ? 'w' : '-', 3756 result->f.prot & PAGE_EXEC ? 'x' : '-'); 3757 3758 return ret; 3759 } 3760 3761 /* Definitely a real MMU, not an MPU */ 3762 3763 if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { 3764 return get_phys_addr_disabled(env, ptw, address, access_type, 3765 result, fi); 3766 } 3767 3768 if (regime_using_lpae_format(env, mmu_idx)) { 3769 return get_phys_addr_lpae(env, ptw, address, access_type, 3770 memop, result, fi); 3771 } else if (arm_feature(env, ARM_FEATURE_V7) || 3772 regime_sctlr(env, mmu_idx) & SCTLR_XP) { 3773 return get_phys_addr_v6(env, ptw, address, access_type, result, fi); 3774 } else { 3775 return get_phys_addr_v5(env, ptw, address, access_type, result, fi); 3776 } 3777 } 3778 3779 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, 3780 vaddr address, 3781 MMUAccessType access_type, MemOp memop, 3782 GetPhysAddrResult *result, 3783 ARMMMUFaultInfo *fi) 3784 { 3785 if (get_phys_addr_nogpc(env, ptw, address, access_type, 3786 memop, result, fi)) { 3787 return true; 3788 } 3789 if (!granule_protection_check(env, result->f.phys_addr, 3790 result->f.attrs.space, ptw->in_space, fi)) { 3791 fi->type = ARMFault_GPCFOnOutput; 3792 return true; 3793 } 3794 return false; 3795 } 3796 3797 bool get_phys_addr_for_at(CPUARMState *env, vaddr address, 3798 unsigned prot_check, ARMMMUIdx mmu_idx, 3799 ARMSecuritySpace space, GetPhysAddrResult *result, 3800 ARMMMUFaultInfo *fi) 3801 { 3802 S1Translate ptw = { 3803 .in_mmu_idx = mmu_idx, 3804 .in_space = space, 3805 .in_at = true, 3806 .in_prot_check = prot_check, 3807 }; 3808 /* 3809 * I_MXTJT: Granule protection checks are not performed on the final 3810 * address of a successful translation. This is a translation not a 3811 * memory reference, so MMU_DATA_LOAD is arbitrary (the exact protection 3812 * check is handled or bypassed by .in_prot_check) and "memop = MO_8" 3813 * bypasses any alignment check. 3814 */ 3815 return get_phys_addr_nogpc(env, &ptw, address, 3816 MMU_DATA_LOAD, MO_8, result, fi); 3817 } 3818 3819 static ARMSecuritySpace 3820 arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx) 3821 { 3822 ARMSecuritySpace ss; 3823 3824 switch (mmu_idx) { 3825 case ARMMMUIdx_E10_0: 3826 case ARMMMUIdx_E10_0_GCS: 3827 case ARMMMUIdx_E10_1: 3828 case ARMMMUIdx_E10_1_PAN: 3829 case ARMMMUIdx_E10_1_GCS: 3830 case ARMMMUIdx_E20_0: 3831 case ARMMMUIdx_E20_0_GCS: 3832 case ARMMMUIdx_E20_2: 3833 case ARMMMUIdx_E20_2_PAN: 3834 case ARMMMUIdx_E20_2_GCS: 3835 case ARMMMUIdx_Stage1_E0: 3836 case ARMMMUIdx_Stage1_E0_GCS: 3837 case ARMMMUIdx_Stage1_E1: 3838 case ARMMMUIdx_Stage1_E1_PAN: 3839 case ARMMMUIdx_Stage1_E1_GCS: 3840 case ARMMMUIdx_E2: 3841 case ARMMMUIdx_E2_GCS: 3842 ss = arm_security_space_below_el3(env); 3843 break; 3844 case ARMMMUIdx_Stage2: 3845 /* 3846 * For Secure EL2, we need this index to be NonSecure; 3847 * otherwise this will already be NonSecure or Realm. 3848 */ 3849 ss = arm_security_space_below_el3(env); 3850 if (ss == ARMSS_Secure) { 3851 ss = ARMSS_NonSecure; 3852 } 3853 break; 3854 case ARMMMUIdx_Phys_NS: 3855 case ARMMMUIdx_MPrivNegPri: 3856 case ARMMMUIdx_MUserNegPri: 3857 case ARMMMUIdx_MPriv: 3858 case ARMMMUIdx_MUser: 3859 ss = ARMSS_NonSecure; 3860 break; 3861 case ARMMMUIdx_Stage2_S: 3862 case ARMMMUIdx_Phys_S: 3863 case ARMMMUIdx_MSPrivNegPri: 3864 case ARMMMUIdx_MSUserNegPri: 3865 case ARMMMUIdx_MSPriv: 3866 case ARMMMUIdx_MSUser: 3867 ss = ARMSS_Secure; 3868 break; 3869 case ARMMMUIdx_E3: 3870 case ARMMMUIdx_E3_GCS: 3871 case ARMMMUIdx_E30_0: 3872 case ARMMMUIdx_E30_3_PAN: 3873 if (arm_feature(env, ARM_FEATURE_AARCH64) && 3874 cpu_isar_feature(aa64_rme, env_archcpu(env))) { 3875 ss = ARMSS_Root; 3876 } else { 3877 ss = ARMSS_Secure; 3878 } 3879 break; 3880 case ARMMMUIdx_Phys_Root: 3881 ss = ARMSS_Root; 3882 break; 3883 case ARMMMUIdx_Phys_Realm: 3884 ss = ARMSS_Realm; 3885 break; 3886 default: 3887 g_assert_not_reached(); 3888 } 3889 3890 return ss; 3891 } 3892 3893 bool get_phys_addr(CPUARMState *env, vaddr address, 3894 MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, 3895 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 3896 { 3897 S1Translate ptw = { 3898 .in_mmu_idx = mmu_idx, 3899 .in_space = arm_mmu_idx_to_security_space(env, mmu_idx), 3900 .in_prot_check = 1 << access_type, 3901 }; 3902 3903 return get_phys_addr_gpc(env, &ptw, address, access_type, 3904 memop, result, fi); 3905 } 3906 3907 static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr, 3908 MemTxAttrs *attrs, ARMMMUIdx mmu_idx) 3909 { 3910 S1Translate ptw = { 3911 .in_mmu_idx = mmu_idx, 3912 .in_space = arm_mmu_idx_to_security_space(env, mmu_idx), 3913 .in_debug = true, 3914 .in_at = true, 3915 .in_prot_check = 0, 3916 }; 3917 GetPhysAddrResult res = {}; 3918 ARMMMUFaultInfo fi = {}; 3919 bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi); 3920 *attrs = res.f.attrs; 3921 3922 if (ret) { 3923 return -1; 3924 } 3925 return res.f.phys_addr; 3926 } 3927 3928 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 3929 MemTxAttrs *attrs) 3930 { 3931 ARMCPU *cpu = ARM_CPU(cs); 3932 CPUARMState *env = &cpu->env; 3933 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 3934 3935 hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx); 3936 3937 if (res != -1) { 3938 return res; 3939 } 3940 3941 /* 3942 * Memory may be accessible for an "unprivileged load/store" variant. 3943 * In this case, get_a64_user_mem_index function generates an op using an 3944 * unprivileged mmu idx, so we need to try with it. 3945 */ 3946 switch (mmu_idx) { 3947 case ARMMMUIdx_E10_1: 3948 case ARMMMUIdx_E10_1_PAN: 3949 return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0); 3950 case ARMMMUIdx_E20_2: 3951 case ARMMMUIdx_E20_2_PAN: 3952 return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0); 3953 default: 3954 return -1; 3955 } 3956 } 3957