1 /* 2 * ARM page table walking. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/log.h" 11 #include "qemu/range.h" 12 #include "qemu/main-loop.h" 13 #include "exec/exec-all.h" 14 #include "exec/page-protection.h" 15 #include "cpu.h" 16 #include "internals.h" 17 #include "cpu-features.h" 18 #include "idau.h" 19 #ifdef CONFIG_TCG 20 # include "tcg/oversized-guest.h" 21 #endif 22 23 typedef struct S1Translate { 24 /* 25 * in_mmu_idx : specifies which TTBR, TCR, etc to use for the walk. 26 * Together with in_space, specifies the architectural translation regime. 27 */ 28 ARMMMUIdx in_mmu_idx; 29 /* 30 * in_ptw_idx: specifies which mmuidx to use for the actual 31 * page table descriptor load operations. This will be one of the 32 * ARMMMUIdx_Stage2* or one of the ARMMMUIdx_Phys_* indexes. 33 * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, 34 * this field is updated accordingly. 35 */ 36 ARMMMUIdx in_ptw_idx; 37 /* 38 * in_space: the security space for this walk. This plus 39 * the in_mmu_idx specify the architectural translation regime. 40 * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, 41 * this field is updated accordingly. 42 * 43 * Note that the security space for the in_ptw_idx may be different 44 * from that for the in_mmu_idx. We do not need to explicitly track 45 * the in_ptw_idx security space because: 46 * - if the in_ptw_idx is an ARMMMUIdx_Phys_* then the mmuidx 47 * itself specifies the security space 48 * - if the in_ptw_idx is an ARMMMUIdx_Stage2* then the security 49 * space used for ptw reads is the same as that of the security 50 * space of the stage 1 translation for all cases except where 51 * stage 1 is Secure; in that case the only possibilities for 52 * the ptw read are Secure and NonSecure, and the in_ptw_idx 53 * value being Stage2 vs Stage2_S distinguishes those. 54 */ 55 ARMSecuritySpace in_space; 56 /* 57 * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug 58 * accesses will not update the guest page table access flags 59 * and will not change the state of the softmmu TLBs. 60 */ 61 bool in_debug; 62 /* 63 * If this is stage 2 of a stage 1+2 page table walk, then this must 64 * be true if stage 1 is an EL0 access; otherwise this is ignored. 65 * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}. 66 */ 67 bool in_s1_is_el0; 68 bool out_rw; 69 bool out_be; 70 ARMSecuritySpace out_space; 71 hwaddr out_virt; 72 hwaddr out_phys; 73 void *out_host; 74 } S1Translate; 75 76 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, 77 vaddr address, 78 MMUAccessType access_type, MemOp memop, 79 GetPhysAddrResult *result, 80 ARMMMUFaultInfo *fi); 81 82 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, 83 vaddr address, 84 MMUAccessType access_type, MemOp memop, 85 GetPhysAddrResult *result, 86 ARMMMUFaultInfo *fi); 87 88 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 89 int user_rw, int prot_rw, int xn, int pxn, 90 ARMSecuritySpace in_pa, ARMSecuritySpace out_pa); 91 92 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. 
*/ 93 static const uint8_t pamax_map[] = { 94 [0] = 32, 95 [1] = 36, 96 [2] = 40, 97 [3] = 42, 98 [4] = 44, 99 [5] = 48, 100 [6] = 52, 101 }; 102 103 uint8_t round_down_to_parange_index(uint8_t bit_size) 104 { 105 for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) { 106 if (pamax_map[i] <= bit_size) { 107 return i; 108 } 109 } 110 g_assert_not_reached(); 111 } 112 113 uint8_t round_down_to_parange_bit_size(uint8_t bit_size) 114 { 115 return pamax_map[round_down_to_parange_index(bit_size)]; 116 } 117 118 /* 119 * The cpu-specific constant value of PAMax; also used by hw/arm/virt. 120 * Note that machvirt_init calls this on a CPU that is inited but not realized! 121 */ 122 unsigned int arm_pamax(ARMCPU *cpu) 123 { 124 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 125 unsigned int parange = 126 FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); 127 128 /* 129 * id_aa64mmfr0 is a read-only register so values outside of the 130 * supported mappings can be considered an implementation error. 131 */ 132 assert(parange < ARRAY_SIZE(pamax_map)); 133 return pamax_map[parange]; 134 } 135 136 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 137 /* v7 or v8 with LPAE */ 138 return 40; 139 } 140 /* Anything else */ 141 return 32; 142 } 143 144 /* 145 * Convert a possible stage1+2 MMU index into the appropriate stage 1 MMU index 146 */ 147 ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 148 { 149 switch (mmu_idx) { 150 case ARMMMUIdx_E10_0: 151 return ARMMMUIdx_Stage1_E0; 152 case ARMMMUIdx_E10_1: 153 return ARMMMUIdx_Stage1_E1; 154 case ARMMMUIdx_E10_1_PAN: 155 return ARMMMUIdx_Stage1_E1_PAN; 156 default: 157 return mmu_idx; 158 } 159 } 160 161 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 162 { 163 return stage_1_mmu_idx(arm_mmu_idx(env)); 164 } 165 166 /* 167 * Return where we should do ptw loads from for a stage 2 walk. 168 * This depends on whether the address we are looking up is a 169 * Secure IPA or a NonSecure IPA, which we know from whether this is 170 * Stage2 or Stage2_S. 171 * If this is the Secure EL1&0 regime we need to check the NSW and SW bits. 172 */ 173 static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx) 174 { 175 bool s2walk_secure; 176 177 /* 178 * We're OK to check the current state of the CPU here because 179 * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit 180 * changes. 181 * (2) there's no way to do a lookup that cares about Stage 2 for a 182 * different security state to the current one for AArch64, and AArch32 183 * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do 184 * an NS stage 1+2 lookup while the NS bit is 0.) 185 */ 186 if (!arm_el_is_aa64(env, 3)) { 187 return ARMMMUIdx_Phys_NS; 188 } 189 190 switch (arm_security_space_below_el3(env)) { 191 case ARMSS_NonSecure: 192 return ARMMMUIdx_Phys_NS; 193 case ARMSS_Realm: 194 return ARMMMUIdx_Phys_Realm; 195 case ARMSS_Secure: 196 if (stage2idx == ARMMMUIdx_Stage2_S) { 197 s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); 198 } else { 199 s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); 200 } 201 return s2walk_secure ? 
ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; 202 default: 203 g_assert_not_reached(); 204 } 205 } 206 207 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx) 208 { 209 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 210 } 211 212 /* Return the TTBR associated with this translation regime */ 213 static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) 214 { 215 if (mmu_idx == ARMMMUIdx_Stage2) { 216 return env->cp15.vttbr_el2; 217 } 218 if (mmu_idx == ARMMMUIdx_Stage2_S) { 219 return env->cp15.vsttbr_el2; 220 } 221 if (ttbrn == 0) { 222 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 223 } else { 224 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 225 } 226 } 227 228 /* Return true if the specified stage of address translation is disabled */ 229 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, 230 ARMSecuritySpace space) 231 { 232 uint64_t hcr_el2; 233 234 if (arm_feature(env, ARM_FEATURE_M)) { 235 bool is_secure = arm_space_is_secure(space); 236 switch (env->v7m.mpu_ctrl[is_secure] & 237 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 238 case R_V7M_MPU_CTRL_ENABLE_MASK: 239 /* Enabled, but not for HardFault and NMI */ 240 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 241 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 242 /* Enabled for all cases */ 243 return false; 244 case 0: 245 default: 246 /* 247 * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 248 * we warned about that in armv7m_nvic.c when the guest set it. 249 */ 250 return true; 251 } 252 } 253 254 255 switch (mmu_idx) { 256 case ARMMMUIdx_Stage2: 257 case ARMMMUIdx_Stage2_S: 258 /* HCR.DC means HCR.VM behaves as 1 */ 259 hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 260 return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; 261 262 case ARMMMUIdx_E10_0: 263 case ARMMMUIdx_E10_1: 264 case ARMMMUIdx_E10_1_PAN: 265 /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */ 266 hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 267 if (hcr_el2 & HCR_TGE) { 268 return true; 269 } 270 break; 271 272 case ARMMMUIdx_Stage1_E0: 273 case ARMMMUIdx_Stage1_E1: 274 case ARMMMUIdx_Stage1_E1_PAN: 275 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 276 hcr_el2 = arm_hcr_el2_eff_secstate(env, space); 277 if (hcr_el2 & HCR_DC) { 278 return true; 279 } 280 break; 281 282 case ARMMMUIdx_E20_0: 283 case ARMMMUIdx_E20_2: 284 case ARMMMUIdx_E20_2_PAN: 285 case ARMMMUIdx_E2: 286 case ARMMMUIdx_E3: 287 case ARMMMUIdx_E30_0: 288 case ARMMMUIdx_E30_3_PAN: 289 break; 290 291 case ARMMMUIdx_Phys_S: 292 case ARMMMUIdx_Phys_NS: 293 case ARMMMUIdx_Phys_Root: 294 case ARMMMUIdx_Phys_Realm: 295 /* No translation for physical address spaces. */ 296 return true; 297 298 default: 299 g_assert_not_reached(); 300 } 301 302 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 303 } 304 305 static bool granule_protection_check(CPUARMState *env, uint64_t paddress, 306 ARMSecuritySpace pspace, 307 ARMMMUFaultInfo *fi) 308 { 309 MemTxAttrs attrs = { 310 .secure = true, 311 .space = ARMSS_Root, 312 }; 313 ARMCPU *cpu = env_archcpu(env); 314 uint64_t gpccr = env->cp15.gpccr_el3; 315 unsigned pps, pgs, l0gptsz, level = 0; 316 uint64_t tableaddr, pps_mask, align, entry, index; 317 AddressSpace *as; 318 MemTxResult result; 319 int gpi; 320 321 if (!FIELD_EX64(gpccr, GPCCR, GPC)) { 322 return true; 323 } 324 325 /* 326 * GPC Priority 1 (R_GMGRR): 327 * R_JWCSM: If the configuration of GPCCR_EL3 is invalid, 328 * the access fails as GPT walk fault at level 0. 
329 */ 330 331 /* 332 * Configuration of PPS to a value exceeding the implemented 333 * physical address size is invalid. 334 */ 335 pps = FIELD_EX64(gpccr, GPCCR, PPS); 336 if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) { 337 goto fault_walk; 338 } 339 pps = pamax_map[pps]; 340 pps_mask = MAKE_64BIT_MASK(0, pps); 341 342 switch (FIELD_EX64(gpccr, GPCCR, SH)) { 343 case 0b10: /* outer shareable */ 344 break; 345 case 0b00: /* non-shareable */ 346 case 0b11: /* inner shareable */ 347 /* Inner and Outer non-cacheable requires Outer shareable. */ 348 if (FIELD_EX64(gpccr, GPCCR, ORGN) == 0 && 349 FIELD_EX64(gpccr, GPCCR, IRGN) == 0) { 350 goto fault_walk; 351 } 352 break; 353 default: /* reserved */ 354 goto fault_walk; 355 } 356 357 switch (FIELD_EX64(gpccr, GPCCR, PGS)) { 358 case 0b00: /* 4KB */ 359 pgs = 12; 360 break; 361 case 0b01: /* 64KB */ 362 pgs = 16; 363 break; 364 case 0b10: /* 16KB */ 365 pgs = 14; 366 break; 367 default: /* reserved */ 368 goto fault_walk; 369 } 370 371 /* Note this field is read-only and fixed at reset. */ 372 l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ); 373 374 /* 375 * GPC Priority 2: Secure, Realm or Root address exceeds PPS. 376 * R_CPDSB: A NonSecure physical address input exceeding PPS 377 * does not experience any fault. 378 */ 379 if (paddress & ~pps_mask) { 380 if (pspace == ARMSS_NonSecure) { 381 return true; 382 } 383 goto fault_size; 384 } 385 386 /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */ 387 tableaddr = env->cp15.gptbr_el3 << 12; 388 if (tableaddr & ~pps_mask) { 389 goto fault_size; 390 } 391 392 /* 393 * BADDR is aligned per a function of PPS and L0GPTSZ. 394 * These bits of GPTBR_EL3 are RES0, but are not a configuration error, 395 * unlike the RES0 bits of the GPT entries (R_XNKFZ). 396 */ 397 align = MAX(pps - l0gptsz + 3, 12); 398 align = MAKE_64BIT_MASK(0, align); 399 tableaddr &= ~align; 400 401 as = arm_addressspace(env_cpu(env), attrs); 402 403 /* Level 0 lookup. */ 404 index = extract64(paddress, l0gptsz, pps - l0gptsz); 405 tableaddr += index * 8; 406 entry = address_space_ldq_le(as, tableaddr, attrs, &result); 407 if (result != MEMTX_OK) { 408 goto fault_eabt; 409 } 410 411 switch (extract32(entry, 0, 4)) { 412 case 1: /* block descriptor */ 413 if (entry >> 8) { 414 goto fault_walk; /* RES0 bits not 0 */ 415 } 416 gpi = extract32(entry, 4, 4); 417 goto found; 418 case 3: /* table descriptor */ 419 tableaddr = entry & ~0xf; 420 align = MAX(l0gptsz - pgs - 1, 12); 421 align = MAKE_64BIT_MASK(0, align); 422 if (tableaddr & (~pps_mask | align)) { 423 goto fault_walk; /* RES0 bits not 0 */ 424 } 425 break; 426 default: /* invalid */ 427 goto fault_walk; 428 } 429 430 /* Level 1 lookup */ 431 level = 1; 432 index = extract64(paddress, pgs + 4, l0gptsz - pgs - 4); 433 tableaddr += index * 8; 434 entry = address_space_ldq_le(as, tableaddr, attrs, &result); 435 if (result != MEMTX_OK) { 436 goto fault_eabt; 437 } 438 439 switch (extract32(entry, 0, 4)) { 440 case 1: /* contiguous descriptor */ 441 if (entry >> 10) { 442 goto fault_walk; /* RES0 bits not 0 */ 443 } 444 /* 445 * Because the softmmu tlb only works on units of TARGET_PAGE_SIZE, 446 * and because we cannot invalidate by pa, and thus will always 447 * flush entire tlbs, we don't actually care about the range here 448 * and can simply extract the GPI as the result. 
449 */ 450 if (extract32(entry, 8, 2) == 0) { 451 goto fault_walk; /* reserved contig */ 452 } 453 gpi = extract32(entry, 4, 4); 454 break; 455 default: 456 index = extract64(paddress, pgs, 4); 457 gpi = extract64(entry, index * 4, 4); 458 break; 459 } 460 461 found: 462 switch (gpi) { 463 case 0b0000: /* no access */ 464 break; 465 case 0b1111: /* all access */ 466 return true; 467 case 0b1000: 468 case 0b1001: 469 case 0b1010: 470 case 0b1011: 471 if (pspace == (gpi & 3)) { 472 return true; 473 } 474 break; 475 default: 476 goto fault_walk; /* reserved */ 477 } 478 479 fi->gpcf = GPCF_Fail; 480 goto fault_common; 481 fault_eabt: 482 fi->gpcf = GPCF_EABT; 483 goto fault_common; 484 fault_size: 485 fi->gpcf = GPCF_AddressSize; 486 goto fault_common; 487 fault_walk: 488 fi->gpcf = GPCF_Walk; 489 fault_common: 490 fi->level = level; 491 fi->paddr = paddress; 492 fi->paddr_space = pspace; 493 return false; 494 } 495 496 static bool S1_attrs_are_device(uint8_t attrs) 497 { 498 /* 499 * This slightly under-decodes the MAIR_ELx field: 500 * 0b0000dd01 is Device with FEAT_XS, otherwise UNPREDICTABLE; 501 * 0b0000dd1x is UNPREDICTABLE. 502 */ 503 return (attrs & 0xf0) == 0; 504 } 505 506 static bool S2_attrs_are_device(uint64_t hcr, uint8_t attrs) 507 { 508 /* 509 * For an S1 page table walk, the stage 1 attributes are always 510 * some form of "this is Normal memory". The combined S1+S2 511 * attributes are therefore only Device if stage 2 specifies Device. 512 * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00, 513 * ie when cacheattrs.attrs bits [3:2] are 0b00. 514 * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie 515 * when cacheattrs.attrs bit [2] is 0. 516 */ 517 if (hcr & HCR_FWB) { 518 return (attrs & 0x4) == 0; 519 } else { 520 return (attrs & 0xc) == 0; 521 } 522 } 523 524 static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space, 525 ARMMMUIdx s2_mmu_idx) 526 { 527 /* 528 * Return the security space to use for stage 2 when doing 529 * the S1 page table descriptor load. 530 */ 531 if (regime_is_stage2(s2_mmu_idx)) { 532 /* 533 * The security space for ptw reads is almost always the same 534 * as that of the security space of the stage 1 translation. 535 * The only exception is when stage 1 is Secure; in that case 536 * the ptw read might be to the Secure or the NonSecure space 537 * (but never Realm or Root), and the s2_mmu_idx tells us which. 538 * Root translations are always single-stage. 539 */ 540 if (s1_space == ARMSS_Secure) { 541 return arm_secure_to_space(s2_mmu_idx == ARMMMUIdx_Stage2_S); 542 } else { 543 assert(s2_mmu_idx != ARMMMUIdx_Stage2_S); 544 assert(s1_space != ARMSS_Root); 545 return s1_space; 546 } 547 } else { 548 /* ptw loads are from phys: the mmu idx itself says which space */ 549 return arm_phys_to_space(s2_mmu_idx); 550 } 551 } 552 553 static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx) 554 { 555 /* 556 * For stage 2 faults in Secure EL22, S1NS indicates 557 * whether the faulting IPA is in the Secure or NonSecure 558 * IPA space. For all other kinds of fault, it is false. 559 */ 560 return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx) 561 && s2_mmu_idx == ARMMMUIdx_Stage2_S; 562 } 563 564 /* Translate a S1 pagetable walk through S2 if needed. 
*/ 565 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, 566 hwaddr addr, ARMMMUFaultInfo *fi) 567 { 568 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 569 ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx; 570 uint8_t pte_attrs; 571 572 ptw->out_virt = addr; 573 574 if (unlikely(ptw->in_debug)) { 575 /* 576 * From gdbstub, do not use softmmu so that we don't modify the 577 * state of the cpu at all, including softmmu tlb contents. 578 */ 579 ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx); 580 S1Translate s2ptw = { 581 .in_mmu_idx = s2_mmu_idx, 582 .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), 583 .in_space = s2_space, 584 .in_debug = true, 585 }; 586 GetPhysAddrResult s2 = { }; 587 588 if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) { 589 goto fail; 590 } 591 592 ptw->out_phys = s2.f.phys_addr; 593 pte_attrs = s2.cacheattrs.attrs; 594 ptw->out_host = NULL; 595 ptw->out_rw = false; 596 ptw->out_space = s2.f.attrs.space; 597 } else { 598 #ifdef CONFIG_TCG 599 CPUTLBEntryFull *full; 600 int flags; 601 602 env->tlb_fi = fi; 603 flags = probe_access_full_mmu(env, addr, 0, MMU_DATA_LOAD, 604 arm_to_core_mmu_idx(s2_mmu_idx), 605 &ptw->out_host, &full); 606 env->tlb_fi = NULL; 607 608 if (unlikely(flags & TLB_INVALID_MASK)) { 609 goto fail; 610 } 611 ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK); 612 ptw->out_rw = full->prot & PAGE_WRITE; 613 pte_attrs = full->extra.arm.pte_attrs; 614 ptw->out_space = full->attrs.space; 615 #else 616 g_assert_not_reached(); 617 #endif 618 } 619 620 if (regime_is_stage2(s2_mmu_idx)) { 621 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 622 623 if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) { 624 /* 625 * PTW set and S1 walk touched S2 Device memory: 626 * generate Permission fault. 627 */ 628 fi->type = ARMFault_Permission; 629 fi->s2addr = addr; 630 fi->stage2 = true; 631 fi->s1ptw = true; 632 fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); 633 return false; 634 } 635 } 636 637 ptw->out_be = regime_translation_big_endian(env, mmu_idx); 638 return true; 639 640 fail: 641 assert(fi->type != ARMFault_None); 642 if (fi->type == ARMFault_GPCFOnOutput) { 643 fi->type = ARMFault_GPCFOnWalk; 644 } 645 fi->s2addr = addr; 646 fi->stage2 = regime_is_stage2(s2_mmu_idx); 647 fi->s1ptw = fi->stage2; 648 fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); 649 return false; 650 } 651 652 /* All loads done in the course of a page table walk go through here. */ 653 static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw, 654 ARMMMUFaultInfo *fi) 655 { 656 CPUState *cs = env_cpu(env); 657 void *host = ptw->out_host; 658 uint32_t data; 659 660 if (likely(host)) { 661 /* Page tables are in RAM, and we have the host address. */ 662 data = qatomic_read((uint32_t *)host); 663 if (ptw->out_be) { 664 data = be32_to_cpu(data); 665 } else { 666 data = le32_to_cpu(data); 667 } 668 } else { 669 /* Page tables are in MMIO. 
*/ 670 MemTxAttrs attrs = { 671 .space = ptw->out_space, 672 .secure = arm_space_is_secure(ptw->out_space), 673 }; 674 AddressSpace *as = arm_addressspace(cs, attrs); 675 MemTxResult result = MEMTX_OK; 676 677 if (ptw->out_be) { 678 data = address_space_ldl_be(as, ptw->out_phys, attrs, &result); 679 } else { 680 data = address_space_ldl_le(as, ptw->out_phys, attrs, &result); 681 } 682 if (unlikely(result != MEMTX_OK)) { 683 fi->type = ARMFault_SyncExternalOnWalk; 684 fi->ea = arm_extabort_type(result); 685 return 0; 686 } 687 } 688 return data; 689 } 690 691 static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw, 692 ARMMMUFaultInfo *fi) 693 { 694 CPUState *cs = env_cpu(env); 695 void *host = ptw->out_host; 696 uint64_t data; 697 698 if (likely(host)) { 699 /* Page tables are in RAM, and we have the host address. */ 700 #ifdef CONFIG_ATOMIC64 701 data = qatomic_read__nocheck((uint64_t *)host); 702 if (ptw->out_be) { 703 data = be64_to_cpu(data); 704 } else { 705 data = le64_to_cpu(data); 706 } 707 #else 708 if (ptw->out_be) { 709 data = ldq_be_p(host); 710 } else { 711 data = ldq_le_p(host); 712 } 713 #endif 714 } else { 715 /* Page tables are in MMIO. */ 716 MemTxAttrs attrs = { 717 .space = ptw->out_space, 718 .secure = arm_space_is_secure(ptw->out_space), 719 }; 720 AddressSpace *as = arm_addressspace(cs, attrs); 721 MemTxResult result = MEMTX_OK; 722 723 if (ptw->out_be) { 724 data = address_space_ldq_be(as, ptw->out_phys, attrs, &result); 725 } else { 726 data = address_space_ldq_le(as, ptw->out_phys, attrs, &result); 727 } 728 if (unlikely(result != MEMTX_OK)) { 729 fi->type = ARMFault_SyncExternalOnWalk; 730 fi->ea = arm_extabort_type(result); 731 return 0; 732 } 733 } 734 return data; 735 } 736 737 static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, 738 uint64_t new_val, S1Translate *ptw, 739 ARMMMUFaultInfo *fi) 740 { 741 #if defined(TARGET_AARCH64) && defined(CONFIG_TCG) 742 uint64_t cur_val; 743 void *host = ptw->out_host; 744 745 if (unlikely(!host)) { 746 /* Page table in MMIO Memory Region */ 747 CPUState *cs = env_cpu(env); 748 MemTxAttrs attrs = { 749 .space = ptw->out_space, 750 .secure = arm_space_is_secure(ptw->out_space), 751 }; 752 AddressSpace *as = arm_addressspace(cs, attrs); 753 MemTxResult result = MEMTX_OK; 754 bool need_lock = !bql_locked(); 755 756 if (need_lock) { 757 bql_lock(); 758 } 759 if (ptw->out_be) { 760 cur_val = address_space_ldq_be(as, ptw->out_phys, attrs, &result); 761 if (unlikely(result != MEMTX_OK)) { 762 fi->type = ARMFault_SyncExternalOnWalk; 763 fi->ea = arm_extabort_type(result); 764 if (need_lock) { 765 bql_unlock(); 766 } 767 return old_val; 768 } 769 if (cur_val == old_val) { 770 address_space_stq_be(as, ptw->out_phys, new_val, attrs, &result); 771 if (unlikely(result != MEMTX_OK)) { 772 fi->type = ARMFault_SyncExternalOnWalk; 773 fi->ea = arm_extabort_type(result); 774 if (need_lock) { 775 bql_unlock(); 776 } 777 return old_val; 778 } 779 cur_val = new_val; 780 } 781 } else { 782 cur_val = address_space_ldq_le(as, ptw->out_phys, attrs, &result); 783 if (unlikely(result != MEMTX_OK)) { 784 fi->type = ARMFault_SyncExternalOnWalk; 785 fi->ea = arm_extabort_type(result); 786 if (need_lock) { 787 bql_unlock(); 788 } 789 return old_val; 790 } 791 if (cur_val == old_val) { 792 address_space_stq_le(as, ptw->out_phys, new_val, attrs, &result); 793 if (unlikely(result != MEMTX_OK)) { 794 fi->type = ARMFault_SyncExternalOnWalk; 795 fi->ea = arm_extabort_type(result); 796 if (need_lock) { 797 bql_unlock(); 798 } 799 return 
old_val;
                }
                cur_val = new_val;
            }
        }
        if (need_lock) {
            bql_unlock();
        }
        return cur_val;
    }

    /*
     * Raising a stage2 Protection fault for an atomic update to a read-only
     * page is delayed until it is certain that there is a change to make.
     */
    if (unlikely(!ptw->out_rw)) {
        int flags;

        env->tlb_fi = fi;
        flags = probe_access_full_mmu(env, ptw->out_virt, 0,
                                      MMU_DATA_STORE,
                                      arm_to_core_mmu_idx(ptw->in_ptw_idx),
                                      NULL, NULL);
        env->tlb_fi = NULL;

        if (unlikely(flags & TLB_INVALID_MASK)) {
            /*
             * We know this must be a stage 2 fault because the granule
             * protection table does not separately track read and write
             * permission, so all GPC faults are caught in S1_ptw_translate():
             * we only get here for "readable but not writeable".
             */
            assert(fi->type != ARMFault_None);
            fi->s2addr = ptw->out_virt;
            fi->stage2 = true;
            fi->s1ptw = true;
            fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
            return 0;
        }

        /* In case CAS mismatches and we loop, remember writability. */
        ptw->out_rw = true;
    }

#ifdef CONFIG_ATOMIC64
    if (ptw->out_be) {
        old_val = cpu_to_be64(old_val);
        new_val = cpu_to_be64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = be64_to_cpu(cur_val);
    } else {
        old_val = cpu_to_le64(old_val);
        new_val = cpu_to_le64(new_val);
        cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
        cur_val = le64_to_cpu(cur_val);
    }
#else
    /*
     * We can't support the full 64-bit atomic cmpxchg on the host.
     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
     * running in round-robin mode and could only race with dma i/o.
     */
#if !TCG_OVERSIZED_GUEST
# error "Unexpected configuration"
#endif
    bool locked = bql_locked();
    if (!locked) {
        bql_lock();
    }
    if (ptw->out_be) {
        cur_val = ldq_be_p(host);
        if (cur_val == old_val) {
            stq_be_p(host, new_val);
        }
    } else {
        cur_val = ldq_le_p(host);
        if (cur_val == old_val) {
            stq_le_p(host, new_val);
        }
    }
    if (!locked) {
        bql_unlock();
    }
#endif

    return cur_val;
#else
    /* AArch32 does not have FEAT_HAFDBS; non-TCG guests only use debug-mode.
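     * Hardware update of the access flag / dirty state therefore never
     * needs this helper in such configurations, so this build of the
     * function is unreachable; the assertion below documents that.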
*/ 888 g_assert_not_reached(); 889 #endif 890 } 891 892 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 893 uint32_t *table, uint32_t address) 894 { 895 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 896 uint64_t tcr = regime_tcr(env, mmu_idx); 897 int maskshift = extract32(tcr, 0, 3); 898 uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift); 899 uint32_t base_mask; 900 901 if (address & mask) { 902 if (tcr & TTBCR_PD1) { 903 /* Translation table walk disabled for TTBR1 */ 904 return false; 905 } 906 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 907 } else { 908 if (tcr & TTBCR_PD0) { 909 /* Translation table walk disabled for TTBR0 */ 910 return false; 911 } 912 base_mask = ~((uint32_t)0x3fffu >> maskshift); 913 *table = regime_ttbr(env, mmu_idx, 0) & base_mask; 914 } 915 *table |= (address >> 18) & 0x3ffc; 916 return true; 917 } 918 919 /* 920 * Translate section/page access permissions to page R/W protection flags 921 * @env: CPUARMState 922 * @mmu_idx: MMU index indicating required translation regime 923 * @ap: The 3-bit access permissions (AP[2:0]) 924 * @domain_prot: The 2-bit domain access permissions 925 * @is_user: TRUE if accessing from PL0 926 */ 927 static int ap_to_rw_prot_is_user(CPUARMState *env, ARMMMUIdx mmu_idx, 928 int ap, int domain_prot, bool is_user) 929 { 930 if (domain_prot == 3) { 931 return PAGE_READ | PAGE_WRITE; 932 } 933 934 switch (ap) { 935 case 0: 936 if (arm_feature(env, ARM_FEATURE_V7)) { 937 return 0; 938 } 939 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 940 case SCTLR_S: 941 return is_user ? 0 : PAGE_READ; 942 case SCTLR_R: 943 return PAGE_READ; 944 default: 945 return 0; 946 } 947 case 1: 948 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 949 case 2: 950 if (is_user) { 951 return PAGE_READ; 952 } else { 953 return PAGE_READ | PAGE_WRITE; 954 } 955 case 3: 956 return PAGE_READ | PAGE_WRITE; 957 case 4: /* Reserved. */ 958 return 0; 959 case 5: 960 return is_user ? 0 : PAGE_READ; 961 case 6: 962 return PAGE_READ; 963 case 7: 964 if (!arm_feature(env, ARM_FEATURE_V6K)) { 965 return 0; 966 } 967 return PAGE_READ; 968 default: 969 g_assert_not_reached(); 970 } 971 } 972 973 /* 974 * Translate section/page access permissions to page R/W protection flags 975 * @env: CPUARMState 976 * @mmu_idx: MMU index indicating required translation regime 977 * @ap: The 3-bit access permissions (AP[2:0]) 978 * @domain_prot: The 2-bit domain access permissions 979 */ 980 static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 981 int ap, int domain_prot) 982 { 983 return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 984 regime_is_user(env, mmu_idx)); 985 } 986 987 /* 988 * Translate section/page access permissions to page R/W protection flags. 989 * @ap: The 2-bit simple AP (AP[2:1]) 990 * @is_user: TRUE if accessing from PL0 991 */ 992 static int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 993 { 994 switch (ap) { 995 case 0: 996 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 997 case 1: 998 return PAGE_READ | PAGE_WRITE; 999 case 2: 1000 return is_user ? 
0 : PAGE_READ; 1001 case 3: 1002 return PAGE_READ; 1003 default: 1004 g_assert_not_reached(); 1005 } 1006 } 1007 1008 static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 1009 { 1010 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 1011 } 1012 1013 static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, 1014 uint32_t address, MMUAccessType access_type, 1015 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1016 { 1017 int level = 1; 1018 uint32_t table; 1019 uint32_t desc; 1020 int type; 1021 int ap; 1022 int domain = 0; 1023 int domain_prot; 1024 hwaddr phys_addr; 1025 uint32_t dacr; 1026 1027 /* Pagetable walk. */ 1028 /* Lookup l1 descriptor. */ 1029 if (!get_level1_table_address(env, ptw->in_mmu_idx, &table, address)) { 1030 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 1031 fi->type = ARMFault_Translation; 1032 goto do_fault; 1033 } 1034 if (!S1_ptw_translate(env, ptw, table, fi)) { 1035 goto do_fault; 1036 } 1037 desc = arm_ldl_ptw(env, ptw, fi); 1038 if (fi->type != ARMFault_None) { 1039 goto do_fault; 1040 } 1041 type = (desc & 3); 1042 domain = (desc >> 5) & 0x0f; 1043 if (regime_el(env, ptw->in_mmu_idx) == 1) { 1044 dacr = env->cp15.dacr_ns; 1045 } else { 1046 dacr = env->cp15.dacr_s; 1047 } 1048 domain_prot = (dacr >> (domain * 2)) & 3; 1049 if (type == 0) { 1050 /* Section translation fault. */ 1051 fi->type = ARMFault_Translation; 1052 goto do_fault; 1053 } 1054 if (type != 2) { 1055 level = 2; 1056 } 1057 if (domain_prot == 0 || domain_prot == 2) { 1058 fi->type = ARMFault_Domain; 1059 goto do_fault; 1060 } 1061 if (type == 2) { 1062 /* 1Mb section. */ 1063 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 1064 ap = (desc >> 10) & 3; 1065 result->f.lg_page_size = 20; /* 1MB */ 1066 } else { 1067 /* Lookup l2 entry. */ 1068 if (type == 1) { 1069 /* Coarse pagetable. */ 1070 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 1071 } else { 1072 /* Fine pagetable. */ 1073 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 1074 } 1075 if (!S1_ptw_translate(env, ptw, table, fi)) { 1076 goto do_fault; 1077 } 1078 desc = arm_ldl_ptw(env, ptw, fi); 1079 if (fi->type != ARMFault_None) { 1080 goto do_fault; 1081 } 1082 switch (desc & 3) { 1083 case 0: /* Page translation fault. */ 1084 fi->type = ARMFault_Translation; 1085 goto do_fault; 1086 case 1: /* 64k page. */ 1087 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 1088 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 1089 result->f.lg_page_size = 16; 1090 break; 1091 case 2: /* 4k page. */ 1092 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1093 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 1094 result->f.lg_page_size = 12; 1095 break; 1096 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 1097 if (type == 1) { 1098 /* ARMv6/XScale extended small page format */ 1099 if (arm_feature(env, ARM_FEATURE_XSCALE) 1100 || arm_feature(env, ARM_FEATURE_V6)) { 1101 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1102 result->f.lg_page_size = 12; 1103 } else { 1104 /* 1105 * UNPREDICTABLE in ARMv5; we choose to take a 1106 * page translation fault. 1107 */ 1108 fi->type = ARMFault_Translation; 1109 goto do_fault; 1110 } 1111 } else { 1112 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 1113 result->f.lg_page_size = 10; 1114 } 1115 ap = (desc >> 4) & 3; 1116 break; 1117 default: 1118 /* Never happens, but compiler isn't smart enough to tell. 
*/ 1119 g_assert_not_reached(); 1120 } 1121 } 1122 result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot); 1123 result->f.prot |= result->f.prot ? PAGE_EXEC : 0; 1124 if (!(result->f.prot & (1 << access_type))) { 1125 /* Access permission fault. */ 1126 fi->type = ARMFault_Permission; 1127 goto do_fault; 1128 } 1129 result->f.phys_addr = phys_addr; 1130 return false; 1131 do_fault: 1132 fi->domain = domain; 1133 fi->level = level; 1134 return true; 1135 } 1136 1137 static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, 1138 uint32_t address, MMUAccessType access_type, 1139 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1140 { 1141 ARMCPU *cpu = env_archcpu(env); 1142 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 1143 int level = 1; 1144 uint32_t table; 1145 uint32_t desc; 1146 uint32_t xn; 1147 uint32_t pxn = 0; 1148 int type; 1149 int ap; 1150 int domain = 0; 1151 int domain_prot; 1152 hwaddr phys_addr; 1153 uint32_t dacr; 1154 bool ns; 1155 ARMSecuritySpace out_space; 1156 1157 /* Pagetable walk. */ 1158 /* Lookup l1 descriptor. */ 1159 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 1160 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 1161 fi->type = ARMFault_Translation; 1162 goto do_fault; 1163 } 1164 if (!S1_ptw_translate(env, ptw, table, fi)) { 1165 goto do_fault; 1166 } 1167 desc = arm_ldl_ptw(env, ptw, fi); 1168 if (fi->type != ARMFault_None) { 1169 goto do_fault; 1170 } 1171 type = (desc & 3); 1172 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { 1173 /* Section translation fault, or attempt to use the encoding 1174 * which is Reserved on implementations without PXN. 1175 */ 1176 fi->type = ARMFault_Translation; 1177 goto do_fault; 1178 } 1179 if ((type == 1) || !(desc & (1 << 18))) { 1180 /* Page or Section. */ 1181 domain = (desc >> 5) & 0x0f; 1182 } 1183 if (regime_el(env, mmu_idx) == 1) { 1184 dacr = env->cp15.dacr_ns; 1185 } else { 1186 dacr = env->cp15.dacr_s; 1187 } 1188 if (type == 1) { 1189 level = 2; 1190 } 1191 domain_prot = (dacr >> (domain * 2)) & 3; 1192 if (domain_prot == 0 || domain_prot == 2) { 1193 /* Section or Page domain fault */ 1194 fi->type = ARMFault_Domain; 1195 goto do_fault; 1196 } 1197 if (type != 1) { 1198 if (desc & (1 << 18)) { 1199 /* Supersection. */ 1200 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 1201 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 1202 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 1203 result->f.lg_page_size = 24; /* 16MB */ 1204 } else { 1205 /* Section. */ 1206 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 1207 result->f.lg_page_size = 20; /* 1MB */ 1208 } 1209 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 1210 xn = desc & (1 << 4); 1211 pxn = desc & 1; 1212 ns = extract32(desc, 19, 1); 1213 } else { 1214 if (cpu_isar_feature(aa32_pxn, cpu)) { 1215 pxn = (desc >> 2) & 1; 1216 } 1217 ns = extract32(desc, 3, 1); 1218 /* Lookup l2 entry. */ 1219 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 1220 if (!S1_ptw_translate(env, ptw, table, fi)) { 1221 goto do_fault; 1222 } 1223 desc = arm_ldl_ptw(env, ptw, fi); 1224 if (fi->type != ARMFault_None) { 1225 goto do_fault; 1226 } 1227 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 1228 switch (desc & 3) { 1229 case 0: /* Page translation fault. */ 1230 fi->type = ARMFault_Translation; 1231 goto do_fault; 1232 case 1: /* 64k page. 
*/ 1233 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 1234 xn = desc & (1 << 15); 1235 result->f.lg_page_size = 16; 1236 break; 1237 case 2: case 3: /* 4k page. */ 1238 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 1239 xn = desc & 1; 1240 result->f.lg_page_size = 12; 1241 break; 1242 default: 1243 /* Never happens, but compiler isn't smart enough to tell. */ 1244 g_assert_not_reached(); 1245 } 1246 } 1247 out_space = ptw->in_space; 1248 if (ns) { 1249 /* 1250 * The NS bit will (as required by the architecture) have no effect if 1251 * the CPU doesn't support TZ or this is a non-secure translation 1252 * regime, because the output space will already be non-secure. 1253 */ 1254 out_space = ARMSS_NonSecure; 1255 } 1256 if (domain_prot == 3) { 1257 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 1258 } else { 1259 int user_rw, prot_rw; 1260 1261 if (arm_feature(env, ARM_FEATURE_V6K) && 1262 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 1263 /* The simplified model uses AP[0] as an access control bit. */ 1264 if ((ap & 1) == 0) { 1265 /* Access flag fault. */ 1266 fi->type = ARMFault_AccessFlag; 1267 goto do_fault; 1268 } 1269 prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 1270 user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1); 1271 } else { 1272 prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 1273 user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1); 1274 } 1275 1276 result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw, 1277 xn, pxn, result->f.attrs.space, out_space); 1278 if (!(result->f.prot & (1 << access_type))) { 1279 /* Access permission fault. */ 1280 fi->type = ARMFault_Permission; 1281 goto do_fault; 1282 } 1283 } 1284 result->f.attrs.space = out_space; 1285 result->f.attrs.secure = arm_space_is_secure(out_space); 1286 result->f.phys_addr = phys_addr; 1287 return false; 1288 do_fault: 1289 fi->domain = domain; 1290 fi->level = level; 1291 return true; 1292 } 1293 1294 /* 1295 * Translate S2 section/page access permissions to protection flags 1296 * @env: CPUARMState 1297 * @s2ap: The 2-bit stage2 access permissions (S2AP) 1298 * @xn: XN (execute-never) bits 1299 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 1300 */ 1301 static int get_S2prot_noexecute(int s2ap) 1302 { 1303 int prot = 0; 1304 1305 if (s2ap & 1) { 1306 prot |= PAGE_READ; 1307 } 1308 if (s2ap & 2) { 1309 prot |= PAGE_WRITE; 1310 } 1311 return prot; 1312 } 1313 1314 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) 1315 { 1316 int prot = get_S2prot_noexecute(s2ap); 1317 1318 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { 1319 switch (xn) { 1320 case 0: 1321 prot |= PAGE_EXEC; 1322 break; 1323 case 1: 1324 if (s1_is_el0) { 1325 prot |= PAGE_EXEC; 1326 } 1327 break; 1328 case 2: 1329 break; 1330 case 3: 1331 if (!s1_is_el0) { 1332 prot |= PAGE_EXEC; 1333 } 1334 break; 1335 default: 1336 g_assert_not_reached(); 1337 } 1338 } else { 1339 if (!extract32(xn, 1, 1)) { 1340 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 1341 prot |= PAGE_EXEC; 1342 } 1343 } 1344 } 1345 return prot; 1346 } 1347 1348 /* 1349 * Translate section/page access permissions to protection flags 1350 * @env: CPUARMState 1351 * @mmu_idx: MMU index indicating required translation regime 1352 * @is_aa64: TRUE if AArch64 1353 * @user_rw: Translated AP for user access 1354 * @prot_rw: Translated AP for privileged access 1355 * @xn: XN (execute-never) bit 1356 * @pxn: PXN (privileged execute-never) bit 1357 * @in_pa: The original input pa space 1358 * 
@out_pa: The output pa space, modified by NSTable, NS, and NSE 1359 */ 1360 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 1361 int user_rw, int prot_rw, int xn, int pxn, 1362 ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) 1363 { 1364 ARMCPU *cpu = env_archcpu(env); 1365 bool is_user = regime_is_user(env, mmu_idx); 1366 bool have_wxn; 1367 int wxn = 0; 1368 1369 assert(!regime_is_stage2(mmu_idx)); 1370 1371 if (is_user) { 1372 prot_rw = user_rw; 1373 } else { 1374 /* 1375 * PAN controls can forbid data accesses but don't affect insn fetch. 1376 * Plain PAN forbids data accesses if EL0 has data permissions; 1377 * PAN3 forbids data accesses if EL0 has either data or exec perms. 1378 * Note that for AArch64 the 'user can exec' case is exactly !xn. 1379 * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0 1380 * do not affect EPAN. 1381 */ 1382 if (user_rw && regime_is_pan(env, mmu_idx)) { 1383 prot_rw = 0; 1384 } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 && 1385 regime_is_pan(env, mmu_idx) && 1386 (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) { 1387 prot_rw = 0; 1388 } 1389 } 1390 1391 if (in_pa != out_pa) { 1392 switch (in_pa) { 1393 case ARMSS_Root: 1394 /* 1395 * R_ZWRVD: permission fault for insn fetched from non-Root, 1396 * I_WWBFB: SIF has no effect in EL3. 1397 */ 1398 return prot_rw; 1399 case ARMSS_Realm: 1400 /* 1401 * R_PKTDS: permission fault for insn fetched from non-Realm, 1402 * for Realm EL2 or EL2&0. The corresponding fault for EL1&0 1403 * happens during any stage2 translation. 1404 */ 1405 switch (mmu_idx) { 1406 case ARMMMUIdx_E2: 1407 case ARMMMUIdx_E20_0: 1408 case ARMMMUIdx_E20_2: 1409 case ARMMMUIdx_E20_2_PAN: 1410 return prot_rw; 1411 default: 1412 break; 1413 } 1414 break; 1415 case ARMSS_Secure: 1416 if (env->cp15.scr_el3 & SCR_SIF) { 1417 return prot_rw; 1418 } 1419 break; 1420 default: 1421 /* Input NonSecure must have output NonSecure. */ 1422 g_assert_not_reached(); 1423 } 1424 } 1425 1426 /* TODO have_wxn should be replaced with 1427 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 1428 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 1429 * compatible processors have EL2, which is required for [U]WXN. 
1430 */ 1431 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 1432 1433 if (have_wxn) { 1434 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 1435 } 1436 1437 if (is_aa64) { 1438 if (regime_has_2_ranges(mmu_idx) && !is_user) { 1439 xn = pxn || (user_rw & PAGE_WRITE); 1440 } 1441 } else if (arm_feature(env, ARM_FEATURE_V7)) { 1442 switch (regime_el(env, mmu_idx)) { 1443 case 1: 1444 case 3: 1445 if (is_user) { 1446 xn = xn || !(user_rw & PAGE_READ); 1447 } else { 1448 int uwxn = 0; 1449 if (have_wxn) { 1450 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 1451 } 1452 xn = xn || !(prot_rw & PAGE_READ) || pxn || 1453 (uwxn && (user_rw & PAGE_WRITE)); 1454 } 1455 break; 1456 case 2: 1457 break; 1458 } 1459 } else { 1460 xn = wxn = 0; 1461 } 1462 1463 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 1464 return prot_rw; 1465 } 1466 return prot_rw | PAGE_EXEC; 1467 } 1468 1469 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 1470 ARMMMUIdx mmu_idx) 1471 { 1472 uint64_t tcr = regime_tcr(env, mmu_idx); 1473 uint32_t el = regime_el(env, mmu_idx); 1474 int select, tsz; 1475 bool epd, hpd; 1476 1477 assert(mmu_idx != ARMMMUIdx_Stage2_S); 1478 1479 if (mmu_idx == ARMMMUIdx_Stage2) { 1480 /* VTCR */ 1481 bool sext = extract32(tcr, 4, 1); 1482 bool sign = extract32(tcr, 3, 1); 1483 1484 /* 1485 * If the sign-extend bit is not the same as t0sz[3], the result 1486 * is unpredictable. Flag this as a guest error. 1487 */ 1488 if (sign != sext) { 1489 qemu_log_mask(LOG_GUEST_ERROR, 1490 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 1491 } 1492 tsz = sextract32(tcr, 0, 4) + 8; 1493 select = 0; 1494 hpd = false; 1495 epd = false; 1496 } else if (el == 2) { 1497 /* HTCR */ 1498 tsz = extract32(tcr, 0, 3); 1499 select = 0; 1500 hpd = extract64(tcr, 24, 1); 1501 epd = false; 1502 } else { 1503 int t0sz = extract32(tcr, 0, 3); 1504 int t1sz = extract32(tcr, 16, 3); 1505 1506 if (t1sz == 0) { 1507 select = va > (0xffffffffu >> t0sz); 1508 } else { 1509 /* Note that we will detect errors later. */ 1510 select = va >= ~(0xffffffffu >> t1sz); 1511 } 1512 if (!select) { 1513 tsz = t0sz; 1514 epd = extract32(tcr, 7, 1); 1515 hpd = extract64(tcr, 41, 1); 1516 } else { 1517 tsz = t1sz; 1518 epd = extract32(tcr, 23, 1); 1519 hpd = extract64(tcr, 42, 1); 1520 } 1521 /* For aarch32, hpd0 is not enabled without t2e as well. */ 1522 hpd &= extract32(tcr, 6, 1); 1523 } 1524 1525 return (ARMVAParameters) { 1526 .tsz = tsz, 1527 .select = select, 1528 .epd = epd, 1529 .hpd = hpd, 1530 }; 1531 } 1532 1533 /* 1534 * check_s2_mmu_setup 1535 * @cpu: ARMCPU 1536 * @is_aa64: True if the translation regime is in AArch64 state 1537 * @tcr: VTCR_EL2 or VSTCR_EL2 1538 * @ds: Effective value of TCR.DS. 1539 * @iasize: Bitsize of IPAs 1540 * @stride: Page-table stride (See the ARM ARM) 1541 * 1542 * Decode the starting level of the S2 lookup, returning INT_MIN if 1543 * the configuration is invalid. 1544 */ 1545 static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr, 1546 bool ds, int iasize, int stride) 1547 { 1548 int sl0, sl2, startlevel, granulebits, levels; 1549 int s1_min_iasize, s1_max_iasize; 1550 1551 sl0 = extract32(tcr, 6, 2); 1552 if (is_aa64) { 1553 /* 1554 * AArch64.S2InvalidSL: Interpretation of SL depends on the page size, 1555 * so interleave AArch64.S2StartLevel. 1556 */ 1557 switch (stride) { 1558 case 9: /* 4KB */ 1559 /* SL2 is RES0 unless DS=1 & 4KB granule. 
             */
            sl2 = extract64(tcr, 33, 1);
            if (ds && sl2) {
                if (sl0 != 0) {
                    goto fail;
                }
                startlevel = -1;
            } else {
                startlevel = 2 - sl0;
                switch (sl0) {
                case 2:
                    if (arm_pamax(cpu) < 44) {
                        goto fail;
                    }
                    break;
                case 3:
                    if (!cpu_isar_feature(aa64_st, cpu)) {
                        goto fail;
                    }
                    startlevel = 3;
                    break;
                }
            }
            break;
        case 11: /* 16KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 42) {
                    goto fail;
                }
                break;
            case 3:
                if (!ds) {
                    goto fail;
                }
                break;
            }
            startlevel = 3 - sl0;
            break;
        case 13: /* 64KB */
            switch (sl0) {
            case 2:
                if (arm_pamax(cpu) < 44) {
                    goto fail;
                }
                break;
            case 3:
                goto fail;
            }
            startlevel = 3 - sl0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /*
         * Things are simpler for AArch32 EL2, with only 4k pages.
         * There is no separate S2InvalidSL function, but AArch32.S2Walk
         * begins with walkparms.sl0 in {'1x'}.
         */
        assert(stride == 9);
        if (sl0 >= 2) {
            goto fail;
        }
        startlevel = 2 - sl0;
    }

    /* AArch{64,32}.S2InconsistentSL are functionally equivalent. */
    levels = 3 - startlevel;
    granulebits = stride + 3;

    s1_min_iasize = levels * stride + granulebits + 1;
    s1_max_iasize = s1_min_iasize + (stride - 1) + 4;

    if (iasize >= s1_min_iasize && iasize <= s1_max_iasize) {
        return startlevel;
    }

 fail:
    return INT_MIN;
}

static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
                                  ARMGranuleSize gran, int level)
{
    /*
     * See pseudocode AArch64.BlockDescSupported(): block descriptors
     * are not valid at all levels, depending on the page size.
     */
    switch (gran) {
    case Gran4K:
        return (level == 0 && ds) || level == 1 || level == 2;
    case Gran16K:
        return (level == 1 && ds) || level == 2;
    case Gran64K:
        return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
    default:
        g_assert_not_reached();
    }
}

static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
{
    uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
    return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
}

/**
 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a long-format DFSR/IFSR fault register, with the following caveat:
 * the WnR bit is never set (the caller must do this).
 *
 * @env: CPUARMState
 * @ptw: Current and next stage parameters for the walk.
1677 * @address: virtual address to get physical address for 1678 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH 1679 * @memop: memory operation feeding this access, or 0 for none 1680 * @result: set on translation success, 1681 * @fi: set to fault info if the translation fails 1682 */ 1683 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, 1684 uint64_t address, 1685 MMUAccessType access_type, MemOp memop, 1686 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 1687 { 1688 ARMCPU *cpu = env_archcpu(env); 1689 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 1690 int32_t level; 1691 ARMVAParameters param; 1692 uint64_t ttbr; 1693 hwaddr descaddr, indexmask, indexmask_grainsize; 1694 uint32_t tableattrs; 1695 target_ulong page_size; 1696 uint64_t attrs; 1697 int32_t stride; 1698 int addrsize, inputsize, outputsize; 1699 uint64_t tcr = regime_tcr(env, mmu_idx); 1700 int ap, xn, pxn; 1701 uint32_t el = regime_el(env, mmu_idx); 1702 uint64_t descaddrmask; 1703 bool aarch64 = arm_el_is_aa64(env, el); 1704 uint64_t descriptor, new_descriptor; 1705 ARMSecuritySpace out_space; 1706 bool device; 1707 1708 /* TODO: This code does not support shareability levels. */ 1709 if (aarch64) { 1710 int ps; 1711 1712 param = aa64_va_parameters(env, address, mmu_idx, 1713 access_type != MMU_INST_FETCH, 1714 !arm_el_is_aa64(env, 1)); 1715 level = 0; 1716 1717 /* 1718 * If TxSZ is programmed to a value larger than the maximum, 1719 * or smaller than the effective minimum, it is IMPLEMENTATION 1720 * DEFINED whether we behave as if the field were programmed 1721 * within bounds, or if a level 0 Translation fault is generated. 1722 * 1723 * With FEAT_LVA, fault on less than minimum becomes required, 1724 * so our choice is to always raise the fault. 1725 */ 1726 if (param.tsz_oob) { 1727 goto do_translation_fault; 1728 } 1729 1730 addrsize = 64 - 8 * param.tbi; 1731 inputsize = 64 - param.tsz; 1732 1733 /* 1734 * Bound PS by PARANGE to find the effective output address size. 1735 * ID_AA64MMFR0 is a read-only register so values outside of the 1736 * supported mappings can be considered an implementation error. 1737 */ 1738 ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); 1739 ps = MIN(ps, param.ps); 1740 assert(ps < ARRAY_SIZE(pamax_map)); 1741 outputsize = pamax_map[ps]; 1742 1743 /* 1744 * With LPA2, the effective output address (OA) size is at most 48 bits 1745 * unless TCR.DS == 1 1746 */ 1747 if (!param.ds && param.gran != Gran64K) { 1748 outputsize = MIN(outputsize, 48); 1749 } 1750 } else { 1751 param = aa32_va_parameters(env, address, mmu_idx); 1752 level = 1; 1753 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 1754 inputsize = addrsize - param.tsz; 1755 outputsize = 40; 1756 } 1757 1758 /* 1759 * We determined the region when collecting the parameters, but we 1760 * have not yet validated that the address is valid for the region. 1761 * Extract the top bits and verify that they all match select. 1762 * 1763 * For aa32, if inputsize == addrsize, then we have selected the 1764 * region by exclusion in aa32_va_parameters and there is no more 1765 * validation to do here. 
1766 */ 1767 if (inputsize < addrsize) { 1768 target_ulong top_bits = sextract64(address, inputsize, 1769 addrsize - inputsize); 1770 if (-top_bits != param.select) { 1771 /* The gap between the two regions is a Translation fault */ 1772 goto do_translation_fault; 1773 } 1774 } 1775 1776 stride = arm_granule_bits(param.gran) - 3; 1777 1778 /* 1779 * Note that QEMU ignores shareability and cacheability attributes, 1780 * so we don't need to do anything with the SH, ORGN, IRGN fields 1781 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 1782 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 1783 * implement any ASID-like capability so we can ignore it (instead 1784 * we will always flush the TLB any time the ASID is changed). 1785 */ 1786 ttbr = regime_ttbr(env, mmu_idx, param.select); 1787 1788 /* 1789 * Here we should have set up all the parameters for the translation: 1790 * inputsize, ttbr, epd, stride, tbi 1791 */ 1792 1793 if (param.epd) { 1794 /* 1795 * Translation table walk disabled => Translation fault on TLB miss 1796 * Note: This is always 0 on 64-bit EL2 and EL3. 1797 */ 1798 goto do_translation_fault; 1799 } 1800 1801 if (!regime_is_stage2(mmu_idx)) { 1802 /* 1803 * The starting level depends on the virtual address size (which can 1804 * be up to 48 bits) and the translation granule size. It indicates 1805 * the number of strides (stride bits at a time) needed to 1806 * consume the bits of the input address. In the pseudocode this is: 1807 * level = 4 - RoundUp((inputsize - grainsize) / stride) 1808 * where their 'inputsize' is our 'inputsize', 'grainsize' is 1809 * our 'stride + 3' and 'stride' is our 'stride'. 1810 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 1811 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 1812 * = 4 - (inputsize - 4) / stride; 1813 */ 1814 level = 4 - (inputsize - 4) / stride; 1815 } else { 1816 int startlevel = check_s2_mmu_setup(cpu, aarch64, tcr, param.ds, 1817 inputsize, stride); 1818 if (startlevel == INT_MIN) { 1819 level = 0; 1820 goto do_translation_fault; 1821 } 1822 level = startlevel; 1823 } 1824 1825 indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3); 1826 indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level))); 1827 1828 /* Now we can extract the actual base address from the TTBR */ 1829 descaddr = extract64(ttbr, 0, 48); 1830 1831 /* 1832 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR. 1833 * 1834 * Otherwise, if the base address is out of range, raise AddressSizeFault. 1835 * In the pseudocode, this is !IsZero(baseregister<47:outputsize>), 1836 * but we've just cleared the bits above 47, so simplify the test. 1837 */ 1838 if (outputsize > 48) { 1839 descaddr |= extract64(ttbr, 2, 4) << 48; 1840 } else if (descaddr >> outputsize) { 1841 level = 0; 1842 fi->type = ARMFault_AddressSize; 1843 goto do_fault; 1844 } 1845 1846 /* 1847 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 1848 * and also to mask out CnP (bit 0) which could validly be non-zero. 1849 */ 1850 descaddr &= ~indexmask; 1851 1852 /* 1853 * For AArch32, the address field in the descriptor goes up to bit 39 1854 * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0 1855 * or an AddressSize fault is raised. So for v8 we extract those SBZ 1856 * bits as part of the address, which will be checked via outputsize. 
1857 * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2; 1858 * the highest bits of a 52-bit output are placed elsewhere. 1859 */ 1860 if (param.ds) { 1861 descaddrmask = MAKE_64BIT_MASK(0, 50); 1862 } else if (arm_feature(env, ARM_FEATURE_V8)) { 1863 descaddrmask = MAKE_64BIT_MASK(0, 48); 1864 } else { 1865 descaddrmask = MAKE_64BIT_MASK(0, 40); 1866 } 1867 descaddrmask &= ~indexmask_grainsize; 1868 tableattrs = 0; 1869 1870 next_level: 1871 descaddr |= (address >> (stride * (4 - level))) & indexmask; 1872 descaddr &= ~7ULL; 1873 1874 /* 1875 * Process the NSTable bit from the previous level. This changes 1876 * the table address space and the output space from Secure to 1877 * NonSecure. With RME, the EL3 translation regime does not change 1878 * from Root to NonSecure. 1879 */ 1880 if (ptw->in_space == ARMSS_Secure 1881 && !regime_is_stage2(mmu_idx) 1882 && extract32(tableattrs, 4, 1)) { 1883 /* 1884 * Stage2_S -> Stage2 or Phys_S -> Phys_NS 1885 * Assert the relative order of the secure/non-secure indexes. 1886 */ 1887 QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS); 1888 QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2); 1889 ptw->in_ptw_idx += 1; 1890 ptw->in_space = ARMSS_NonSecure; 1891 } 1892 1893 if (!S1_ptw_translate(env, ptw, descaddr, fi)) { 1894 goto do_fault; 1895 } 1896 descriptor = arm_ldq_ptw(env, ptw, fi); 1897 if (fi->type != ARMFault_None) { 1898 goto do_fault; 1899 } 1900 new_descriptor = descriptor; 1901 1902 restart_atomic_update: 1903 if (!(descriptor & 1) || 1904 (!(descriptor & 2) && 1905 !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) { 1906 /* Invalid, or a block descriptor at an invalid level */ 1907 goto do_translation_fault; 1908 } 1909 1910 descaddr = descriptor & descaddrmask; 1911 1912 /* 1913 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12] 1914 * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of 1915 * descaddr are in [9:8]. Otherwise, if descaddr is out of range, 1916 * raise AddressSizeFault. 1917 */ 1918 if (outputsize > 48) { 1919 if (param.ds) { 1920 descaddr |= extract64(descriptor, 8, 2) << 50; 1921 } else { 1922 descaddr |= extract64(descriptor, 12, 4) << 48; 1923 } 1924 } else if (descaddr >> outputsize) { 1925 fi->type = ARMFault_AddressSize; 1926 goto do_fault; 1927 } 1928 1929 if ((descriptor & 2) && (level < 3)) { 1930 /* 1931 * Table entry. The top five bits are attributes which may 1932 * propagate down through lower levels of the table (and 1933 * which are all arranged so that 0 means "no effect", so 1934 * we can gather them up by ORing in the bits at each level). 1935 */ 1936 tableattrs |= extract64(descriptor, 59, 5); 1937 level++; 1938 indexmask = indexmask_grainsize; 1939 goto next_level; 1940 } 1941 1942 /* 1943 * Block entry at level 1 or 2, or page entry at level 3. 1944 * These are basically the same thing, although the number 1945 * of bits we pull in from the vaddr varies. Note that although 1946 * descaddrmask masks enough of the low bits of the descriptor 1947 * to give a correct page or table address, the address field 1948 * in a block descriptor is smaller; so we need to explicitly 1949 * clear the lower bits here before ORing in the low vaddr bits. 1950 * 1951 * Afterward, descaddr is the final physical address. 1952 */ 1953 page_size = (1ULL << ((stride * (4 - level)) + 3)); 1954 descaddr &= ~(hwaddr)(page_size - 1); 1955 descaddr |= (address & (page_size - 1)); 1956 1957 if (likely(!ptw->in_debug)) { 1958 /* 1959 * Access flag. 
1960 * If HA is enabled, prepare to update the descriptor below. 1961 * Otherwise, pass the access fault on to software. 1962 */ 1963 if (!(descriptor & (1 << 10))) { 1964 if (param.ha) { 1965 new_descriptor |= 1 << 10; /* AF */ 1966 } else { 1967 fi->type = ARMFault_AccessFlag; 1968 goto do_fault; 1969 } 1970 } 1971 1972 /* 1973 * Dirty Bit. 1974 * If HD is enabled, pre-emptively set/clear the appropriate AP/S2AP 1975 * bit for writeback. The actual write protection test may still be 1976 * overridden by tableattrs, to be merged below. 1977 */ 1978 if (param.hd 1979 && extract64(descriptor, 51, 1) /* DBM */ 1980 && access_type == MMU_DATA_STORE) { 1981 if (regime_is_stage2(mmu_idx)) { 1982 new_descriptor |= 1ull << 7; /* set S2AP[1] */ 1983 } else { 1984 new_descriptor &= ~(1ull << 7); /* clear AP[2] */ 1985 } 1986 } 1987 } 1988 1989 /* 1990 * Extract attributes from the (modified) descriptor, and apply 1991 * table descriptors. Stage 2 table descriptors do not include 1992 * any attribute fields. HPD disables all the table attributes 1993 * except NSTable (which we have already handled). 1994 */ 1995 attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14)); 1996 if (!regime_is_stage2(mmu_idx)) { 1997 if (!param.hpd) { 1998 attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ 1999 /* 2000 * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 2001 * means "force PL1 access only", which means forcing AP[1] to 0. 2002 */ 2003 attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */ 2004 attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */ 2005 } 2006 } 2007 2008 ap = extract32(attrs, 6, 2); 2009 out_space = ptw->in_space; 2010 if (regime_is_stage2(mmu_idx)) { 2011 /* 2012 * R_GYNXY: For stage2 in Realm security state, bit 55 is NS. 2013 * The bit remains ignored for other security states. 2014 * R_YMCSL: Executing an insn fetched from non-Realm causes 2015 * a stage2 permission fault. 2016 */ 2017 if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) { 2018 out_space = ARMSS_NonSecure; 2019 result->f.prot = get_S2prot_noexecute(ap); 2020 } else { 2021 xn = extract64(attrs, 53, 2); 2022 result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); 2023 } 2024 2025 result->cacheattrs.is_s2_format = true; 2026 result->cacheattrs.attrs = extract32(attrs, 2, 4); 2027 /* 2028 * Security state does not really affect HCR_EL2.FWB; 2029 * we only need to filter FWB for aa32 or other FEAT. 2030 */ 2031 device = S2_attrs_are_device(arm_hcr_el2_eff(env), 2032 result->cacheattrs.attrs); 2033 } else { 2034 int nse, ns = extract32(attrs, 5, 1); 2035 uint8_t attrindx; 2036 uint64_t mair; 2037 int user_rw, prot_rw; 2038 2039 switch (out_space) { 2040 case ARMSS_Root: 2041 /* 2042 * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime. 2043 * R_XTYPW: NSE and NS together select the output pa space. 2044 */ 2045 nse = extract32(attrs, 11, 1); 2046 out_space = (nse << 1) | ns; 2047 if (out_space == ARMSS_Secure && 2048 !cpu_isar_feature(aa64_sel2, cpu)) { 2049 out_space = ARMSS_NonSecure; 2050 } 2051 break; 2052 case ARMSS_Secure: 2053 if (ns) { 2054 out_space = ARMSS_NonSecure; 2055 } 2056 break; 2057 case ARMSS_Realm: 2058 switch (mmu_idx) { 2059 case ARMMMUIdx_Stage1_E0: 2060 case ARMMMUIdx_Stage1_E1: 2061 case ARMMMUIdx_Stage1_E1_PAN: 2062 /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. 
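 * The stage 1 output space therefore stays Realm; only the stage 2 NS bit (bit 55, handled above) can move the final output to NonSecure.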
*/ 2063 break; 2064 case ARMMMUIdx_E2: 2065 case ARMMMUIdx_E20_0: 2066 case ARMMMUIdx_E20_2: 2067 case ARMMMUIdx_E20_2_PAN: 2068 /* 2069 * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1, 2070 * NS changes the output to non-secure space. 2071 */ 2072 if (ns) { 2073 out_space = ARMSS_NonSecure; 2074 } 2075 break; 2076 default: 2077 g_assert_not_reached(); 2078 } 2079 break; 2080 case ARMSS_NonSecure: 2081 /* R_QRMFF: For NonSecure state, the NS bit is RES0. */ 2082 break; 2083 default: 2084 g_assert_not_reached(); 2085 } 2086 xn = extract64(attrs, 54, 1); 2087 pxn = extract64(attrs, 53, 1); 2088 2089 if (el == 1 && nv_nv1_enabled(env, ptw)) { 2090 /* 2091 * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page 2092 * descriptor bit 54 holds PXN, 53 is RES0, and the effective value 2093 * of UXN is 0. Similarly for bits 59 and 60 in table descriptors 2094 * (which we have already folded into bits 53 and 54 of attrs). 2095 * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. 2096 * Similarly, APTable[0] from the table descriptor is treated as 0; 2097 * we already folded this into AP[1] and squashing that to 0 does 2098 * the right thing. 2099 */ 2100 pxn = xn; 2101 xn = 0; 2102 ap &= ~1; 2103 } 2104 2105 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 2106 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 2107 /* 2108 * Note that we modified ptw->in_space earlier for NSTable, but 2109 * result->f.attrs retains a copy of the original security space. 2110 */ 2111 result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw, 2112 xn, pxn, result->f.attrs.space, out_space); 2113 2114 /* Index into MAIR registers for cache attributes */ 2115 attrindx = extract32(attrs, 2, 3); 2116 mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 2117 assert(attrindx <= 7); 2118 result->cacheattrs.is_s2_format = false; 2119 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); 2120 2121 /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */ 2122 if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) { 2123 result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */ 2124 } 2125 device = S1_attrs_are_device(result->cacheattrs.attrs); 2126 } 2127 2128 /* 2129 * Enable alignment checks on Device memory. 2130 * 2131 * Per R_XCHFJ, the correct ordering for alignment, permission, 2132 * and stage 2 faults is: 2133 * - Alignment fault caused by the memory type 2134 * - Permission fault 2135 * - A stage 2 fault on the memory access 2136 * Perform the alignment check now, so that we recognize it in 2137 * the correct order. Set TLB_CHECK_ALIGNED so that any subsequent 2138 * softmmu tlb hit will also check the alignment; clear along the 2139 * non-device path so that tlb_fill_flags is consistent in the 2140 * event of restart_atomic_update. 2141 * 2142 * In v7, for a CPU without the Virtualization Extensions this 2143 * access is UNPREDICTABLE; we choose to make it take the alignment 2144 * fault as is required for a v7VE CPU. (QEMU doesn't emulate any 2145 * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.) 2146 */ 2147 if (device) { 2148 unsigned a_bits = memop_atomicity_bits(memop); 2149 if (address & ((1 << a_bits) - 1)) { 2150 fi->type = ARMFault_Alignment; 2151 goto do_fault; 2152 } 2153 result->f.tlb_fill_flags = TLB_CHECK_ALIGNED; 2154 } else { 2155 result->f.tlb_fill_flags = 0; 2156 } 2157 2158 if (!(result->f.prot & (1 << access_type))) { 2159 fi->type = ARMFault_Permission; 2160 goto do_fault; 2161 } 2162 2163 /* If FEAT_HAFDBS has made changes, update the PTE. 
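 * The update is done with a compare-and-swap against the descriptor value we originally read, so a concurrent update by the guest is detected (and handled via the I_YZSVV restart below) rather than silently overwritten.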
*/ 2164 if (new_descriptor != descriptor) { 2165 new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi); 2166 if (fi->type != ARMFault_None) { 2167 goto do_fault; 2168 } 2169 /* 2170 * I_YZSVV says that if the in-memory descriptor has changed, 2171 * then we must use the information in that new value 2172 * (which might include a different output address, different 2173 * attributes, or generate a fault). 2174 * Restart the handling of the descriptor value from scratch. 2175 */ 2176 if (new_descriptor != descriptor) { 2177 descriptor = new_descriptor; 2178 goto restart_atomic_update; 2179 } 2180 } 2181 2182 result->f.attrs.space = out_space; 2183 result->f.attrs.secure = arm_space_is_secure(out_space); 2184 2185 /* 2186 * For FEAT_LPA2 and effective DS, the SH field in the attributes 2187 * was re-purposed for output address bits. The SH attribute in 2188 * that case comes from TCR_ELx, which we extracted earlier. 2189 */ 2190 if (param.ds) { 2191 result->cacheattrs.shareability = param.sh; 2192 } else { 2193 result->cacheattrs.shareability = extract32(attrs, 8, 2); 2194 } 2195 2196 result->f.phys_addr = descaddr; 2197 result->f.lg_page_size = ctz64(page_size); 2198 return false; 2199 2200 do_translation_fault: 2201 fi->type = ARMFault_Translation; 2202 do_fault: 2203 if (fi->s1ptw) { 2204 /* Retain the existing stage 2 fi->level */ 2205 assert(fi->stage2); 2206 } else { 2207 fi->level = level; 2208 fi->stage2 = regime_is_stage2(mmu_idx); 2209 } 2210 fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx); 2211 return true; 2212 } 2213 2214 static bool get_phys_addr_pmsav5(CPUARMState *env, 2215 S1Translate *ptw, 2216 uint32_t address, 2217 MMUAccessType access_type, 2218 GetPhysAddrResult *result, 2219 ARMMMUFaultInfo *fi) 2220 { 2221 int n; 2222 uint32_t mask; 2223 uint32_t base; 2224 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 2225 bool is_user = regime_is_user(env, mmu_idx); 2226 2227 if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { 2228 /* MPU disabled. */ 2229 result->f.phys_addr = address; 2230 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 2231 return false; 2232 } 2233 2234 result->f.phys_addr = address; 2235 for (n = 7; n >= 0; n--) { 2236 base = env->cp15.c6_region[n]; 2237 if ((base & 1) == 0) { 2238 continue; 2239 } 2240 mask = 1 << ((base >> 1) & 0x1f); 2241 /* Keep this shift separate from the above to avoid an 2242 (undefined) << 32. */ 2243 mask = (mask << 1) - 1; 2244 if (((base ^ address) & ~mask) == 0) { 2245 break; 2246 } 2247 } 2248 if (n < 0) { 2249 fi->type = ARMFault_Background; 2250 return true; 2251 } 2252 2253 if (access_type == MMU_INST_FETCH) { 2254 mask = env->cp15.pmsav5_insn_ap; 2255 } else { 2256 mask = env->cp15.pmsav5_data_ap; 2257 } 2258 mask = (mask >> (n * 4)) & 0xf; 2259 switch (mask) { 2260 case 0: 2261 fi->type = ARMFault_Permission; 2262 fi->level = 1; 2263 return true; 2264 case 1: 2265 if (is_user) { 2266 fi->type = ARMFault_Permission; 2267 fi->level = 1; 2268 return true; 2269 } 2270 result->f.prot = PAGE_READ | PAGE_WRITE; 2271 break; 2272 case 2: 2273 result->f.prot = PAGE_READ; 2274 if (!is_user) { 2275 result->f.prot |= PAGE_WRITE; 2276 } 2277 break; 2278 case 3: 2279 result->f.prot = PAGE_READ | PAGE_WRITE; 2280 break; 2281 case 5: 2282 if (is_user) { 2283 fi->type = ARMFault_Permission; 2284 fi->level = 1; 2285 return true; 2286 } 2287 result->f.prot = PAGE_READ; 2288 break; 2289 case 6: 2290 result->f.prot = PAGE_READ; 2291 break; 2292 default: 2293 /* Bad permission. 
*/ 2294 fi->type = ARMFault_Permission; 2295 fi->level = 1; 2296 return true; 2297 } 2298 result->f.prot |= PAGE_EXEC; 2299 return false; 2300 } 2301 2302 static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx, 2303 int32_t address, uint8_t *prot) 2304 { 2305 if (!arm_feature(env, ARM_FEATURE_M)) { 2306 *prot = PAGE_READ | PAGE_WRITE; 2307 switch (address) { 2308 case 0xF0000000 ... 0xFFFFFFFF: 2309 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 2310 /* hivecs execing is ok */ 2311 *prot |= PAGE_EXEC; 2312 } 2313 break; 2314 case 0x00000000 ... 0x7FFFFFFF: 2315 *prot |= PAGE_EXEC; 2316 break; 2317 } 2318 } else { 2319 /* Default system address map for M profile cores. 2320 * The architecture specifies which regions are execute-never; 2321 * at the MPU level no other checks are defined. 2322 */ 2323 switch (address) { 2324 case 0x00000000 ... 0x1fffffff: /* ROM */ 2325 case 0x20000000 ... 0x3fffffff: /* SRAM */ 2326 case 0x60000000 ... 0x7fffffff: /* RAM */ 2327 case 0x80000000 ... 0x9fffffff: /* RAM */ 2328 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 2329 break; 2330 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 2331 case 0xa0000000 ... 0xbfffffff: /* Device */ 2332 case 0xc0000000 ... 0xdfffffff: /* Device */ 2333 case 0xe0000000 ... 0xffffffff: /* System */ 2334 *prot = PAGE_READ | PAGE_WRITE; 2335 break; 2336 default: 2337 g_assert_not_reached(); 2338 } 2339 } 2340 } 2341 2342 static bool m_is_ppb_region(CPUARMState *env, uint32_t address) 2343 { 2344 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 2345 return arm_feature(env, ARM_FEATURE_M) && 2346 extract32(address, 20, 12) == 0xe00; 2347 } 2348 2349 static bool m_is_system_region(CPUARMState *env, uint32_t address) 2350 { 2351 /* 2352 * True if address is in the M profile system region 2353 * 0xe0000000 - 0xffffffff 2354 */ 2355 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 2356 } 2357 2358 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx, 2359 bool is_secure, bool is_user) 2360 { 2361 /* 2362 * Return true if we should use the default memory map as a 2363 * "background" region if there are no hits against any MPU regions. 2364 */ 2365 CPUARMState *env = &cpu->env; 2366 2367 if (is_user) { 2368 return false; 2369 } 2370 2371 if (arm_feature(env, ARM_FEATURE_M)) { 2372 return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 2373 } 2374 2375 if (mmu_idx == ARMMMUIdx_Stage2) { 2376 return false; 2377 } 2378 2379 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 2380 } 2381 2382 static bool get_phys_addr_pmsav7(CPUARMState *env, 2383 S1Translate *ptw, 2384 uint32_t address, 2385 MMUAccessType access_type, 2386 GetPhysAddrResult *result, 2387 ARMMMUFaultInfo *fi) 2388 { 2389 ARMCPU *cpu = env_archcpu(env); 2390 int n; 2391 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 2392 bool is_user = regime_is_user(env, mmu_idx); 2393 bool secure = arm_space_is_secure(ptw->in_space); 2394 2395 result->f.phys_addr = address; 2396 result->f.lg_page_size = TARGET_PAGE_BITS; 2397 result->f.prot = 0; 2398 2399 if (regime_translation_disabled(env, mmu_idx, ptw->in_space) || 2400 m_is_ppb_region(env, address)) { 2401 /* 2402 * MPU disabled or M profile PPB access: use default memory map. 2403 * The other case which uses the default memory map in the 2404 * v7M ARM ARM pseudocode is exception vector reads from the vector 2405 * table. 
In QEMU those accesses are done in arm_v7m_load_vector(), 2406 * which always does a direct read using address_space_ldl(), rather 2407 * than going via this function, so we don't need to check that here. 2408 */ 2409 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); 2410 } else { /* MPU enabled */ 2411 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 2412 /* region search */ 2413 uint32_t base = env->pmsav7.drbar[n]; 2414 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 2415 uint32_t rmask; 2416 bool srdis = false; 2417 2418 if (!(env->pmsav7.drsr[n] & 0x1)) { 2419 continue; 2420 } 2421 2422 if (!rsize) { 2423 qemu_log_mask(LOG_GUEST_ERROR, 2424 "DRSR[%d]: Rsize field cannot be 0\n", n); 2425 continue; 2426 } 2427 rsize++; 2428 rmask = (1ull << rsize) - 1; 2429 2430 if (base & rmask) { 2431 qemu_log_mask(LOG_GUEST_ERROR, 2432 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 2433 "to DRSR region size, mask = 0x%" PRIx32 "\n", 2434 n, base, rmask); 2435 continue; 2436 } 2437 2438 if (address < base || address > base + rmask) { 2439 /* 2440 * Address not in this region. We must check whether the 2441 * region covers addresses in the same page as our address. 2442 * In that case we must not report a size that covers the 2443 * whole page for a subsequent hit against a different MPU 2444 * region or the background region, because it would result in 2445 * incorrect TLB hits for subsequent accesses to addresses that 2446 * are in this MPU region. 2447 */ 2448 if (ranges_overlap(base, rmask, 2449 address & TARGET_PAGE_MASK, 2450 TARGET_PAGE_SIZE)) { 2451 result->f.lg_page_size = 0; 2452 } 2453 continue; 2454 } 2455 2456 /* Region matched */ 2457 2458 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 2459 int i, snd; 2460 uint32_t srdis_mask; 2461 2462 rsize -= 3; /* sub region size (power of 2) */ 2463 snd = ((address - base) >> rsize) & 0x7; 2464 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 2465 2466 srdis_mask = srdis ? 0x3 : 0x0; 2467 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 2468 /* 2469 * This will check in groups of 2, 4 and then 8, whether 2470 * the subregion bits are consistent. rsize is incremented 2471 * back up to give the region size, considering consistent 2472 * adjacent subregions as one region. Stop testing if rsize 2473 * is already big enough for an entire QEMU page. 2474 */ 2475 int snd_rounded = snd & ~(i - 1); 2476 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 2477 snd_rounded + 8, i); 2478 if (srdis_mask ^ srdis_multi) { 2479 break; 2480 } 2481 srdis_mask = (srdis_mask << i) | srdis_mask; 2482 rsize++; 2483 } 2484 } 2485 if (srdis) { 2486 continue; 2487 } 2488 if (rsize < TARGET_PAGE_BITS) { 2489 result->f.lg_page_size = rsize; 2490 } 2491 break; 2492 } 2493 2494 if (n == -1) { /* no hits */ 2495 if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { 2496 /* background fault */ 2497 fi->type = ARMFault_Background; 2498 return true; 2499 } 2500 get_phys_addr_pmsav7_default(env, mmu_idx, address, 2501 &result->f.prot); 2502 } else { /* a MPU hit! 
*/ 2503 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 2504 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 2505 2506 if (m_is_system_region(env, address)) { 2507 /* System space is always execute never */ 2508 xn = 1; 2509 } 2510 2511 if (is_user) { /* User mode AP bit decoding */ 2512 switch (ap) { 2513 case 0: 2514 case 1: 2515 case 5: 2516 break; /* no access */ 2517 case 3: 2518 result->f.prot |= PAGE_WRITE; 2519 /* fall through */ 2520 case 2: 2521 case 6: 2522 result->f.prot |= PAGE_READ | PAGE_EXEC; 2523 break; 2524 case 7: 2525 /* for v7M, same as 6; for R profile a reserved value */ 2526 if (arm_feature(env, ARM_FEATURE_M)) { 2527 result->f.prot |= PAGE_READ | PAGE_EXEC; 2528 break; 2529 } 2530 /* fall through */ 2531 default: 2532 qemu_log_mask(LOG_GUEST_ERROR, 2533 "DRACR[%d]: Bad value for AP bits: 0x%" 2534 PRIx32 "\n", n, ap); 2535 } 2536 } else { /* Priv. mode AP bits decoding */ 2537 switch (ap) { 2538 case 0: 2539 break; /* no access */ 2540 case 1: 2541 case 2: 2542 case 3: 2543 result->f.prot |= PAGE_WRITE; 2544 /* fall through */ 2545 case 5: 2546 case 6: 2547 result->f.prot |= PAGE_READ | PAGE_EXEC; 2548 break; 2549 case 7: 2550 /* for v7M, same as 6; for R profile a reserved value */ 2551 if (arm_feature(env, ARM_FEATURE_M)) { 2552 result->f.prot |= PAGE_READ | PAGE_EXEC; 2553 break; 2554 } 2555 /* fall through */ 2556 default: 2557 qemu_log_mask(LOG_GUEST_ERROR, 2558 "DRACR[%d]: Bad value for AP bits: 0x%" 2559 PRIx32 "\n", n, ap); 2560 } 2561 } 2562 2563 /* execute never */ 2564 if (xn) { 2565 result->f.prot &= ~PAGE_EXEC; 2566 } 2567 } 2568 } 2569 2570 fi->type = ARMFault_Permission; 2571 fi->level = 1; 2572 return !(result->f.prot & (1 << access_type)); 2573 } 2574 2575 static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx, 2576 uint32_t secure) 2577 { 2578 if (regime_el(env, mmu_idx) == 2) { 2579 return env->pmsav8.hprbar; 2580 } else { 2581 return env->pmsav8.rbar[secure]; 2582 } 2583 } 2584 2585 static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx, 2586 uint32_t secure) 2587 { 2588 if (regime_el(env, mmu_idx) == 2) { 2589 return env->pmsav8.hprlar; 2590 } else { 2591 return env->pmsav8.rlar[secure]; 2592 } 2593 } 2594 2595 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 2596 MMUAccessType access_type, ARMMMUIdx mmu_idx, 2597 bool secure, GetPhysAddrResult *result, 2598 ARMMMUFaultInfo *fi, uint32_t *mregion) 2599 { 2600 /* 2601 * Perform a PMSAv8 MPU lookup (without also doing the SAU check 2602 * that a full phys-to-virt translation does). 2603 * mregion is (if not NULL) set to the region number which matched, 2604 * or -1 if no region number is returned (MPU off, address did not 2605 * hit a region, address hit in multiple regions). 2606 * If the region hit doesn't cover the entire TARGET_PAGE the address 2607 * is within, then we set the result page_size to 1 to force the 2608 * memory system to use a subpage. 
2609 */ 2610 ARMCPU *cpu = env_archcpu(env); 2611 bool is_user = regime_is_user(env, mmu_idx); 2612 int n; 2613 int matchregion = -1; 2614 bool hit = false; 2615 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 2616 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 2617 int region_counter; 2618 2619 if (regime_el(env, mmu_idx) == 2) { 2620 region_counter = cpu->pmsav8r_hdregion; 2621 } else { 2622 region_counter = cpu->pmsav7_dregion; 2623 } 2624 2625 result->f.lg_page_size = TARGET_PAGE_BITS; 2626 result->f.phys_addr = address; 2627 result->f.prot = 0; 2628 if (mregion) { 2629 *mregion = -1; 2630 } 2631 2632 if (mmu_idx == ARMMMUIdx_Stage2) { 2633 fi->stage2 = true; 2634 } 2635 2636 /* 2637 * Unlike the ARM ARM pseudocode, we don't need to check whether this 2638 * was an exception vector read from the vector table (which is always 2639 * done using the default system address map), because those accesses 2640 * are done in arm_v7m_load_vector(), which always does a direct 2641 * read using address_space_ldl(), rather than going via this function. 2642 */ 2643 if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) { 2644 /* MPU disabled */ 2645 hit = true; 2646 } else if (m_is_ppb_region(env, address)) { 2647 hit = true; 2648 } else { 2649 if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) { 2650 hit = true; 2651 } 2652 2653 uint32_t bitmask; 2654 if (arm_feature(env, ARM_FEATURE_M)) { 2655 bitmask = 0x1f; 2656 } else { 2657 bitmask = 0x3f; 2658 fi->level = 0; 2659 } 2660 2661 for (n = region_counter - 1; n >= 0; n--) { 2662 /* region search */ 2663 /* 2664 * Note that the base address is bits [31:x] from the register 2665 * with bits [x-1:0] all zeroes, but the limit address is bits 2666 * [31:x] from the register with bits [x:0] all ones. Where x is 2667 * 5 for Cortex-M and 6 for Cortex-R 2668 */ 2669 uint32_t base = regime_rbar(env, mmu_idx, secure)[n] & ~bitmask; 2670 uint32_t limit = regime_rlar(env, mmu_idx, secure)[n] | bitmask; 2671 2672 if (!(regime_rlar(env, mmu_idx, secure)[n] & 0x1)) { 2673 /* Region disabled */ 2674 continue; 2675 } 2676 2677 if (address < base || address > limit) { 2678 /* 2679 * Address not in this region. We must check whether the 2680 * region covers addresses in the same page as our address. 2681 * In that case we must not report a size that covers the 2682 * whole page for a subsequent hit against a different MPU 2683 * region or the background region, because it would result in 2684 * incorrect TLB hits for subsequent accesses to addresses that 2685 * are in this MPU region. 
2686 */ 2687 if (limit >= base && 2688 ranges_overlap(base, limit - base + 1, 2689 addr_page_base, 2690 TARGET_PAGE_SIZE)) { 2691 result->f.lg_page_size = 0; 2692 } 2693 continue; 2694 } 2695 2696 if (base > addr_page_base || limit < addr_page_limit) { 2697 result->f.lg_page_size = 0; 2698 } 2699 2700 if (matchregion != -1) { 2701 /* 2702 * Multiple regions match -- always a failure (unlike 2703 * PMSAv7 where highest-numbered-region wins) 2704 */ 2705 fi->type = ARMFault_Permission; 2706 if (arm_feature(env, ARM_FEATURE_M)) { 2707 fi->level = 1; 2708 } 2709 return true; 2710 } 2711 2712 matchregion = n; 2713 hit = true; 2714 } 2715 } 2716 2717 if (!hit) { 2718 if (arm_feature(env, ARM_FEATURE_M)) { 2719 fi->type = ARMFault_Background; 2720 } else { 2721 fi->type = ARMFault_Permission; 2722 } 2723 return true; 2724 } 2725 2726 if (matchregion == -1) { 2727 /* hit using the background region */ 2728 get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->f.prot); 2729 } else { 2730 uint32_t matched_rbar = regime_rbar(env, mmu_idx, secure)[matchregion]; 2731 uint32_t matched_rlar = regime_rlar(env, mmu_idx, secure)[matchregion]; 2732 uint32_t ap = extract32(matched_rbar, 1, 2); 2733 uint32_t xn = extract32(matched_rbar, 0, 1); 2734 bool pxn = false; 2735 2736 if (arm_feature(env, ARM_FEATURE_V8_1M)) { 2737 pxn = extract32(matched_rlar, 4, 1); 2738 } 2739 2740 if (m_is_system_region(env, address)) { 2741 /* System space is always execute never */ 2742 xn = 1; 2743 } 2744 2745 if (regime_el(env, mmu_idx) == 2) { 2746 result->f.prot = simple_ap_to_rw_prot_is_user(ap, 2747 mmu_idx != ARMMMUIdx_E2); 2748 } else { 2749 result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 2750 } 2751 2752 if (!arm_feature(env, ARM_FEATURE_M)) { 2753 uint8_t attrindx = extract32(matched_rlar, 1, 3); 2754 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 2755 uint8_t sh = extract32(matched_rlar, 3, 2); 2756 2757 if (regime_sctlr(env, mmu_idx) & SCTLR_WXN && 2758 result->f.prot & PAGE_WRITE && mmu_idx != ARMMMUIdx_Stage2) { 2759 xn = 0x1; 2760 } 2761 2762 if ((regime_el(env, mmu_idx) == 1) && 2763 regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) { 2764 pxn = 0x1; 2765 } 2766 2767 result->cacheattrs.is_s2_format = false; 2768 result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); 2769 result->cacheattrs.shareability = sh; 2770 } 2771 2772 if (result->f.prot && !xn && !(pxn && !is_user)) { 2773 result->f.prot |= PAGE_EXEC; 2774 } 2775 2776 if (mregion) { 2777 *mregion = matchregion; 2778 } 2779 } 2780 2781 fi->type = ARMFault_Permission; 2782 if (arm_feature(env, ARM_FEATURE_M)) { 2783 fi->level = 1; 2784 } 2785 return !(result->f.prot & (1 << access_type)); 2786 } 2787 2788 static bool v8m_is_sau_exempt(CPUARMState *env, 2789 uint32_t address, MMUAccessType access_type) 2790 { 2791 /* 2792 * The architecture specifies that certain address ranges are 2793 * exempt from v8M SAU/IDAU checks. 
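 * The fixed ranges below all lie in the PPB and cover debug and system control areas (for example the SCS at 0xe000e000 and the ROM table at 0xe00ff000).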
2794 */ 2795 return 2796 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 2797 (address >= 0xe0000000 && address <= 0xe0002fff) || 2798 (address >= 0xe000e000 && address <= 0xe000efff) || 2799 (address >= 0xe002e000 && address <= 0xe002efff) || 2800 (address >= 0xe0040000 && address <= 0xe0041fff) || 2801 (address >= 0xe00ff000 && address <= 0xe00fffff); 2802 } 2803 2804 void v8m_security_lookup(CPUARMState *env, uint32_t address, 2805 MMUAccessType access_type, ARMMMUIdx mmu_idx, 2806 bool is_secure, V8M_SAttributes *sattrs) 2807 { 2808 /* 2809 * Look up the security attributes for this address. Compare the 2810 * pseudocode SecurityCheck() function. 2811 * We assume the caller has zero-initialized *sattrs. 2812 */ 2813 ARMCPU *cpu = env_archcpu(env); 2814 int r; 2815 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 2816 int idau_region = IREGION_NOTVALID; 2817 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 2818 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 2819 2820 if (cpu->idau) { 2821 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 2822 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 2823 2824 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 2825 &idau_nsc); 2826 } 2827 2828 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 2829 /* 0xf0000000..0xffffffff is always S for insn fetches */ 2830 return; 2831 } 2832 2833 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 2834 sattrs->ns = !is_secure; 2835 return; 2836 } 2837 2838 if (idau_region != IREGION_NOTVALID) { 2839 sattrs->irvalid = true; 2840 sattrs->iregion = idau_region; 2841 } 2842 2843 switch (env->sau.ctrl & 3) { 2844 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 2845 break; 2846 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 2847 sattrs->ns = true; 2848 break; 2849 default: /* SAU.ENABLE == 1 */ 2850 for (r = 0; r < cpu->sau_sregion; r++) { 2851 if (env->sau.rlar[r] & 1) { 2852 uint32_t base = env->sau.rbar[r] & ~0x1f; 2853 uint32_t limit = env->sau.rlar[r] | 0x1f; 2854 2855 if (base <= address && limit >= address) { 2856 if (base > addr_page_base || limit < addr_page_limit) { 2857 sattrs->subpage = true; 2858 } 2859 if (sattrs->srvalid) { 2860 /* 2861 * If we hit in more than one region then we must report 2862 * as Secure, not NS-Callable, with no valid region 2863 * number info. 2864 */ 2865 sattrs->ns = false; 2866 sattrs->nsc = false; 2867 sattrs->sregion = 0; 2868 sattrs->srvalid = false; 2869 break; 2870 } else { 2871 if (env->sau.rlar[r] & 2) { 2872 sattrs->nsc = true; 2873 } else { 2874 sattrs->ns = true; 2875 } 2876 sattrs->srvalid = true; 2877 sattrs->sregion = r; 2878 } 2879 } else { 2880 /* 2881 * Address not in this region. We must check whether the 2882 * region covers addresses in the same page as our address. 2883 * In that case we must not report a size that covers the 2884 * whole page for a subsequent hit against a different MPU 2885 * region or the background region, because it would result 2886 * in incorrect TLB hits for subsequent accesses to 2887 * addresses that are in this MPU region. 2888 */ 2889 if (limit >= base && 2890 ranges_overlap(base, limit - base + 1, 2891 addr_page_base, 2892 TARGET_PAGE_SIZE)) { 2893 sattrs->subpage = true; 2894 } 2895 } 2896 } 2897 } 2898 break; 2899 } 2900 2901 /* 2902 * The IDAU will override the SAU lookup results if it specifies 2903 * higher security than the SAU does. 
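 * That is, an IDAU result of Secure overrides an SAU result of NS (the region becomes Secure, and NSC only if the IDAU also marks it NSC), and an IDAU region that is not NSC removes the NSC attribute.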
2904 */ 2905 if (!idau_ns) { 2906 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 2907 sattrs->ns = false; 2908 sattrs->nsc = idau_nsc; 2909 } 2910 } 2911 } 2912 2913 static bool get_phys_addr_pmsav8(CPUARMState *env, 2914 S1Translate *ptw, 2915 uint32_t address, 2916 MMUAccessType access_type, 2917 GetPhysAddrResult *result, 2918 ARMMMUFaultInfo *fi) 2919 { 2920 V8M_SAttributes sattrs = {}; 2921 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 2922 bool secure = arm_space_is_secure(ptw->in_space); 2923 bool ret; 2924 2925 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 2926 v8m_security_lookup(env, address, access_type, mmu_idx, 2927 secure, &sattrs); 2928 if (access_type == MMU_INST_FETCH) { 2929 /* 2930 * Instruction fetches always use the MMU bank and the 2931 * transaction attribute determined by the fetch address, 2932 * regardless of CPU state. This is painful for QEMU 2933 * to handle, because it would mean we need to encode 2934 * into the mmu_idx not just the (user, negpri) information 2935 * for the current security state but also that for the 2936 * other security state, which would balloon the number 2937 * of mmu_idx values needed alarmingly. 2938 * Fortunately we can avoid this because it's not actually 2939 * possible to arbitrarily execute code from memory with 2940 * the wrong security attribute: it will always generate 2941 * an exception of some kind or another, apart from the 2942 * special case of an NS CPU executing an SG instruction 2943 * in S&NSC memory. So we always just fail the translation 2944 * here and sort things out in the exception handler 2945 * (including possibly emulating an SG instruction). 2946 */ 2947 if (sattrs.ns != !secure) { 2948 if (sattrs.nsc) { 2949 fi->type = ARMFault_QEMU_NSCExec; 2950 } else { 2951 fi->type = ARMFault_QEMU_SFault; 2952 } 2953 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; 2954 result->f.phys_addr = address; 2955 result->f.prot = 0; 2956 return true; 2957 } 2958 } else { 2959 /* 2960 * For data accesses we always use the MMU bank indicated 2961 * by the current CPU state, but the security attributes 2962 * might downgrade a secure access to nonsecure. 2963 */ 2964 if (sattrs.ns) { 2965 result->f.attrs.secure = false; 2966 result->f.attrs.space = ARMSS_NonSecure; 2967 } else if (!secure) { 2968 /* 2969 * NS access to S memory must fault. 2970 * Architecturally we should first check whether the 2971 * MPU information for this address indicates that we 2972 * are doing an unaligned access to Device memory, which 2973 * should generate a UsageFault instead. QEMU does not 2974 * currently check for that kind of unaligned access though. 2975 * If we added it we would need to do so as a special case 2976 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 2977 */ 2978 fi->type = ARMFault_QEMU_SFault; 2979 result->f.lg_page_size = sattrs.subpage ? 0 : TARGET_PAGE_BITS; 2980 result->f.phys_addr = address; 2981 result->f.prot = 0; 2982 return true; 2983 } 2984 } 2985 } 2986 2987 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure, 2988 result, fi, NULL); 2989 if (sattrs.subpage) { 2990 result->f.lg_page_size = 0; 2991 } 2992 return ret; 2993 } 2994 2995 /* 2996 * Translate from the 4-bit stage 2 representation of 2997 * memory attributes (without cache-allocation hints) to 2998 * the 8-bit representation of the stage 1 MAIR registers 2999 * (which includes allocation hints). 
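 * For example, a stage 2 attrs value of 0b1111 (Outer and Inner Write-Back) converts to 0xff (Write-Back, R+W allocate in both halves), while 0b0101 (Outer and Inner Non-cacheable) converts to 0x44.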
3000 * 3001 * ref: shared/translation/attrs/S2AttrDecode() 3002 * .../S2ConvertAttrsHints() 3003 */ 3004 static uint8_t convert_stage2_attrs(uint64_t hcr, uint8_t s2attrs) 3005 { 3006 uint8_t hiattr = extract32(s2attrs, 2, 2); 3007 uint8_t loattr = extract32(s2attrs, 0, 2); 3008 uint8_t hihint = 0, lohint = 0; 3009 3010 if (hiattr != 0) { /* normal memory */ 3011 if (hcr & HCR_CD) { /* cache disabled */ 3012 hiattr = loattr = 1; /* non-cacheable */ 3013 } else { 3014 if (hiattr != 1) { /* Write-through or write-back */ 3015 hihint = 3; /* RW allocate */ 3016 } 3017 if (loattr != 1) { /* Write-through or write-back */ 3018 lohint = 3; /* RW allocate */ 3019 } 3020 } 3021 } 3022 3023 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 3024 } 3025 3026 /* 3027 * Combine either inner or outer cacheability attributes for normal 3028 * memory, according to table D4-42 and pseudocode procedure 3029 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 3030 * 3031 * NB: only stage 1 includes allocation hints (RW bits), leading to 3032 * some asymmetry. 3033 */ 3034 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 3035 { 3036 if (s1 == 4 || s2 == 4) { 3037 /* non-cacheable has precedence */ 3038 return 4; 3039 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 3040 /* stage 1 write-through takes precedence */ 3041 return s1; 3042 } else if (extract32(s2, 2, 2) == 2) { 3043 /* stage 2 write-through takes precedence, but the allocation hint 3044 * is still taken from stage 1 3045 */ 3046 return (2 << 2) | extract32(s1, 0, 2); 3047 } else { /* write-back */ 3048 return s1; 3049 } 3050 } 3051 3052 /* 3053 * Combine the memory type and cacheability attributes of 3054 * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the 3055 * combined attributes in MAIR_EL1 format. 3056 */ 3057 static uint8_t combined_attrs_nofwb(uint64_t hcr, 3058 ARMCacheAttrs s1, ARMCacheAttrs s2) 3059 { 3060 uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs; 3061 3062 if (s2.is_s2_format) { 3063 s2_mair_attrs = convert_stage2_attrs(hcr, s2.attrs); 3064 } else { 3065 s2_mair_attrs = s2.attrs; 3066 } 3067 3068 s1lo = extract32(s1.attrs, 0, 4); 3069 s2lo = extract32(s2_mair_attrs, 0, 4); 3070 s1hi = extract32(s1.attrs, 4, 4); 3071 s2hi = extract32(s2_mair_attrs, 4, 4); 3072 3073 /* Combine memory type and cacheability attributes */ 3074 if (s1hi == 0 || s2hi == 0) { 3075 /* Device has precedence over normal */ 3076 if (s1lo == 0 || s2lo == 0) { 3077 /* nGnRnE has precedence over anything */ 3078 ret_attrs = 0; 3079 } else if (s1lo == 4 || s2lo == 4) { 3080 /* non-Reordering has precedence over Reordering */ 3081 ret_attrs = 4; /* nGnRE */ 3082 } else if (s1lo == 8 || s2lo == 8) { 3083 /* non-Gathering has precedence over Gathering */ 3084 ret_attrs = 8; /* nGRE */ 3085 } else { 3086 ret_attrs = 0xc; /* GRE */ 3087 } 3088 } else { /* Normal memory */ 3089 /* Outer/inner cacheability combine independently */ 3090 ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 3091 | combine_cacheattr_nibble(s1lo, s2lo); 3092 } 3093 return ret_attrs; 3094 } 3095 3096 static uint8_t force_cacheattr_nibble_wb(uint8_t attr) 3097 { 3098 /* 3099 * Given the 4 bits specifying the outer or inner cacheability 3100 * in MAIR format, return a value specifying Normal Write-Back, 3101 * with the allocation and transient hints taken from the input 3102 * if the input specified some kind of cacheable attribute. 
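 * For example, 0x0 and 0x4 (Non-cacheable) both become 0xf (Write-Back, R+W allocate, non-transient), while 0xa (Write-Through, Read-Allocate) becomes 0xe (Write-Back, Read-Allocate).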
*/ 3104 if (attr == 0 || attr == 4) { 3105 /* 3106 * 0 == an UNPREDICTABLE encoding 3107 * 4 == Non-cacheable 3108 * Either way, force Write-Back RW allocate non-transient 3109 */ 3110 return 0xf; 3111 } 3112 /* Change WriteThrough to WriteBack, keep allocation and transient hints */ 3113 return attr | 4; 3114 } 3115 3116 /* 3117 * Combine the memory type and cacheability attributes of 3118 * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the 3119 * combined attributes in MAIR_EL1 format. 3120 */ 3121 static uint8_t combined_attrs_fwb(ARMCacheAttrs s1, ARMCacheAttrs s2) 3122 { 3123 assert(s2.is_s2_format && !s1.is_s2_format); 3124 3125 switch (s2.attrs) { 3126 case 7: 3127 /* Use stage 1 attributes */ 3128 return s1.attrs; 3129 case 6: 3130 /* 3131 * Force Normal Write-Back. Note that if S1 is Normal cacheable 3132 * then we take the allocation hints from it; otherwise it is 3133 * RW allocate, non-transient. 3134 */ 3135 if ((s1.attrs & 0xf0) == 0) { 3136 /* S1 is Device */ 3137 return 0xff; 3138 } 3139 /* Need to check the Inner and Outer nibbles separately */ 3140 return force_cacheattr_nibble_wb(s1.attrs & 0xf) | 3141 force_cacheattr_nibble_wb(s1.attrs >> 4) << 4; 3142 case 5: 3143 /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */ 3144 if ((s1.attrs & 0xf0) == 0) { 3145 return s1.attrs; 3146 } 3147 return 0x44; 3148 case 0 ... 3: 3149 /* Force Device, of subtype specified by S2 */ 3150 return s2.attrs << 2; 3151 default: 3152 /* 3153 * RESERVED values (including RES0 descriptor bit [5] being nonzero); 3154 * arbitrarily force Device. 3155 */ 3156 return 0; 3157 } 3158 } 3159 3160 /* 3161 * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 3162 * and CombineS1S2Desc() 3163 * 3164 * @hcr: effective value of HCR_EL2 3165 * @s1: Attributes from stage 1 walk 3166 * @s2: Attributes from stage 2 walk 3167 */ 3168 static ARMCacheAttrs combine_cacheattrs(uint64_t hcr, 3169 ARMCacheAttrs s1, ARMCacheAttrs s2) 3170 { 3171 ARMCacheAttrs ret; 3172 bool tagged = false; 3173 3174 assert(!s1.is_s2_format); 3175 ret.is_s2_format = false; 3176 3177 if (s1.attrs == 0xf0) { 3178 tagged = true; 3179 s1.attrs = 0xff; 3180 } 3181 3182 /* Combine shareability attributes (table D4-43) */ 3183 if (s1.shareability == 2 || s2.shareability == 2) { 3184 /* if either is outer-shareable, the result is outer-shareable */ 3185 ret.shareability = 2; 3186 } else if (s1.shareability == 3 || s2.shareability == 3) { 3187 /* if either is inner-shareable, the result is inner-shareable */ 3188 ret.shareability = 3; 3189 } else { 3190 /* both non-shareable */ 3191 ret.shareability = 0; 3192 } 3193 3194 /* Combine memory type and cacheability attributes */ 3195 if (hcr & HCR_FWB) { 3196 ret.attrs = combined_attrs_fwb(s1, s2); 3197 } else { 3198 ret.attrs = combined_attrs_nofwb(hcr, s1, s2); 3199 } 3200 3201 /* 3202 * Any location for which the resultant memory type is any 3203 * type of Device memory is always treated as Outer Shareable. 3204 * Any location for which the resultant memory type is Normal 3205 * Inner Non-cacheable, Outer Non-cacheable is always treated 3206 * as Outer Shareable. 3207 * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC 3208 */ 3209 if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) { 3210 ret.shareability = 2; 3211 } 3212 3213 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ 3214 if (tagged && ret.attrs == 0xff) { 3215 ret.attrs = 0xf0; 3216 } 3217 3218 return ret; 3219 } 3220 3221 /* 3222 * MMU disabled.
S1 addresses within aa64 translation regimes are 3223 * still checked for bounds -- see AArch64.S1DisabledOutput(). 3224 */ 3225 static bool get_phys_addr_disabled(CPUARMState *env, 3226 S1Translate *ptw, 3227 vaddr address, 3228 MMUAccessType access_type, 3229 GetPhysAddrResult *result, 3230 ARMMMUFaultInfo *fi) 3231 { 3232 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 3233 uint8_t memattr = 0x00; /* Device nGnRnE */ 3234 uint8_t shareability = 0; /* non-shareable */ 3235 int r_el; 3236 3237 switch (mmu_idx) { 3238 case ARMMMUIdx_Stage2: 3239 case ARMMMUIdx_Stage2_S: 3240 case ARMMMUIdx_Phys_S: 3241 case ARMMMUIdx_Phys_NS: 3242 case ARMMMUIdx_Phys_Root: 3243 case ARMMMUIdx_Phys_Realm: 3244 break; 3245 3246 default: 3247 r_el = regime_el(env, mmu_idx); 3248 if (arm_el_is_aa64(env, r_el)) { 3249 int pamax = arm_pamax(env_archcpu(env)); 3250 uint64_t tcr = env->cp15.tcr_el[r_el]; 3251 int addrtop, tbi; 3252 3253 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 3254 if (access_type == MMU_INST_FETCH) { 3255 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 3256 } 3257 tbi = (tbi >> extract64(address, 55, 1)) & 1; 3258 addrtop = (tbi ? 55 : 63); 3259 3260 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 3261 fi->type = ARMFault_AddressSize; 3262 fi->level = 0; 3263 fi->stage2 = false; 3264 return 1; 3265 } 3266 3267 /* 3268 * When TBI is disabled, we've just validated that all of the 3269 * bits above PAMax are zero, so logically we only need to 3270 * clear the top byte for TBI. But it's clearer to follow 3271 * the pseudocode set of addrdesc.paddress. 3272 */ 3273 address = extract64(address, 0, 52); 3274 } 3275 3276 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */ 3277 if (r_el == 1) { 3278 uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); 3279 if (hcr & HCR_DC) { 3280 if (hcr & HCR_DCT) { 3281 memattr = 0xf0; /* Tagged, Normal, WB, RWA */ 3282 } else { 3283 memattr = 0xff; /* Normal, WB, RWA */ 3284 } 3285 } 3286 } 3287 if (memattr == 0) { 3288 if (access_type == MMU_INST_FETCH) { 3289 if (regime_sctlr(env, mmu_idx) & SCTLR_I) { 3290 memattr = 0xee; /* Normal, WT, RA, NT */ 3291 } else { 3292 memattr = 0x44; /* Normal, NC, No */ 3293 } 3294 } 3295 shareability = 2; /* outer shareable */ 3296 } 3297 result->cacheattrs.is_s2_format = false; 3298 break; 3299 } 3300 3301 result->f.phys_addr = address; 3302 result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 3303 result->f.lg_page_size = TARGET_PAGE_BITS; 3304 result->cacheattrs.shareability = shareability; 3305 result->cacheattrs.attrs = memattr; 3306 return false; 3307 } 3308 3309 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, 3310 vaddr address, 3311 MMUAccessType access_type, MemOp memop, 3312 GetPhysAddrResult *result, 3313 ARMMMUFaultInfo *fi) 3314 { 3315 hwaddr ipa; 3316 int s1_prot, s1_lgpgsz; 3317 ARMSecuritySpace in_space = ptw->in_space; 3318 bool ret, ipa_secure, s1_guarded; 3319 ARMCacheAttrs cacheattrs1; 3320 ARMSecuritySpace ipa_space; 3321 uint64_t hcr; 3322 3323 ret = get_phys_addr_nogpc(env, ptw, address, access_type, 3324 memop, result, fi); 3325 3326 /* If S1 fails, return early. */ 3327 if (ret) { 3328 return ret; 3329 } 3330 3331 ipa = result->f.phys_addr; 3332 ipa_secure = result->f.attrs.secure; 3333 ipa_space = result->f.attrs.space; 3334 3335 ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0; 3336 ptw->in_mmu_idx = ipa_secure ? 
ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 3337 ptw->in_space = ipa_space; 3338 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx); 3339 3340 /* 3341 * S1 is done, now do S2 translation. 3342 * Save the stage1 results so that we may merge prot and cacheattrs later. 3343 */ 3344 s1_prot = result->f.prot; 3345 s1_lgpgsz = result->f.lg_page_size; 3346 s1_guarded = result->f.extra.arm.guarded; 3347 cacheattrs1 = result->cacheattrs; 3348 memset(result, 0, sizeof(*result)); 3349 3350 ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, 3351 memop, result, fi); 3352 fi->s2addr = ipa; 3353 3354 /* Combine the S1 and S2 perms. */ 3355 result->f.prot &= s1_prot; 3356 3357 /* If S2 fails, return early. */ 3358 if (ret) { 3359 return ret; 3360 } 3361 3362 /* 3363 * If either S1 or S2 returned a result smaller than TARGET_PAGE_SIZE, 3364 * this means "don't put this in the TLB"; in this case, return a 3365 * result with lg_page_size == 0 to achieve that. Otherwise, 3366 * use the maximum of the S1 & S2 page size, so that invalidation 3367 * of pages > TARGET_PAGE_SIZE works correctly. (This works even though 3368 * we know the combined result permissions etc only cover the minimum 3369 * of the S1 and S2 page size, because we know that the common TLB code 3370 * never actually creates TLB entries bigger than TARGET_PAGE_SIZE, 3371 * and passing a larger page size value only affects invalidations.) 3372 */ 3373 if (result->f.lg_page_size < TARGET_PAGE_BITS || 3374 s1_lgpgsz < TARGET_PAGE_BITS) { 3375 result->f.lg_page_size = 0; 3376 } else if (result->f.lg_page_size < s1_lgpgsz) { 3377 result->f.lg_page_size = s1_lgpgsz; 3378 } 3379 3380 /* Combine the S1 and S2 cache attributes. */ 3381 hcr = arm_hcr_el2_eff_secstate(env, in_space); 3382 if (hcr & HCR_DC) { 3383 /* 3384 * HCR.DC forces the first stage attributes to 3385 * Normal Non-Shareable, 3386 * Inner Write-Back Read-Allocate Write-Allocate, 3387 * Outer Write-Back Read-Allocate Write-Allocate. 3388 * Do not overwrite Tagged within attrs. 3389 */ 3390 if (cacheattrs1.attrs != 0xf0) { 3391 cacheattrs1.attrs = 0xff; 3392 } 3393 cacheattrs1.shareability = 0; 3394 } 3395 result->cacheattrs = combine_cacheattrs(hcr, cacheattrs1, 3396 result->cacheattrs); 3397 3398 /* No BTI GP information in stage 2, we just use the S1 value */ 3399 result->f.extra.arm.guarded = s1_guarded; 3400 3401 /* 3402 * Check if IPA translates to secure or non-secure PA space. 3403 * Note that VSTCR overrides VTCR and {N}SW overrides {N}SA. 3404 */ 3405 if (in_space == ARMSS_Secure) { 3406 result->f.attrs.secure = 3407 !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) 3408 && (ipa_secure 3409 || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))); 3410 result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); 3411 } 3412 3413 return false; 3414 } 3415 3416 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, 3417 vaddr address, 3418 MMUAccessType access_type, MemOp memop, 3419 GetPhysAddrResult *result, 3420 ARMMMUFaultInfo *fi) 3421 { 3422 ARMMMUIdx mmu_idx = ptw->in_mmu_idx; 3423 ARMMMUIdx s1_mmu_idx; 3424 3425 /* 3426 * The page table entries may downgrade Secure to NonSecure, but 3427 * cannot upgrade a NonSecure translation regime's attributes 3428 * to Secure or Realm. 
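 * (See the NSTable and NS bit handling in get_phys_addr_lpae above.)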
3429 */ 3430 result->f.attrs.space = ptw->in_space; 3431 result->f.attrs.secure = arm_space_is_secure(ptw->in_space); 3432 3433 switch (mmu_idx) { 3434 case ARMMMUIdx_Phys_S: 3435 case ARMMMUIdx_Phys_NS: 3436 case ARMMMUIdx_Phys_Root: 3437 case ARMMMUIdx_Phys_Realm: 3438 /* Checking Phys early avoids special casing later vs regime_el. */ 3439 return get_phys_addr_disabled(env, ptw, address, access_type, 3440 result, fi); 3441 3442 case ARMMMUIdx_Stage1_E0: 3443 case ARMMMUIdx_Stage1_E1: 3444 case ARMMMUIdx_Stage1_E1_PAN: 3445 /* 3446 * First stage lookup uses second stage for ptw; only 3447 * Secure has both S and NS IPA and starts with Stage2_S. 3448 */ 3449 ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ? 3450 ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 3451 break; 3452 3453 case ARMMMUIdx_Stage2: 3454 case ARMMMUIdx_Stage2_S: 3455 /* 3456 * Second stage lookup uses physical for ptw; whether this is S or 3457 * NS may depend on the SW/NSW bits if this is a stage 2 lookup for 3458 * the Secure EL2&0 regime. 3459 */ 3460 ptw->in_ptw_idx = ptw_idx_for_stage_2(env, mmu_idx); 3461 break; 3462 3463 case ARMMMUIdx_E10_0: 3464 s1_mmu_idx = ARMMMUIdx_Stage1_E0; 3465 goto do_twostage; 3466 case ARMMMUIdx_E10_1: 3467 s1_mmu_idx = ARMMMUIdx_Stage1_E1; 3468 goto do_twostage; 3469 case ARMMMUIdx_E10_1_PAN: 3470 s1_mmu_idx = ARMMMUIdx_Stage1_E1_PAN; 3471 do_twostage: 3472 /* 3473 * Call ourselves recursively to do the stage 1 and then stage 2 3474 * translations if mmu_idx is a two-stage regime, and EL2 present. 3475 * Otherwise, a stage1+stage2 translation is just stage 1. 3476 */ 3477 ptw->in_mmu_idx = mmu_idx = s1_mmu_idx; 3478 if (arm_feature(env, ARM_FEATURE_EL2) && 3479 !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) { 3480 return get_phys_addr_twostage(env, ptw, address, access_type, 3481 memop, result, fi); 3482 } 3483 /* fall through */ 3484 3485 default: 3486 /* Single stage uses physical for ptw. */ 3487 ptw->in_ptw_idx = arm_space_to_phys(ptw->in_space); 3488 break; 3489 } 3490 3491 result->f.attrs.user = regime_is_user(env, mmu_idx); 3492 3493 /* 3494 * Fast Context Switch Extension. This doesn't exist at all in v8. 3495 * In v7 and earlier it affects all stage 1 translations. 3496 */ 3497 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 3498 && !arm_feature(env, ARM_FEATURE_V8)) { 3499 if (regime_el(env, mmu_idx) == 3) { 3500 address += env->cp15.fcseidr_s; 3501 } else { 3502 address += env->cp15.fcseidr_ns; 3503 } 3504 } 3505 3506 if (arm_feature(env, ARM_FEATURE_PMSA)) { 3507 bool ret; 3508 result->f.lg_page_size = TARGET_PAGE_BITS; 3509 3510 if (arm_feature(env, ARM_FEATURE_V8)) { 3511 /* PMSAv8 */ 3512 ret = get_phys_addr_pmsav8(env, ptw, address, access_type, 3513 result, fi); 3514 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3515 /* PMSAv7 */ 3516 ret = get_phys_addr_pmsav7(env, ptw, address, access_type, 3517 result, fi); 3518 } else { 3519 /* Pre-v7 MPU */ 3520 ret = get_phys_addr_pmsav5(env, ptw, address, access_type, 3521 result, fi); 3522 } 3523 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 3524 " mmu_idx %u -> %s (prot %c%c%c)\n", 3525 access_type == MMU_DATA_LOAD ? "reading" : 3526 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 3527 (uint32_t)address, mmu_idx, 3528 ret ? "Miss" : "Hit", 3529 result->f.prot & PAGE_READ ? 'r' : '-', 3530 result->f.prot & PAGE_WRITE ? 'w' : '-', 3531 result->f.prot & PAGE_EXEC ? 
'x' : '-'); 3532 3533 return ret; 3534 } 3535 3536 /* Definitely a real MMU, not an MPU */ 3537 3538 if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { 3539 return get_phys_addr_disabled(env, ptw, address, access_type, 3540 result, fi); 3541 } 3542 3543 if (regime_using_lpae_format(env, mmu_idx)) { 3544 return get_phys_addr_lpae(env, ptw, address, access_type, 3545 memop, result, fi); 3546 } else if (arm_feature(env, ARM_FEATURE_V7) || 3547 regime_sctlr(env, mmu_idx) & SCTLR_XP) { 3548 return get_phys_addr_v6(env, ptw, address, access_type, result, fi); 3549 } else { 3550 return get_phys_addr_v5(env, ptw, address, access_type, result, fi); 3551 } 3552 } 3553 3554 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, 3555 vaddr address, 3556 MMUAccessType access_type, MemOp memop, 3557 GetPhysAddrResult *result, 3558 ARMMMUFaultInfo *fi) 3559 { 3560 if (get_phys_addr_nogpc(env, ptw, address, access_type, 3561 memop, result, fi)) { 3562 return true; 3563 } 3564 if (!granule_protection_check(env, result->f.phys_addr, 3565 result->f.attrs.space, fi)) { 3566 fi->type = ARMFault_GPCFOnOutput; 3567 return true; 3568 } 3569 return false; 3570 } 3571 3572 bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address, 3573 MMUAccessType access_type, MemOp memop, 3574 ARMMMUIdx mmu_idx, ARMSecuritySpace space, 3575 GetPhysAddrResult *result, 3576 ARMMMUFaultInfo *fi) 3577 { 3578 S1Translate ptw = { 3579 .in_mmu_idx = mmu_idx, 3580 .in_space = space, 3581 }; 3582 return get_phys_addr_nogpc(env, &ptw, address, access_type, 3583 memop, result, fi); 3584 } 3585 3586 bool get_phys_addr(CPUARMState *env, vaddr address, 3587 MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx, 3588 GetPhysAddrResult *result, ARMMMUFaultInfo *fi) 3589 { 3590 S1Translate ptw = { 3591 .in_mmu_idx = mmu_idx, 3592 }; 3593 ARMSecuritySpace ss; 3594 3595 switch (mmu_idx) { 3596 case ARMMMUIdx_E10_0: 3597 case ARMMMUIdx_E10_1: 3598 case ARMMMUIdx_E10_1_PAN: 3599 case ARMMMUIdx_E20_0: 3600 case ARMMMUIdx_E20_2: 3601 case ARMMMUIdx_E20_2_PAN: 3602 case ARMMMUIdx_Stage1_E0: 3603 case ARMMMUIdx_Stage1_E1: 3604 case ARMMMUIdx_Stage1_E1_PAN: 3605 case ARMMMUIdx_E2: 3606 ss = arm_security_space_below_el3(env); 3607 break; 3608 case ARMMMUIdx_Stage2: 3609 /* 3610 * For Secure EL2, we need this index to be NonSecure; 3611 * otherwise this will already be NonSecure or Realm. 
3612 */ 3613 ss = arm_security_space_below_el3(env); 3614 if (ss == ARMSS_Secure) { 3615 ss = ARMSS_NonSecure; 3616 } 3617 break; 3618 case ARMMMUIdx_Phys_NS: 3619 case ARMMMUIdx_MPrivNegPri: 3620 case ARMMMUIdx_MUserNegPri: 3621 case ARMMMUIdx_MPriv: 3622 case ARMMMUIdx_MUser: 3623 ss = ARMSS_NonSecure; 3624 break; 3625 case ARMMMUIdx_Stage2_S: 3626 case ARMMMUIdx_Phys_S: 3627 case ARMMMUIdx_MSPrivNegPri: 3628 case ARMMMUIdx_MSUserNegPri: 3629 case ARMMMUIdx_MSPriv: 3630 case ARMMMUIdx_MSUser: 3631 ss = ARMSS_Secure; 3632 break; 3633 case ARMMMUIdx_E3: 3634 case ARMMMUIdx_E30_0: 3635 case ARMMMUIdx_E30_3_PAN: 3636 if (arm_feature(env, ARM_FEATURE_AARCH64) && 3637 cpu_isar_feature(aa64_rme, env_archcpu(env))) { 3638 ss = ARMSS_Root; 3639 } else { 3640 ss = ARMSS_Secure; 3641 } 3642 break; 3643 case ARMMMUIdx_Phys_Root: 3644 ss = ARMSS_Root; 3645 break; 3646 case ARMMMUIdx_Phys_Realm: 3647 ss = ARMSS_Realm; 3648 break; 3649 default: 3650 g_assert_not_reached(); 3651 } 3652 3653 ptw.in_space = ss; 3654 return get_phys_addr_gpc(env, &ptw, address, access_type, 3655 memop, result, fi); 3656 } 3657 3658 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 3659 MemTxAttrs *attrs) 3660 { 3661 ARMCPU *cpu = ARM_CPU(cs); 3662 CPUARMState *env = &cpu->env; 3663 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 3664 ARMSecuritySpace ss = arm_security_space(env); 3665 S1Translate ptw = { 3666 .in_mmu_idx = mmu_idx, 3667 .in_space = ss, 3668 .in_debug = true, 3669 }; 3670 GetPhysAddrResult res = {}; 3671 ARMMMUFaultInfo fi = {}; 3672 bool ret; 3673 3674 ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi); 3675 *attrs = res.f.attrs; 3676 3677 if (ret) { 3678 return -1; 3679 } 3680 return res.f.phys_addr; 3681 } 3682