/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
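/*
 * Illustrative example (an assumption for clarity, not part of the
 * original header): the classic v7M exception-return value 0xfffffff9
 * is above EXC_RETURN_MIN_MAGIC, so a branch to it is treated as an
 * exception return rather than as a jump to a real code address.
 */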
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjmp back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore CPU state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
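/*
 * Illustrative example: Hyp mode banks R13 and the SPSR but shares R14
 * with USR/SYS, so callers index the banks differently:
 *
 *   env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]      -> BANK_HYP
 *   env->banked_spsr[bank_number(ARM_CPU_MODE_HYP)]     -> BANK_HYP
 *   env->banked_r14[r14_bank_number(ARM_CPU_MODE_HYP)]  -> BANK_USRSYS
 */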
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.
     */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
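/*
 * Illustrative example: a CPU whose ID_AA64MMFR0.PARANGE field reads
 * as 5 has PAMax() == 48, i.e. it implements 48-bit physical addresses.
 */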
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};
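/*
 * Illustrative example (hypothetical values): a level 2 stage 2
 * translation fault taken while walking the stage 1 page tables might
 * be described as
 *
 *   fi->type   = ARMFault_Translation;
 *   fi->level  = 2;
 *   fi->stage2 = true;
 *   fi->s1ptw  = true;
 *   fi->s2addr = the faulting IPA;
 */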
/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
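/*
 * Worked examples (illustrative): a level 2 translation fault in
 * domain 5 encodes as 0x7 | (5 << 4) == 0x57 in arm_fi_to_sfsc();
 * the same fault encodes as (2 & 3) | (0x1 << 2) | (1 << 9) == 0x206
 * in arm_fi_to_lfsc().
 */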
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
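/*
 * Illustrative note (assumed context, not part of this header): hooks
 * are registered with arm_register_pre_el_change_hook() and
 * arm_register_el_change_hook() from cpu.h; the exception entry and
 * return paths then call the two helpers above on either side of each
 * exception-level transition.
 */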
/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}
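/*
 * Illustrative example: ARMMMUIdx_E10_1 is a two-range regime; for an
 * AArch64 EL1&0 translation, VA bit 55 selects between the TTBR0_EL1
 * and TTBR1_EL1 halves of the address space. A single-range regime
 * such as ARMMMUIdx_E2 has only the TTBR0-based range.
 */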
/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
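/*
 * Illustrative example: a CPU whose ID_AA64DFR0.BRPS field reads as 5
 * implements 6 hardware breakpoints, so the valid indexes for
 * hw_breakpoint_update() are 0..5.
 */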
/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);
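/*
 * Illustrative example: for a two-stage regime, arm_mmu_idx() might
 * return ARMMMUIdx_E10_1 while arm_stage1_mmu_idx() (below) returns
 * the corresponding stage 1-only index, ARMMMUIdx_Stage1_E1.
 */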
/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);
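/*
 * Illustrative example (hypothetical configuration): with a 4K granule
 * and TCR_EL1.T0SZ == 16, the parameters for a low-half EL1&0 virtual
 * address would have tsz == 16 and select == 0, describing a
 * 64 - 16 = 48-bit VA range.
 */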
static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS 6

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
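/*
 * Illustrative example: for ptr == 0x5a00ffee12345678,
 * allocation_tag_from_addr() returns 0xa (bits [59:56]), and
 * address_with_allocation_tag(ptr, 0x3) yields 0x5300ffee12345678.
 */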
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing.  Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register.  But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

#endif