/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)
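
/*
 * For illustration only: the FIELD() definitions above expand (via
 * hw/registerfields.h) into R_DBGWCR_<NAME>_SHIFT/_LENGTH/_MASK constants,
 * so individual fields are typically read with the FIELD_EX* accessors.
 * A minimal sketch, with 'wcr' standing for a DBGWCR<n>_EL1 value:
 *
 *     uint64_t wcr = env->cp15.dbgwcr[n];
 *     bool enabled = FIELD_EX64(wcr, DBGWCR, E);
 *     int bas = FIELD_EX64(wcr, DBGWCR, BAS);
 */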

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
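
/*
 * For example, Hyp mode is the one case where the two mappings differ:
 *
 *     bank_number(ARM_CPU_MODE_HYP)     == BANK_HYP
 *     r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS
 *
 * because Hyp has a banked R13 and SPSR of its own but borrows the
 * USR/SYS R14.
 */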

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);
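
/*
 * A sketch of how these update functions are intended to be used (the write
 * handler shown is illustrative, not a definition from this header): a cpreg
 * write function for DBGBCR<n>_EL1 stores the new value and then re-syncs
 * the corresponding QEMU breakpoint,
 *
 *     env->cp15.dbgbcr[n] = value;
 *     hw_breakpoint_update(cpu, n);
 *
 * while migration/reset paths call hw_breakpoint_update_all() once instead.
 */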

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
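
/*
 * Worked example: a first-level translation fault in domain 5 produces the
 * classic short-descriptor "section translation fault" encoding:
 *
 *     ARMMMUFaultInfo fi = {
 *         .type = ARMFault_Translation,
 *         .level = 1,
 *         .domain = 5,
 *     };
 *     uint32_t fsr = arm_fi_to_sfsc(&fi);    (fsr == 0x5 | (5 << 4) == 0x55)
 */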

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}
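
/*
 * Illustrative round trip: the "core" mmu_idx stored in the TLB is just the
 * low index bits, so converting back needs to know whether the CPU is
 * M-profile or A/R-profile, e.g.
 *
 *     int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *     ARMMMUIdx idx = core_to_arm_mmu_idx(env, core);
 *
 * yields idx == ARMMMUIdx_E10_1 again on an A-profile CPU.
 */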

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}
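
/*
 * For example, the EL1&0 privileged-with-PAN regime is classified as:
 *
 *     regime_has_2_ranges(ARMMMUIdx_E10_1_PAN)  == true
 *     regime_is_pan(env, ARMMMUIdx_E10_1_PAN)   == true
 *     regime_is_stage2(ARMMMUIdx_E10_1_PAN)     == false
 */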

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}
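
/*
 * For example, an ID_AA64DFR0_EL1.BRPS field of 5 means 6 breakpoints are
 * implemented, so arm_num_brps() returns 6; arm_num_wrps() and
 * arm_num_ctx_cmps() below apply the same "+ 1" to their ID fields.
 */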

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
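
/*
 * This is intended purely for logging; a typical (hypothetical) call site
 * would look something like:
 *
 *     qemu_log_mask(CPU_LOG_INT, "...taking exception from %s mode\n",
 *                   aarch32_mode_name(env->uncached_cpsr));
 */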

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}
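
/*
 * A minimal sketch of how these masks are meant to be used (the surrounding
 * code is illustrative): when accepting a guest-supplied CPSR/SPSR value,
 * bits that are not valid for this CPU are discarded, e.g.
 *
 *     uint32_t mask = aarch32_cpsr_valid_mask(env->features,
 *                                             &env_archcpu(env)->isar);
 *     val &= mask;
 */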

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 * (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;
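
/*
 * For instance (values chosen purely for illustration), stage-1 Normal
 * Write-Back cacheable Inner Shareable memory would be described as:
 *
 *     ARMCacheAttrs attrs = {
 *         .is_s2_format = false,
 *         .attrs = 0xff,       (MAIR encoding: Normal, WB, R/W-allocate)
 *         .shareability = 3,   (SH encoding: Inner Shareable)
 *     };
 */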

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 * * we honour the short vs long DFSR format differences.
 * * the WnR bit is never set (the caller must do this).
 * * for PMSAv5 based systems we don't bother to return a full FSR format
 *   value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 * address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)
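
/*
 * A minimal sketch of building such a predicate descriptor (FIELD_DP32 is
 * the generic hw/registerfields.h helper; 'oprsz' and 'esz' are whatever
 * the caller has computed):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, PREDDESC, OPRSZ, oprsz);
 *     desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
 */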

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - 12) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 * an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 * operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 * an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
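
/*
 * Worked example of the fold in tcma_check(): the two "match" cases are
 * ptr<59:55> all-zeroes (bit55 == 0, ptr_tag == 0x0) and all-ones
 * (bit55 == 1, ptr_tag == 0xf). In both cases ptr_tag + bit55 is a
 * multiple of 16:
 *
 *     (0x0 + 0) & 0xf == 0
 *     (0xf + 1) & 0xf == 0
 *
 * and no other (ptr_tag, bit55) combination sums to a multiple of 16.
 */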

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}
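
/*
 * For example, with TBI0 in effect a tagged user-space pointer such as
 * 0xab00123456789abc (top byte 0xab, bit 55 clear) is cleaned to
 * 0x0000123456789abc, while an untagged pointer passes through unchanged.
 */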

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
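
/*
 * Worked example: with PMCR.N == 4 (four event counters plus the cycle
 * counter), pmu_num_counters() returns 4 and pmu_counter_mask() returns
 * 0x8000000f, i.e. bit 31 for the cycle counter plus one bit per event
 * counter.
 */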

#ifdef TARGET_AARCH64
int arm_gen_dynamic_svereg_xml(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUARMState *env, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
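
/*
 * Worked example: with param.tsz == 25 and param.tbi set, the PAC occupies
 * bits [55:39] of the pointer, so pauth_ptr_mask() returns
 * 0x00ffff8000000000.
 */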

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
#endif