/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC 1
#define BANK_ABT 2
#define BANK_UND 3
#define BANK_IRQ 4
#define BANK_FIQ 5
#define BANK_HYP 6
#define BANK_MON 7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000
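
/*
 * For illustration only: at GTIMER_DEFAULT_HZ the tick period is
 * 1 / 1000000000 s = 1ns, while at GTIMER_BACKCOMPAT_HZ it is
 * 1 / 62500000 s = 16ns, the 16ns period described above.
 */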

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME (1U << 28)
#define MDCR_TDCC (1U << 27)
#define MDCR_HLP (1U << 26) /* MDCR_EL2 */
#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */
#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
#define MDCR_TTRF (1U << 19)
#define MDCR_STE (1U << 18) /* MDCR_EL3 */
#define MDCR_SPME (1U << 17) /* MDCR_EL3 */
#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */
#define MDCR_SDD (1U << 16)
#define MDCR_SPD (3U << 14)
#define MDCR_TDRA (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA (1U << 9)
#define MDCR_TDE (1U << 8)
#define MDCR_HPME (1U << 7)
#define MDCR_TPM (1U << 6)
#define MDCR_TPMCR (1U << 5)
#define MDCR_HPMN (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0 (1U << 4)
#define TTBCR_PD1 (1U << 5)
#define TTBCR_EPD0 (1U << 7)
#define TTBCR_IRGN0 (3U << 8)
#define TTBCR_ORGN0 (3U << 10)
#define TTBCR_SH0 (3U << 12)
#define TTBCR_T1SZ (3U << 16)
#define TTBCR_A1 (1U << 22)
#define TTBCR_EPD1 (1U << 23)
#define TTBCR_IRGN1 (3U << 24)
#define TTBCR_ORGN1 (3U << 26)
#define TTBCR_SH1 (1U << 28)
#define TTBCR_EAE (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0 (1ULL << 0)
#define HCRX_ENALS (1ULL << 1)
#define HCRX_ENASR (1ULL << 2)
#define HCRX_FNXS (1ULL << 3)
#define HCRX_FGTNXS (1ULL << 4)
#define HCRX_SMPME (1ULL << 5)
#define HCRX_TALLINT (1ULL << 6)
#define HCRX_VINMI (1ULL << 7)
#define HCRX_VFNMI (1ULL << 8)
#define HCRX_CMOW (1ULL << 9)
#define HCRX_MCE2 (1ULL << 10)
#define HCRX_MSCEN (1ULL << 11)

#define HPFAR_NS (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
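
/*
 * Illustrative use of the mappings above (not new API):
 *
 *     env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]     // IRQ-mode SP
 *     env->banked_r14[r14_bank_number(ARM_CPU_MODE_HYP)] // Hyp LR, the USR/SYS slot
 *     env->banked_spsr[aarch64_banked_spsr_index(1)]     // SPSR_EL1, the SVC slot
 *
 * i.e. r13 and spsr indexes use bank_number() while r14 uses
 * r14_bank_number(), as described in the comment above.
 */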

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
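
/*
 * For illustration: update_spsel(env, 0) at EL1 with PSTATE.SP currently 1
 * saves xregs[31] into sp_el[1] via aarch64_save_sp(), clears PSTATE.SP,
 * and then loads xregs[31] from sp_el[0] via aarch64_restore_sp(), so the
 * working stack pointer always tracks the SPSel selection.
 */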

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
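
/*
 * Worked example (illustrative only): for a stage 1 translation fault at
 * level 2 with domain 0,
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *
 * arm_fi_to_sfsc(&fi) returns 0x7 (translation fault, page), while
 * arm_fi_to_lfsc() below returns 0b000110 | (1 << 9) = 0x206 for the same
 * fault in the long-descriptor format.
 */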

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
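
/*
 * Illustrative round trip (not new API): the core MMU index stored in the
 * hflags can be turned back into a full ARMMMUIdx with
 *
 *     ARMMMUIdx idx = core_to_arm_mmu_idx(env, arm_env_mmu_index(env));
 *
 * since core_to_arm_mmu_idx() ORs back the ARM_MMU_IDX_M or ARM_MMU_IDX_A
 * flag bits that arm_to_core_mmu_idx() masks off.
 */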

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}
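
/*
 * For illustration: because the ID fields hold "count - 1", a CPU whose
 * ID_AA64DFR0.BRPS field reads as 5 has arm_num_brps() == 6 usable
 * breakpoints; arm_num_wrps() and arm_num_ctx_cmps() below follow the
 * same convention.
 */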

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
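
/*
 * For illustration: aarch32_mode_name(0x1d3) returns "svc" because
 * 0x1d3 & 0xf == 0x3, and unassigned mode encodings come back as "???".
 */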

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
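
/*
 * For illustration: arm_granule_bits(Gran16K) is 14, i.e. a 16KiB
 * (1 << 14 byte) translation granule, matching the pseudocode
 * TGxGranuleBits().
 */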

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz : 8;
    unsigned ps : 3;
    unsigned sh : 2;
    unsigned select : 1;
    bool tbi : 1;
    bool epd : 1;
    bool hpd : 1;
    bool tsz_oob : 1; /* tsz has been clamped to legal range */
    bool ds : 1;
    bool ha : 1;
    bool hd : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *   (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
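
/*
 * A minimal sketch of how a caller might pack the MTEDESC fields above
 * before calling mte_check(); the real descriptors are built by the TCG
 * translator and helpers:
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 *     mte_check(env, desc, ptr, GETPC());
 */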

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
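    /*
     * sextract64() sign-extends from bit 55, so this AND clears the top
     * byte for bit-55-clear (TTBR0) pointers and leaves bit-55-set
     * pointers unchanged.
     */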
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
#define PMCRLP 0x80
#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX 0x10
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRP 0x2
#define PMCRE 0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
#define PMXEVTYPER_NSK 0x20000000
#define PMXEVTYPER_NSU 0x10000000
#define PMXEVTYPER_NSH 0x08000000
#define PMXEVTYPER_M 0x04000000
#define PMXEVTYPER_MT 0x02000000
#define PMXEVTYPER_EVTCOUNT 0x0000ffff
#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                         PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                         PMXEVTYPER_M | PMXEVTYPER_MT | \
                         PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR 0xf8000000
#define PMCCFILTR_M PMXEVTYPER_M
#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
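
/*
 * For illustration: with pmu_num_counters() == 4 the mask is
 * (1 << 31) | 0xf, i.e. event counters 0-3 plus bit 31 for the cycle
 * counter, which is what the PMCNTEN and PMINTEN registers accept.
 */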

#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
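
/*
 * Worked example: for param.tsz == 25 with param.tbi set, bot_pac_bit is 39
 * and top_pac_bit is 56, so pauth_ptr_mask() returns bits [55:39], i.e.
 * 0x00ffff8000000000; with tbi clear the mask extends up to bit 63.
 */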

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP \
    ((1 << (1 - 1)) | (1 << (2 - 1)) | \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
 */
uint64_t gt_virt_cnt_offset(CPUARMState *env);

#endif