/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)
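
/*
 * Illustrative note (not part of the original header): FIELD() from
 * "hw/registerfields.h" generates R_<REG>_<FIELD>_SHIFT/_LENGTH/_MASK
 * constants, so definitions like the ones above are typically consumed as
 *
 *     bool spsel = env->v7m.control[secure] & R_V7M_CONTROL_SPSEL_MASK;
 *     uint32_t npriv = FIELD_EX32(control_value, V7M_CONTROL, NPRIV);
 *
 * where control_value is just a placeholder for a register value.
 */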

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)  /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)  /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)  /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)    /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)  /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)     /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)   /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)   /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)   /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)  /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)  /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)   /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK  (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                          MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                          MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE  (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)
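
/*
 * Illustrative example (an assumption, not from the original header):
 * a reader of CNTHCTL_EL2 therefore has to pick the field by E2H, e.g.
 *
 *     if (hcr_el2 & HCR_E2H) {
 *         el1pcten = FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H1);  // bit 10
 *     } else {
 *         el1pcten = FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H0);  // bit 0
 *     }
 */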

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}
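
/*
 * Illustrative note (not part of the original header): the two mappings
 * above agree, so the AArch64 and AArch32 views of an SPSR share a slot, e.g.
 *
 *     aarch64_banked_spsr_index(1) == BANK_SVC == bank_number(ARM_CPU_MODE_SVC)
 *
 * i.e. SPSR_EL1 and SPSR_svc are the same env->banked_spsr[] entry.
 */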

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
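
/*
 * Illustrative example (an assumption, not from the original header):
 * an "MSR SPSel, #1" executed at EL1 with SPSel currently 0 ends up as
 * update_spsel(env, 1), which saves xregs[31] into sp_el[0] and then
 * reloads xregs[31] from sp_el[1], so subsequent code sees SP_EL1.
 */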

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault,  /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
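
/*
 * Worked example (illustrative, not part of the original header):
 * a level-2 translation fault on domain 3 encodes as
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation,
 *                            .level = 2, .domain = 3 };
 *     arm_fi_to_sfsc(&fi);   // 0x7 | (3 << 4) == 0x37
 */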

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
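
/*
 * Worked example (illustrative, not part of the original header):
 * the same level-2 translation fault in long-descriptor format is
 *
 *     ARMMMUFaultInfo fi = { .type = ARMFault_Translation, .level = 2 };
 *     arm_fi_to_lfsc(&fi);   // (0b000100 | 2) | (1 << 9) == 0x206
 */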

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
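
/*
 * Illustrative note (not part of the original header): these helpers
 * round-trip between the TCG core's small integer index and the ARMMMUIdx
 * enum by masking off, or or-ing back, the profile bits, e.g.
 *
 *     int core = arm_to_core_mmu_idx(ARMMMUIdx_E10_1);
 *     core_to_arm_mmu_idx(env, core);   // ARMMMUIdx_E10_1 again on an
 *                                       // A-profile CPU
 */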

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
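
/*
 * Illustrative example (not part of the original header): for the Secure
 * stage 2 regime the effective TCR mixes the two registers, so
 *
 *     uint64_t tcr = regime_tcr(env, ARMMMUIdx_Stage2_S);
 *     FIELD_EX64(tcr, VTCR, SH0);   // shared field: taken from VTCR_EL2
 *     FIELD_EX64(tcr, VTCR, TG0);   // non-shared field: taken from VSTCR_EL2
 */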

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
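
/*
 * Illustrative example (not part of the original header): the page size
 * for a granule is 1 << arm_granule_bits(gran), e.g. Gran16K gives
 * 1 << 14 == 16KB pages, and each table level then indexes
 * arm_granule_bits(gran) - 3 bits of the virtual address.
 */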

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *               (ignored if @mmu_idx is for a stage 1 regime; only affects
 *               tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);
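
/*
 * Illustrative use (an assumption, not from the original header):
 *
 *     ARMVAParameters p = aa64_va_parameters(env, addr, mmu_idx, true, false);
 *     int va_bits = 64 - p.tsz;   // valid VA width for the selected range
 *     bool high = p.select;       // true if addr falls in the upper VA range
 */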

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));
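
/*
 * Illustrative use (an assumption, not from the original header):
 *
 *     GetPhysAddrResult res = {};
 *     ARMMMUFaultInfo fi = {};
 *     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
 *         // failed: report the fault, e.g. via arm_fi_to_lfsc(&fi)
 *     } else {
 *         hwaddr pa = res.f.phys_addr;  // plus res.f.attrs, res.cacheattrs
 *     }
 */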

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
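
/*
 * Illustrative example (not part of the original header): the allocation
 * tag lives in bits [59:56] of the pointer, so
 *
 *     uint64_t tagged = address_with_allocation_tag(0xffff0000, 0xa);
 *     // tagged == 0x0a000000ffff0000
 *     allocation_tag_from_addr(tagged);   // 0xa
 */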

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
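
/*
 * Illustrative example (not part of the original header): with a reset
 * PMCR.N of 4, pmu_num_counters() is 4 and pmu_counter_mask() is
 * (1 << 31) | 0xf == 0x8000000f, i.e. the cycle counter plus the four
 * event counters.
 */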

#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
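
/*
 * Worked example (illustrative, not part of the original header):
 * with tsz == 16 (48-bit VA) and tbi == 1, the PAC occupies bits [55:48]:
 *
 *     pauth_ptr_mask(param) == MAKE_64BIT_MASK(48, 8) == 0x00ff000000000000
 *
 * With tbi == 0 the top byte is also used, giving 0xffff000000000000.
 */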

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
 */
uint64_t gt_virt_cnt_offset(CPUARMState *env);
#endif