/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000
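/*
 * For reference, those frequencies correspond to tick periods of
 * 1 / 1,000,000,000 Hz = 1ns and 1 / 62,500,000 Hz = 16ns respectively.
 */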
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE  (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * arm_aa32_secure_pl1_0(): Return true if in Secure PL1&0 regime
 *
 * Return true if the CPU is in the Secure PL1&0 translation regime.
 * This requires that EL3 exists and is AArch32 and we are currently
 * Secure. If this is the case then the ARMMMUIdx_E10* apply and
 * mean we are in EL3, not EL1.
 */
static inline bool arm_aa32_secure_pl1_0(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_el_is_aa64(env, 3) && arm_is_secure(env);
}

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
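/*
 * Illustration (not used by the code): for Hyp mode,
 * bank_number(ARM_CPU_MODE_HYP) == BANK_HYP, so the banked SP and SPSR
 * live in env->banked_r13[BANK_HYP] and env->banked_spsr[BANK_HYP],
 * while r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS because Hyp
 * has no banked R14 of its own and shares it with User/System.
 */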
void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
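/*
 * A sketch of the flow, assuming a guest at EL1 executes "MSR SPSel, #0"
 * while PSTATE.SP is currently 1: update_spsel(env, 0) first saves the
 * live xregs[31] into sp_el[1] via aarch64_save_sp(), then clears
 * PSTATE.SP, then reloads xregs[31] from sp_el[0] via
 * aarch64_restore_sp(), so both stack pointers stay consistent.
 */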
/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;
/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
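/*
 * Worked example (illustrative only): a second-level translation fault
 * on domain 3 has fi->type == ARMFault_Translation, fi->level == 2 and
 * fi->domain == 3, giving fsc = 0x7 | (3 << 4) = 0x37; a synchronous
 * external abort with fi->ea set gives 0x8 | (1 << 12) = 0x1008, with
 * the EA classification landing in bit 12 of the returned value.
 */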
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}
static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

/**
 * Return the exception level we're running at if our current MMU index
 * is @mmu_idx. @s_pl1_0 should be true if this is the AArch32
 * Secure PL1&0 translation regime.
 */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}
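/*
 * For example, a CPU implementing 6 breakpoints reports
 * ID_AA64DFR0.BRPS == 5 and arm_num_brps() returns 6; the same
 * off-by-one convention applies to the WRPS and CTX_CMPS accessors
 * below.
 */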
/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
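/*
 * For instance, a PSR with mode bits M[3:0] == 0x3 (Supervisor, 0x13)
 * yields "svc", and 0x0 (User, 0x10) yields "usr"; encodings with no
 * architected mode print as "???".
 */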
/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit. Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif
/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;
/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr,
 * attrs, prot and page_size may not be filled in, and the populated fsr
 * value provides information on why the translation aborted, in the format
 * of a DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */
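/*
 * A minimal sketch of assembling such a descriptor with the
 * registerfields helpers (mmu_idx, tbi, tcma, is_write and total_size
 * are assumed local variables; the real translator code is the
 * authoritative reference):
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, TBI, tbi);
 *     desc = FIELD_DP32(desc, MTEDESC, TCMA, tcma);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
 */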
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
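/*
 * To illustrate the fold (derivation only, not extra logic): with
 * bit55 == 0 the match requires ptr_tag == 0x0, and with bit55 == 1 it
 * requires ptr_tag == 0xf, since (0x0 + 0) & 0xf and (0xf + 1) & 0xf
 * are the only combinations that wrap to zero.
 */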
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
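/*
 * Worked example (illustrative only): a CPU whose reset PMCR reports
 * N == 4 event counters gives pmu_counter_mask() == (1ULL << 31) | 0xf,
 * i.e. the cycle counter bit plus one bit per implemented counter.
 */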
#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
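/*
 * Worked example (illustrative only): with param.tsz == 16 (a 48-bit
 * VA) and param.tbi == 0 this returns MAKE_64BIT_MASK(48, 16), i.e.
 * bits [63:48]; with param.tbi == 1 the top byte is excluded and the
 * mask covers bits [55:48] instead.
 */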
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and CNTVCT_EL0 (this will be either 0 or the value of CNTVOFF_EL2).
 */
uint64_t gt_virt_cnt_offset(CPUARMState *env);
#endif