/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, i.e. number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

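/*
 * As a worked example (not from this header): 1e9 ns / GTIMER_SCALE
 * = 62500000 ticks per second, i.e. the 62.5MHz mentioned above, so a
 * conversion from the virtual clock might look like:
 *
 *     uint64_t ticks = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
 */
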
/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

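/*
 * These FIELD() definitions pair with the accessors from
 * "hw/registerfields.h". A hypothetical (illustrative only) check of
 * CPACR_EL1.FPEN would look like:
 *
 *     int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
 */
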
/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE  (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjmp back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

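/*
 * A typical call from a helper might look like the following sketch;
 * the syndrome value here is only an example (syn_uncategorized() and
 * exception_target_el() are declared in syndrome.h and later in this
 * file respectively):
 *
 *     raise_exception(env, EXCP_UDEF, syn_uncategorized(),
 *                     exception_target_el(env));
 */
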
/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

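/*
 * Sketch of how the two mappings differ for Hyp mode (illustrative only):
 *
 *     uint32_t sp = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
 *         -> indexes with BANK_HYP
 *     uint32_t lr = env->banked_r14[r14_bank_number(ARM_CPU_MODE_HYP)];
 *         -> indexes with BANK_USRSYS
 */
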
void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

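/*
 * Sketch of intended use, assuming a softfloat float_status such as
 * env->vfp.fp_status (the exact field name here is an assumption):
 *
 *     set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_ZERO),
 *                             &env->vfp.fp_status);
 */
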
static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

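/*
 * Worked example: a level 1 translation fault (fsc 0x5) on domain 5
 * yields 0x5 | (5 << 4) = 0x55; FS[4] (zero for this fault type) would
 * be held in bit 10.
 */
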
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

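/*
 * Worked example: a level 2 permission fault encodes as
 * 0b001100 | 2 = 0x0e, and with the always-set LPAE bit 9 the
 * function returns 0x20e.
 */
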
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (e.g. "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit. Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}

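/*
 * For example, Gran16K yields 14 bits, i.e. a 16KiB page:
 *
 *     uint64_t page_size = 1ULL << arm_granule_bits(Gran16K);  -> 0x4000
 */
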
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

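/*
 * A minimal caller sketch (illustrative only, error handling elided;
 * MMU_DATA_LOAD is the generic QEMU MMUAccessType for a data read):
 *
 *     GetPhysAddrResult res = {};
 *     ARMMMUFaultInfo fi = {};
 *     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
 *         ... deliver a fault using fi, e.g. via arm_fi_to_lfsc() ...
 *     } else {
 *         hwaddr pa = res.f.phys_addr;
 *     }
 */
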
/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 * address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

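/*
 * Callers typically assemble @desc at translate time using FIELD_DP32();
 * a sketch with illustrative values:
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, TBI, tbi);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 *     uint64_t clean = mte_check(env, desc, ptr, GETPC());
 */
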
/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

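/*
 * For example, inserting allocation tag 0xa into bits [59:56]:
 *
 *     uint64_t tagged = address_with_allocation_tag(ptr, 0xa);
 *     assert(allocation_tag_from_addr(tagged) == 0xa);
 */
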
/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

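/*
 * The (ptr_tag + bit55) & 0xf trick works because when bit55 is 0 the
 * match requires ptr<59:56> == 0b0000 (so ptr<59:55> == 00000), and when
 * bit55 is 1 it requires ptr<59:56> == 0b1111, which wraps to 0 when
 * incremented (so ptr<59:55> == 11111).
 */
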
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

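/*
 * Worked example: with PMCR.N == 4 event counters, pmu_counter_mask()
 * returns (1 << 31) | 0xf == 0x8000000f, i.e. bit 31 for the cycle
 * counter plus one bit per implemented event counter.
 */
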
#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (e.g. changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

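/*
 * Worked example: for a 48-bit VA space (param.tsz == 16) with TBI
 * disabled, bot_pac_bit is 48 and top_pac_bit is 64, so the PAC
 * occupies bits [63:48] and the mask is 0xffff000000000000.
 */
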
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

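/*
 * This expands to 0x808b: one bit for each power-of-2 VQ (1, 2, 4, 8, 16),
 * i.e. vector lengths of 128, 256, 512, 1024 and 2048 bits.
 */
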
/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps      (hw_watchpoints->len)
#define cur_hw_bps      (hw_breakpoints->len)
#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
#endif /* TARGET_ARM_INTERNALS_H */