/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC 1
#define BANK_ABT 2
#define BANK_UND 3
#define BANK_IRQ 4
#define BANK_FIQ 5
#define BANK_HYP 6
#define BANK_MON 7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16
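
/*
 * Illustrative sketch only (this helper is hypothetical and not used
 * elsewhere): with GTIMER_SCALE nanoseconds per tick, a generic timer
 * count can be derived from a nanosecond clock by simple division.  The
 * real conversion lives in the generic timer code in helper.c and also
 * accounts for a configurable CNTFRQ.
 */
static inline uint64_t example_ns_to_gtimer_ticks(uint64_t ns)
{
    return ns / GTIMER_SCALE; /* 16 ns per tick == 62.5 MHz */
}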

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME (1U << 28)
#define MDCR_TDCC (1U << 27)
#define MDCR_HLP (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */
#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
#define MDCR_TTRF (1U << 19)
#define MDCR_STE (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME (1U << 17) /* MDCR_EL3 */
#define MDCR_HPMD (1U << 17) /* MDCR_EL2 */
#define MDCR_SDD (1U << 16)
#define MDCR_SPD (3U << 14)
#define MDCR_TDRA (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA (1U << 9)
#define MDCR_TDE (1U << 8)
#define MDCR_HPME (1U << 7)
#define MDCR_TPM (1U << 6)
#define MDCR_TPMCR (1U << 5)
#define MDCR_HPMN (0x1fU)
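
/*
 * Illustrative sketch only: the FIELD() definitions above are consumed with
 * the FIELD_EX32/FIELD_EX64/FIELD_DP* helpers from hw/registerfields.h.
 * For example, pulling the byte-address-select bits out of a DBGWCR value:
 */
static inline uint32_t example_dbgwcr_bas(uint64_t dbgwcr)
{
    return FIELD_EX64(dbgwcr, DBGWCR, BAS);
}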

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0 (1U << 4)
#define TTBCR_PD1 (1U << 5)
#define TTBCR_EPD0 (1U << 7)
#define TTBCR_IRGN0 (3U << 8)
#define TTBCR_ORGN0 (3U << 10)
#define TTBCR_SH0 (3U << 12)
#define TTBCR_T1SZ (3U << 16)
#define TTBCR_A1 (1U << 22)
#define TTBCR_EPD1 (1U << 23)
#define TTBCR_IRGN1 (3U << 24)
#define TTBCR_ORGN1 (3U << 26)
#define TTBCR_SH1 (1U << 28)
#define TTBCR_EAE (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0 (1ULL << 0)
#define HCRX_ENALS (1ULL << 1)
#define HCRX_ENASR (1ULL << 2)
#define HCRX_FNXS (1ULL << 3)
#define HCRX_FGTNXS (1ULL << 4)
#define HCRX_SMPME (1ULL << 5)
#define HCRX_TALLINT (1ULL << 6)
#define HCRX_VINMI (1ULL << 7)
#define HCRX_VFNMI (1ULL << 8)
#define HCRX_CMOW (1ULL << 9)
#define HCRX_MCE2 (1ULL << 10)
#define HCRX_MSCEN (1ULL << 11)

#define HPFAR_NS (1ULL << 63)

#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
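
/*
 * Sketch (hypothetical helper, not part of the real code): how the
 * CNTHCTL _E2H1/_E2H0 field names defined above are intended to be
 * selected, i.e. by first looking at the effective value of HCR_EL2.E2H.
 * The field name env->cp15.cnthctl_el2 is assumed here.
 */
static inline bool example_cnthctl_el1pcten(CPUARMState *env)
{
    uint64_t cnthctl = env->cp15.cnthctl_el2;

    if (arm_hcr_el2_eff(env) & HCR_E2H) {
        return FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H1);
    }
    return FIELD_EX64(cnthctl, CNTHCTL, EL1PCTEN_E2H0);
}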

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
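
/*
 * Usage sketch (hypothetical helper): reading the banked SP and LR for a
 * given AArch32 mode uses bank_number() for r13 (and the SPSR) but
 * r14_bank_number() for r14, as described in the comment above.
 */
static inline void example_get_banked_sp_lr(CPUARMState *env, int mode,
                                            uint32_t *sp, uint32_t *lr)
{
    *sp = env->banked_r13[bank_number(mode)];
    *lr = env->banked_r14[r14_bank_number(mode)];
}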

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
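
/*
 * Worked example (illustrative only): a level 2 translation fault in
 * domain 3 encodes as FSC 0x7, with the domain placed in bits [7:4], so
 * arm_fi_to_sfsc() returns 0x37.
 */
static inline uint32_t example_s1_translation_fault_sfsc(void)
{
    ARMMMUFaultInfo fi = {
        .type = ARMFault_Translation,
        .level = 2,
        .domain = 3,
    };
    return arm_fi_to_sfsc(&fi); /* 0x7 | (3 << 4) == 0x37 */
}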

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}
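
/*
 * Worked example (illustrative only), for arm_fi_to_lfsc() above: a level 3
 * permission fault encodes as 0b001100 | 3 == 0xf, and the function
 * additionally sets the LPAE bit (bit 9), giving 0x20f.
 */
static inline uint32_t example_l3_permission_lfsc(void)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Permission, .level = 3 };
    return arm_fi_to_lfsc(&fi); /* (0b001100 | 3) | (1 << 9) == 0x20f */
}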

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        g_assert_not_reached();
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}
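
/*
 * Sketch only: a typical consumer of regime_sctlr()/regime_tcr() above
 * checks regime-wide control bits, e.g. whether stage 1 translation is
 * enabled.  This simplified form ignores the stage 2 and M-profile special
 * cases that the real logic in ptw.c has to handle.
 */
static inline bool example_regime_stage1_enabled(CPUARMState *env,
                                                 ARMMMUIdx mmu_idx)
{
    return (regime_sctlr(env, mmu_idx) & SCTLR_M) != 0;
}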

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
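
/*
 * For illustration only: the page size in bytes implied by a granule is
 * just 1 << arm_granule_bits(gran), i.e. 4K, 16K or 64K.
 */
static inline uint64_t example_granule_bytes(ARMGranuleSize gran)
{
    return 1ULL << arm_granule_bits(gran);
}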

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz : 8;
    unsigned ps : 3;
    unsigned sh : 2;
    unsigned select : 1;
    bool tbi : 1;
    bool epd : 1;
    bool hpd : 1;
    bool tsz_oob : 1; /* tsz has been clamped to legal range */
    bool ds : 1;
    bool ha : 1;
    bool hd : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
                                    MMUAccessType access_type,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
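
/*
 * Sketch (the helper and field values are illustrative; cf. the MTE code in
 * translate-a64.c and mte_helper.c): a descriptor word for the mte_check*()
 * functions is assembled from the MTEDESC fields above with FIELD_DP32.
 */
static inline uint32_t example_make_mtedesc(int mmu_idx, bool is_write,
                                            int total_size)
{
    uint32_t desc = 0;

    desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
    desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
    desc = FIELD_DP32(desc, MTEDESC, SIZEM1, total_size - 1);
    return desc;
}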

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK 0xf800
#define PMCRN_SHIFT 11
#define PMCRLP 0x80
#define PMCRLC 0x40
#define PMCRDP 0x20
#define PMCRX 0x10
#define PMCRD 0x8
#define PMCRC 0x4
#define PMCRP 0x2
#define PMCRE 0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P 0x80000000
#define PMXEVTYPER_U 0x40000000
#define PMXEVTYPER_NSK 0x20000000
#define PMXEVTYPER_NSU 0x10000000
#define PMXEVTYPER_NSH 0x08000000
#define PMXEVTYPER_M 0x04000000
#define PMXEVTYPER_MT 0x02000000
#define PMXEVTYPER_EVTCOUNT 0x0000ffff
#define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                         PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                         PMXEVTYPER_M | PMXEVTYPER_MT | \
                         PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR 0xf8000000
#define PMCCFILTR_M PMXEVTYPER_M
#define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif
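
/*
 * Sketch of how pmu_counter_mask() above is typically used (cf. the
 * PMCNTENSET write handling in helper.c): only bit 31 (the cycle counter)
 * and the implemented event counters can be set.  Illustrative only.
 */
static inline void example_pmcntenset_write(CPUARMState *env, uint64_t value)
{
    env->cp15.c9_pmcnten |= value & pmu_counter_mask(env);
}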

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP \
    ((1 << (1 - 1)) | (1 << (2 - 1)) | \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the EL0 is AArch32 case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);
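
/*
 * Worked example (illustrative only), for pauth_ptr_mask() above: with TBI
 * enabled and a 48-bit VA space (tsz == 16) the PAC occupies bits [55:48],
 * i.e. the function returns MAKE_64BIT_MASK(48, 8).
 */
static inline uint64_t example_pac_field_mask_48bit_va(void)
{
    ARMVAParameters param = { .tsz = 16, .tbi = true };
    return pauth_ptr_mask(param); /* bits [55:48] */
}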

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol which
 * updates things. Read access (i.e. when the values are copied to the
 * vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable as most of the time debugging kernels you
 * never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
#endif