/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/hwaddr.h"
#include "exec/vaddr.h"
#include "exec/breakpoint.h"
#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ 1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME    (1U << 28)
#define MDCR_TDCC     (1U << 27)
#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_TTRF     (1U << 19)
#define MDCR_STE      (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD     (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)
#define MDCR_HPMN     (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0    (1ULL << 0)
#define HCRX_ENALS    (1ULL << 1)
#define HCRX_ENASR    (1ULL << 2)
#define HCRX_FNXS     (1ULL << 3)
#define HCRX_FGTNXS   (1ULL << 4)
#define HCRX_SMPME    (1ULL << 5)
#define HCRX_TALLINT  (1ULL << 6)
#define HCRX_VINMI    (1ULL << 7)
#define HCRX_VFNMI    (1ULL << 8)
#define HCRX_CMOW     (1ULL << 9)
#define HCRX_MCE2     (1ULL << 10)
#define HCRX_MSCEN    (1ULL << 11)

#define HPFAR_NS      (1ULL << 63)

#define HSTR_TTEE  (1 << 16)
#define HSTR_TJDBX (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)
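/*
 * Illustrative sketch (not part of the original header): each FIELD()
 * from hw/registerfields.h expands to R_<REG>_<FIELD>_{SHIFT,LENGTH,MASK}
 * constants plus extract/deposit helpers, e.g. for DBGWCR.BAS declared
 * above:
 *
 *     uint64_t wcr = env->cp15.dbgwcr[n];
 *     unsigned bas = FIELD_EX64(wcr, DBGWCR, BAS);  // extract bits [12:5]
 *     wcr = FIELD_DP64(wcr, DBGWCR, E, 1);          // set the enable bit
 */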
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
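/*
 * Usage sketch (illustrative, not part of the original header): when
 * switching out of an AArch32 mode, R13/SPSR and R14 are banked by
 * different indices, e.g. for Hyp mode:
 *
 *     env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->regs[13];
 *     env->banked_r14[r14_bank_number(ARM_CPU_MODE_HYP)] = env->regs[14];
 *
 * Here bank_number() returns BANK_HYP but r14_bank_number() returns
 * BANK_USRSYS, because Hyp mode has no banked R14 of its own.
 */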
void arm_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

/* Return the effective value of SCR_EL3.RW */
static inline bool arm_scr_rw_eff(CPUARMState *env)
{
    /*
     * SCR_EL3.RW has an effective value of 1 if:
     *  - we are NS and EL2 is implemented but doesn't support AArch32
     *  - we are S and EL2 is enabled (in which case it must be AArch64)
     */
    ARMCPU *cpu = env_archcpu(env);

    if (env->cp15.scr_el3 & SCR_RW) {
        return true;
    }
    if (env->cp15.scr_el3 & SCR_NS) {
        return arm_feature(env, ARM_FEATURE_EL2) &&
            !cpu_isar_feature(aa64_aa32_el2, cpu);
    } else {
        return env->cp15.scr_el3 & SCR_EEL2;
    }
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && arm_scr_rw_eff(env);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32bit endianness is determined by looking at CPSR's E bit */
    return env->uncached_cpsr & CPSR_E;
}

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}
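/*
 * Usage sketch (illustrative only; a hypothetical MSR SPSel helper):
 *
 *     update_spsel(env, imm);   // no-op unless PSTATE.SP actually changes
 *
 * At EL1 with SPSel flipping 0 -> 1 this saves xregs[31] into
 * env->sp_el[0] and reloads it from env->sp_el[1], so the working
 * stack pointer always tracks the architecturally selected SP.
 */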
/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
#endif
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
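/*
 * Usage sketch (illustrative only): after the guest debug registers have
 * been overwritten wholesale, e.g. by inbound migration or reset, the
 * QEMU breakpoint/watchpoint lists are rebuilt in one go:
 *
 *     hw_breakpoint_update_all(cpu);
 *     hw_watchpoint_update_all(cpu);
 */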
/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    hwaddr s2addr;
    hwaddr paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
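/*
 * Worked example (illustrative only): a level-2 Translation fault
 * encodes as 0b000100 | 2 == 0x06, and arm_fi_to_lfsc() additionally
 * sets bit 9 (the LPAE bit of the DFSR format), giving 0x206.
 */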
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
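/*
 * Usage sketch (illustrative only): hooks are registered with the
 * arm_register_pre_el_change_hook()/arm_register_el_change_hook()
 * APIs from cpu.h and then run around every EL transition, e.g.:
 *
 *     arm_call_pre_el_change_hook(cpu);   // before the EL switch
 *     ... update EL, PSTATE, etc ...
 *     arm_call_el_change_hook(cpu);       // after the EL switch
 */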
/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}
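/*
 * Usage sketch (illustrative only): page-table-walk code looks up the
 * registers that control a regime through these helpers, e.g. for the
 * stage 1 EL1&0 regime:
 *
 *     uint64_t tcr = regime_tcr(env, ARMMMUIdx_Stage1_E1);     // TCR_EL1
 *     uint64_t sctlr = regime_sctlr(env, ARMMMUIdx_Stage1_E1); // SCTLR_EL1
 *
 * since regime_el(env, ARMMMUIdx_Stage1_E1) == 1.
 */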
/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI
 * bit. Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
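/*
 * Worked example (illustrative only): Gran16K yields 14 because a 16KB
 * granule covers 2^14 bytes; a granule-aligned address therefore has
 * its low arm_granule_bits() bits clear.
 */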
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful.
 * Otherwise, phys_ptr, attrs, prot and page_size may not be filled in,
 * and the populated fsr value provides information on why the translation
 * aborted, in the format of a DFSR/IFSR fault register, with the following
 * caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 * address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
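/*
 * Usage sketch (illustrative only): callers build the MTEDESC descriptor
 * word with the FIELD_DP32() deposit macros, e.g.
 *
 *     uint32_t desc = 0;
 *     desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *     desc = FIELD_DP32(desc, MTEDESC, WRITE, 1);
 *     desc = FIELD_DP32(desc, MTEDESC, SIZEM1, size - 1);
 */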
/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 * operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
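/*
 * Worked example (illustrative only): the allocation tag lives in
 * pointer bits [59:56], so for a dirty pointer 0x0300_0000_dead_beef
 * allocation_tag_from_addr() returns 0x3, while
 * address_with_allocation_tag(ptr, 0xa) yields 0x0a00_0000_dead_beef.
 */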
/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}

GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
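/*
 * Worked example (illustrative only): with PMCR.N == 4 event counters,
 * pmu_counter_mask() returns (1ULL << 31) | 0xf, i.e. the cycle counter
 * bit plus one bit per implemented event counter.
 */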

/* Return true if the gdbstub is presenting an AArch64 CPU */
static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
{
    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
}

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (eg changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}

/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);
/* Add the cpreg definitions for AT instructions */
void define_at_insn_regs(ARMCPU *cpu);
/* Add the cpreg definitions for PM cpregs */
void define_pm_cpregs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the
     * test returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen for the "EL0 is AArch32" case.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) ||
         (env->cp15.scr_el3 & SCR_FGTEN));
}
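/*
 * Illustrative sketch only (not a QEMU API): what pauth_ptr_mask() above
 * returns for an assumed 48-bit VA region (param.tsz == 16):
 *
 *     param.tbi == 0: bot = 48, top = 64
 *                     -> MAKE_64BIT_MASK(48, 16) == 0xffff000000000000
 *     param.tbi == 1: bot = 48, top = 56 (top byte skipped for TBI)
 *                     -> MAKE_64BIT_MASK(48, 8)  == 0x00ff000000000000
 *
 * i.e. the PAC occupies the address bits between the top of the VA
 * range and the start of the (possibly ignored) top byte.
 */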

/*
 * Although the ARM implementation of hardware assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of the registers which is used for all active cores.
 *
 * Write access is serialised via the GDB protocol, which is the only
 * thing that updates the state. Read access (i.e. when the values are
 * copied to the vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable, as most of the time when debugging kernels
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint, so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps (hw_watchpoints->len)
#define cur_hw_bps (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
int insert_hw_breakpoint(vaddr pc);
int delete_hw_breakpoint(vaddr pc);

bool check_watchpoint_in_range(int i, vaddr addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
int delete_hw_watchpoint(vaddr addr, vaddr len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return a mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
bool arm_pan_enabled(CPUARMState *env);

#endif /* TARGET_ARM_INTERNALS_H */
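/*
 * Illustrative sketch only (not a QEMU API): walking the shared
 * hardware breakpoint pool declared above via its accessor macros:
 *
 *     for (guint i = 0; i < cur_hw_bps; i++) {
 *         HWBreakpoint *bp = get_hw_bp(i);
 *         // bp->bvr holds the breakpoint address (DBGBVR value),
 *         // bp->bcr the control bits (DBGBCR value)
 *     }
 */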