/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "exec/breakpoint.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"
#include "cpu-features.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline int arm_env_mmu_index(CPUARMState *env)
{
    return EX_TBFLAG_ANY(env->hflags, MMUIDX);
}

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/*
 * Default frequency for the generic timer, in Hz.
 * ARMv8.6 and later CPUs architecturally must use a 1GHz timer; before
 * that it was an IMPDEF choice, and QEMU initially picked 62.5MHz,
 * which gives a 16ns tick period.
 *
 * We will use the back-compat value:
 *  - for QEMU CPU types added before we standardized on 1GHz
 *  - for versioned machine types with a version of 9.0 or earlier
 * In any case, the machine model may override via the cntfrq property.
 */
#define GTIMER_DEFAULT_HZ    1000000000
#define GTIMER_BACKCOMPAT_HZ 62500000
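
/*
 * Worked example: at GTIMER_BACKCOMPAT_HZ one tick is
 * 1e9 / 62500000 = 16ns, so a 1ms timeout corresponds to 62500 ticks;
 * at GTIMER_DEFAULT_HZ a tick is exactly 1ns.
 */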

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* Bit definitions for DBGWCRn and DBGWCRn_EL1 */
FIELD(DBGWCR, E, 0, 1)
FIELD(DBGWCR, PAC, 1, 2)
FIELD(DBGWCR, LSC, 3, 2)
FIELD(DBGWCR, BAS, 5, 8)
FIELD(DBGWCR, HMC, 13, 1)
FIELD(DBGWCR, SSC, 14, 2)
FIELD(DBGWCR, LBN, 16, 4)
FIELD(DBGWCR, WT, 20, 1)
FIELD(DBGWCR, MASK, 24, 5)
FIELD(DBGWCR, SSCE, 29, 1)

#define VTCR_NSW (1u << 29)
#define VTCR_NSA (1u << 30)
#define VSTCR_SW VTCR_NSW
#define VSTCR_SA VTCR_NSA

/* Bit definitions for CPACR (AArch32 only) */
FIELD(CPACR, CP10, 20, 2)
FIELD(CPACR, CP11, 22, 2)
FIELD(CPACR, TRCDIS, 28, 1)    /* matches CPACR_EL1.TTA */
FIELD(CPACR, D32DIS, 30, 1)    /* up to v7; RAZ in v8 */
FIELD(CPACR, ASEDIS, 31, 1)

/* Bit definitions for CPACR_EL1 (AArch64 only) */
FIELD(CPACR_EL1, ZEN, 16, 2)
FIELD(CPACR_EL1, FPEN, 20, 2)
FIELD(CPACR_EL1, SMEN, 24, 2)
FIELD(CPACR_EL1, TTA, 28, 1)   /* matches CPACR.TRCDIS */

/* Bit definitions for HCPTR (AArch32 only) */
FIELD(HCPTR, TCP10, 10, 1)
FIELD(HCPTR, TCP11, 11, 1)
FIELD(HCPTR, TASE, 15, 1)
FIELD(HCPTR, TTA, 20, 1)
FIELD(HCPTR, TAM, 30, 1)       /* matches CPTR_EL2.TAM */
FIELD(HCPTR, TCPAC, 31, 1)     /* matches CPTR_EL2.TCPAC */

/* Bit definitions for CPTR_EL2 (AArch64 only) */
FIELD(CPTR_EL2, TZ, 8, 1)      /* !E2H */
FIELD(CPTR_EL2, TFP, 10, 1)    /* !E2H, matches HCPTR.TCP10 */
FIELD(CPTR_EL2, TSM, 12, 1)    /* !E2H */
FIELD(CPTR_EL2, ZEN, 16, 2)    /* E2H */
FIELD(CPTR_EL2, FPEN, 20, 2)   /* E2H */
FIELD(CPTR_EL2, SMEN, 24, 2)   /* E2H */
FIELD(CPTR_EL2, TTA, 28, 1)
FIELD(CPTR_EL2, TAM, 30, 1)    /* matches HCPTR.TAM */
FIELD(CPTR_EL2, TCPAC, 31, 1)  /* matches HCPTR.TCPAC */

/* Bit definitions for CPTR_EL3 (AArch64 only) */
FIELD(CPTR_EL3, EZ, 8, 1)
FIELD(CPTR_EL3, TFP, 10, 1)
FIELD(CPTR_EL3, ESM, 12, 1)
FIELD(CPTR_EL3, TTA, 20, 1)
FIELD(CPTR_EL3, TAM, 30, 1)
FIELD(CPTR_EL3, TCPAC, 31, 1)

#define MDCR_MTPME (1U << 28)
#define MDCR_TDCC  (1U << 27)
#define MDCR_HLP   (1U << 26)  /* MDCR_EL2 */
#define MDCR_SCCD  (1U << 23)  /* MDCR_EL3 */
#define MDCR_HCCD  (1U << 23)  /* MDCR_EL2 */
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD  (1U << 20)
#define MDCR_TTRF  (1U << 19)
#define MDCR_STE   (1U << 18)  /* MDCR_EL3 */
#define MDCR_SPME  (1U << 17)  /* MDCR_EL3 */
#define MDCR_HPMD  (1U << 17)  /* MDCR_EL2 */
#define MDCR_SDD   (1U << 16)
#define MDCR_SPD   (3U << 14)
#define MDCR_TDRA  (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA   (1U << 9)
#define MDCR_TDE   (1U << 8)
#define MDCR_HPME  (1U << 7)
#define MDCR_TPM   (1U << 6)
#define MDCR_TPMCR (1U << 5)
#define MDCR_HPMN  (0x1fU)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_MTPME | MDCR_TDCC | MDCR_SCCD | \
                         MDCR_EPMAD | MDCR_EDAD | MDCR_TTRF | \
                         MDCR_STE | MDCR_SPME | MDCR_SPD)
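
/*
 * For example, a guest write to the AArch32 SDCR would typically be
 * narrowed with "value & SDCR_VALID_MASK", so that MDCR_EL3-only bits
 * such as MDCR_SDD are not writable from AArch32.
 */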

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

FIELD(VTCR, T0SZ, 0, 6)
FIELD(VTCR, SL0, 6, 2)
FIELD(VTCR, IRGN0, 8, 2)
FIELD(VTCR, ORGN0, 10, 2)
FIELD(VTCR, SH0, 12, 2)
FIELD(VTCR, TG0, 14, 2)
FIELD(VTCR, PS, 16, 3)
FIELD(VTCR, VS, 19, 1)
FIELD(VTCR, HA, 21, 1)
FIELD(VTCR, HD, 22, 1)
FIELD(VTCR, HWU59, 25, 1)
FIELD(VTCR, HWU60, 26, 1)
FIELD(VTCR, HWU61, 27, 1)
FIELD(VTCR, HWU62, 28, 1)
FIELD(VTCR, NSW, 29, 1)
FIELD(VTCR, NSA, 30, 1)
FIELD(VTCR, DS, 32, 1)
FIELD(VTCR, SL2, 33, 1)

#define HCRX_ENAS0   (1ULL << 0)
#define HCRX_ENALS   (1ULL << 1)
#define HCRX_ENASR   (1ULL << 2)
#define HCRX_FNXS    (1ULL << 3)
#define HCRX_FGTNXS  (1ULL << 4)
#define HCRX_SMPME   (1ULL << 5)
#define HCRX_TALLINT (1ULL << 6)
#define HCRX_VINMI   (1ULL << 7)
#define HCRX_VFNMI   (1ULL << 8)
#define HCRX_CMOW    (1ULL << 9)
#define HCRX_MCE2    (1ULL << 10)
#define HCRX_MSCEN   (1ULL << 11)

#define HPFAR_NS     (1ULL << 63)

#define HSTR_TTEE    (1 << 16)
#define HSTR_TJDBX   (1 << 17)

/*
 * Depending on the value of HCR_EL2.E2H, bits 0 and 1
 * have different bit definitions, and EL1PCTEN might be
 * bit 0 or bit 10. We use _E2H1 and _E2H0 suffixes to
 * disambiguate if necessary.
 */
FIELD(CNTHCTL, EL0PCTEN_E2H1, 0, 1)
FIELD(CNTHCTL, EL0VCTEN_E2H1, 1, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H0, 0, 1)
FIELD(CNTHCTL, EL1PCEN_E2H0, 1, 1)
FIELD(CNTHCTL, EVNTEN, 2, 1)
FIELD(CNTHCTL, EVNTDIR, 3, 1)
FIELD(CNTHCTL, EVNTI, 4, 4)
FIELD(CNTHCTL, EL0VTEN, 8, 1)
FIELD(CNTHCTL, EL0PTEN, 9, 1)
FIELD(CNTHCTL, EL1PCTEN_E2H1, 10, 1)
FIELD(CNTHCTL, EL1PTEN, 11, 1)
FIELD(CNTHCTL, ECV, 12, 1)
FIELD(CNTHCTL, EL1TVT, 13, 1)
FIELD(CNTHCTL, EL1TVCT, 14, 1)
FIELD(CNTHCTL, EL1NVPCT, 15, 1)
FIELD(CNTHCTL, EL1NVVCT, 16, 1)
FIELD(CNTHCTL, EVNTIS, 17, 1)
FIELD(CNTHCTL, CNTVMASK, 18, 1)
FIELD(CNTHCTL, CNTPMASK, 19, 1)
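
/*
 * Example of the E2H ambiguity described above: with HCR_EL2.E2H == 1,
 * CNTHCTL_EL2.EL1PCTEN is bit 10 (EL1PCTEN_E2H1); with E2H == 0 the
 * same control is bit 0 (EL1PCTEN_E2H0), and bit 0 instead means
 * EL0PCTEN when E2H == 1.
 */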

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp,
                                uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el,
                                   uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}
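
/*
 * For example, for Hyp mode the two mappings differ:
 *   bank_number(ARM_CPU_MODE_HYP)     == BANK_HYP     (R13 and SPSR banked)
 *   r14_bank_number(ARM_CPU_MODE_HYP) == BANK_USRSYS  (R14 shared with USR/SYS)
 */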

void arm_cpu_register(const ARMCPUInfo *info);
void aarch64_cpu_register(const ARMCPUInfo *info);

void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
void arm_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc);

void arm_cpu_register_gdb_commands(ARMCPU *cpu);
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
                                       GPtrArray *, GPtrArray *);

void arm_restore_state_to_opc(CPUState *cs,
                              const TranslationBlock *tb,
                              const uint64_t *data);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);

/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
#endif /* CONFIG_TCG */

typedef enum ARMFPRounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
} ARMFPRounding;

extern const FloatRoundMode arm_rmode_to_sf_map[6];

static inline FloatRoundMode arm_rmode_to_sf(ARMFPRounding rmode)
{
    assert((unsigned)rmode < ARRAY_SIZE(arm_rmode_to_sf_map));
    return arm_rmode_to_sf_map[rmode];
}

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /*
     * This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /*
     * The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_is_el2_enabled(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/*
 * Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in EL3 */
            return 3;
        }

        return 1;
    }
}

static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
                                                  bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /*
     * In system mode, BE32 is modelled in line with the
     * architecture (as word-invariant big-endianness), where loads
     * and stores are done little endian but from addresses which
     * are adjusted by XORing with the appropriate constant. So the
     * endianness to use for the raw data access is not affected by
     * SCTLR.B.
     * In user mode, however, we model BE32 as byte-invariant
     * big-endianness (because user-only code cannot tell the
     * difference), and so we need to use a data access endianness
     * that depends on SCTLR.B.
     */
    if (sctlr_b) {
        return true;
    }
#endif
    /* In 32bit endianness is determined by looking at CPSR's E bit */
    return env->uncached_cpsr & CPSR_E;
}
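
/*
 * As an illustration of the word-invariant BE32 modelling described
 * above: in system mode a byte access to address A is performed as a
 * little-endian access to A ^ 3 (A ^ 2 for a halfword, A unchanged for
 * a word), which is why SCTLR.B does not change the raw data access
 * endianness there.
 */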

static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
{
    return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    if (!is_a64(env)) {
        return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
    } else {
        int cur_el = arm_current_el(env);
        uint64_t sctlr = arm_sctlr(env, cur_el);
        return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
    }
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
}
#endif

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
unsigned int arm_pamax(ARMCPU *cpu);

/*
 * round_down_to_parange_index
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range and returns the index for this. The index is intended to
 * be used to set ID_AA64MMFR0_EL1's PARANGE bits.
 */
uint8_t round_down_to_parange_index(uint8_t bit_size);

/*
 * round_down_to_parange_bit_size
 * @bit_size: uint8_t
 *
 * Rounds down the bit_size supplied to the first supported ARM physical
 * address range bit size and returns this.
 */
uint8_t round_down_to_parange_bit_size(uint8_t bit_size);
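
/*
 * For example, 39 is not one of the architected PARange sizes
 * (32/36/40/42/44/48/52 bits), so round_down_to_parange_bit_size(39)
 * would return 36 and round_down_to_parange_index(39) would return 1,
 * the PARANGE encoding for 36 bits.
 */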

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    uint64_t tcr = env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}
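
/*
 * For example, the exception entry and exception return paths clear
 * the monitor via arm_clear_exclusive() so that a LoadExcl/StoreExcl
 * pair cannot succeed across an exception boundary.
 */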

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_UnsuppAtomicUpdate,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
    ARMFault_GPCFOnWalk,
    ARMFault_GPCFOnOutput,
} ARMFaultType;

typedef enum ARMGPCF {
    GPCF_None,
    GPCF_AddressSize,
    GPCF_Walk,
    GPCF_EABT,
    GPCF_Fail,
} ARMGPCF;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @paddr: physical address that caused a fault for gpc
 * @paddr_space: physical address space that caused a fault for gpc
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    ARMGPCF gpcf;
    target_ulong s2addr;
    target_ulong paddr;
    ARMSecuritySpace paddr_space;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
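
/*
 * Worked example: a level-2 translation fault in domain 5 encodes as
 * fsc = 0x7 (translation, level != 1), then fsc |= 5 << 4, giving 0x57
 * in the short-format DFSR layout.
 */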

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101001;
        } else {
            fsc = fi->level;
        }
        break;
    case ARMFault_AccessFlag:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001000 | fi->level;
        break;
    case ARMFault_Permission:
        assert(fi->level >= 0 && fi->level <= 3);
        fsc = 0b001100 | fi->level;
        break;
    case ARMFault_Translation:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b101011;
        } else {
            fsc = 0b000100 | fi->level;
        }
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b010011;
        } else {
            fsc = 0b010100 | fi->level;
        }
        fsc |= fi->ea << 12;
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b011011;
        } else {
            fsc = 0b011100 | fi->level;
        }
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_UnsuppAtomicUpdate:
        fsc = 0x31;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    case ARMFault_GPCFOnWalk:
        assert(fi->level >= -1 && fi->level <= 3);
        if (fi->level < 0) {
            fsc = 0b100011;
        } else {
            fsc = 0b100100 | fi->level;
        }
        break;
    case ARMFault_GPCFOnOutput:
        fsc = 0b101000;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
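
/*
 * Worked example: a level-3 permission fault encodes as
 * 0b001100 | 3 == 0x0f; setting the LPAE bit 9 then yields 0x20f as
 * the long-format DFSR value.
 */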

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

#ifdef CONFIG_USER_ONLY
void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra);
void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra);
#else
bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
                            MMUAccessType access_type, int mmu_idx,
                            MemOp memop, int size, bool probe, uintptr_t ra);
#endif

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/*
 * Return true if the stage 1 translation regime is using LPAE
 * format page tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
G_NORETURN void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                            MMUAccessType access_type,
                                            int mmu_idx, uintptr_t retaddr);

#ifndef CONFIG_USER_ONLY
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);
#endif

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/*
 * Return true if this address translation regime has two ranges.
 * Note that this will not return the correct answer for AArch32
 * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is
 * never called from a context where EL3 can be AArch32. (The
 * correct return value for ARMMMUIdx_E3 would be different for
 * that case, so we can't just make the function return the
 * correct value anyway; we would need an extra "bool e3_is_aarch32"
 * argument which all the current callsites would pass as 'false'.)
 */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return true;
    default:
        return false;
    }
}
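
/*
 * For instance, ARMMMUIdx_E10_1 is an EL1&0 regime with both a low
 * (TTBR0) and a high (TTBR1) VA range, so it returns true above,
 * whereas ARMMMUIdx_E2 (EL2 with E2H == 0) has only the single
 * TTBR0_EL2 range.
 */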

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_E30_3_PAN:
        return true;
    default:
        return false;
    }
}

static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
{
    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_E3:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_E30_3_PAN:
        return 3;
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E30_0:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MSUserNegPri:
        return true;
    default:
        return false;
    }
}

/* Return the SCTLR value which controls this address translation regime */
static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
}

/*
 * These are the fields in VTCR_EL2 which affect both the Secure stage 2
 * and the Non-Secure stage 2 translation regimes (and hence which are
 * not present in VSTCR_EL2).
 */
#define VTCR_SHARED_FIELD_MASK \
    (R_VTCR_IRGN0_MASK | R_VTCR_ORGN0_MASK | R_VTCR_SH0_MASK | \
     R_VTCR_PS_MASK | R_VTCR_VS_MASK | R_VTCR_HA_MASK | R_VTCR_HD_MASK | \
     R_VTCR_DS_MASK)

/* Return the value of the TCR controlling this translation regime */
static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Secure stage 2 shares fields from VTCR_EL2. We merge those
         * in with the VSTCR_EL2 value to synthesize a single VTCR_EL2 format
         * value so the callers don't need to special case this.
         *
         * If a future architecture change defines bits in VSTCR_EL2 that
         * overlap with these VTCR_EL2 fields we may need to revisit this.
         */
        uint64_t v = env->cp15.vstcr_el2 & ~VTCR_SHARED_FIELD_MASK;
        v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK;
        return v;
    }
    return env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
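
/*
 * For example, when regime_tcr() synthesizes the Secure stage 2 TCR:
 * VTCR_EL2.PS (the stage 2 output size) is in VTCR_SHARED_FIELD_MASK
 * and so is taken from VTCR_EL2, while VSTCR_EL2.SW and VSTCR_EL2.SA
 * (bits 29 and 30) are taken from the VSTCR_EL2 value.
 */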

/* Return true if the translation regime is using LPAE format page tables */
static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);
    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V8)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}
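
/*
 * Example of the "field + 1" rule above: a CPU whose
 * ID_AA64DFR0_EL1.BRPS field reads as 5 implements 5 + 1 = 6 hardware
 * breakpoints; WRPS and CTX_CMPS follow the same convention.
 */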

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
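
/*
 * For example, a PSR value of 0x800001d3 has mode bits 0x13
 * (Supervisor), so aarch32_mode_name() indexes entry 0x13 & 0xf == 3
 * and returns "svc".
 */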

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_cpu_update_vinmi: Update CPU_INTERRUPT_VINMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VINMI bit in cs->interrupt_request, following
 * a change to either the input VNMI line from the GIC or the HCRX_EL2.VINMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vinmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vfnmi: Update CPU_INTERRUPT_VFNMI bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFNMI bit in cs->interrupt_request, following
 * a change to the HCRX_EL2.VFNMI.
 * Must be called with the BQL held.
 */
void arm_cpu_update_vfnmi(ARMCPU *cpu);

/**
 * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
 *
 * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
 * following a change to the HCR_EL2.VSE bit.
 */
void arm_cpu_update_vserr(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
    return ARMMMUIdx_Stage1_E0;
}
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx);
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }
    if (isar_feature_aa64_nmi(id)) {
        valid |= PSTATE_ALLINT;
    }

    return valid;
}

/* Granule size (i.e. page size) */
typedef enum ARMGranuleSize {
    /* Same order as TG0 encoding */
    Gran4K,
    Gran64K,
    Gran16K,
    GranInvalid,
} ARMGranuleSize;

/**
 * arm_granule_bits: Return address size of the granule in bits
 *
 * Return the address size of the granule in bits. This corresponds
 * to the pseudocode TGxGranuleBits().
 */
static inline int arm_granule_bits(ARMGranuleSize gran)
{
    switch (gran) {
    case Gran64K:
        return 16;
    case Gran16K:
        return 14;
    case Gran4K:
        return 12;
    default:
        g_assert_not_reached();
    }
}
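
/*
 * For example, arm_granule_bits(Gran16K) is 14, i.e. a 16KB page:
 * 1 << 14 == 16384 bytes per translation granule.
 */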

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned ps     : 3;
    unsigned sh     : 2;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool tsz_oob    : 1;  /* tsz has been clamped to legal range */
    bool ds         : 1;
    bool ha         : 1;
    bool hd         : 1;
    ARMGranuleSize gran : 2;
} ARMVAParameters;

/**
 * aa64_va_parameters: Return parameters for an AArch64 virtual address
 * @env: CPU
 * @va: virtual address to look up
 * @mmu_idx: determines translation regime to use
 * @data: true if this is a data access
 * @el1_is_aa32: true if we are asking about stage 2 when EL1 is AArch32
 *  (ignored if @mmu_idx is for a stage 1 regime; only affects tsz/tsz_oob)
 */
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data,
                                   bool el1_is_aa32);

int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx);
int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx);

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_is_el2_enabled(env)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}
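
/*
 * For example, if EL3 is implemented and SCR_EL3.ATA == 0, the check
 * above reports allocation tags as unavailable at EL0, EL1 and EL2,
 * regardless of the SCTLR_ELx.ATA* bits.
 */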

#ifndef CONFIG_USER_ONLY

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         bool secure, V8M_SAttributes *sattrs);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    /*
     * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
     * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
     */
    unsigned int attrs:8;
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
    bool is_s2_format:1;
} ARMCacheAttrs;

/* Fields that are valid upon success. */
typedef struct GetPhysAddrResult {
    CPUTLBEntryFull f;
    ARMCacheAttrs cacheattrs;
} GetPhysAddrResult;

/**
 * get_phys_addr: get the physical address for a virtual address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
 * prot and page_size may not be filled in, and the populated fsr value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for PMSAv5 based systems we don't bother to return a full FSR format
 *    value.
 */
bool get_phys_addr(CPUARMState *env, vaddr address,
                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

/**
 * get_phys_addr_with_space_nogpc: get the physical address for a virtual
 *                                 address
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @memop: memory operation feeding this access, or 0 for none
 * @mmu_idx: MMU index indicating required translation regime
 * @space: security space for the access
 * @result: set on translation success.
 * @fi: set to fault info if the translation fails
 *
 * Similar to get_phys_addr, but use the given security space and don't perform
 * a Granule Protection Check on the resulting address.
 */
bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
                                    MMUAccessType access_type, MemOp memop,
                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
    __attribute__((nonnull));

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       bool is_secure, GetPhysAddrResult *result,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

void arm_log_exception(CPUState *cs);

#endif /* !CONFIG_USER_ONLY */

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX,  0, 4)
FIELD(MTEDESC, TBI,   4, 2)
FIELD(MTEDESC, TCMA,  6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, ALIGN, 9, 3)
FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12)  /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);
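
/*
 * A descriptor for the mte_check* helpers is assembled from the
 * MTEDESC fields above, e.g.:
 *   desc = FIELD_DP32(desc, MTEDESC, MIDX, mmu_idx);
 *   desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
 * and is decoded with the matching FIELD_EX32(desc, MTEDESC, ...)
 * accessors.
 */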

/**
 * mte_mops_probe: Check where the next MTE failure is for a FEAT_MOPS operation
 * @env: CPU env
 * @ptr: start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
                        uint32_t desc);

/**
 * mte_mops_probe_rev: Check where the next MTE failure is for a FEAT_MOPS
 *                     operation going in the reverse direction
 * @env: CPU env
 * @ptr: *end* address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross a page boundary)
 * @desc: MTEDESC descriptor word (0 means no MTE checks)
 * Returns: the size of the region that can be copied without hitting
 *          an MTE tag failure
 *
 * Note that we assume that the caller has already checked the TBI
 * and TCMA bits with mte_checks_needed() and an MTE check is definitely
 * required.
 */
uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
                            uint32_t desc);

/**
 * mte_check_fail: Record an MTE tag check failure
 * @env: CPU env
 * @desc: MTEDESC descriptor word
 * @dirty_ptr: Failing dirty address
 * @ra: TCG retaddr
 *
 * This may never return (if the MTE tag checks are configured to fault).
 */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra);

/**
 * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
 * @env: CPU env
 * @dirty_ptr: Start address of memory region (dirty pointer)
 * @size: length of region (guaranteed not to cross page boundary)
 * @desc: MTEDESC descriptor word
 */
void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
                       uint32_t desc);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
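
/*
 * The fold in tcma_check() works because ptr_tag is ptr<59:56>:
 * if bit55 == 1 then (ptr_tag + 1) & 0xf == 0 only for ptr_tag == 0xf
 * (ptr<59:55> all ones), and if bit55 == 0 the sum is zero only for
 * ptr_tag == 0 (ptr<59:55> all zeroes).
 */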

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLP  0x80
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
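
/*
 * Worked example: with PMCR.N == 4 event counters, pmu_counter_mask()
 * is (1 << 31) | 0xf == 0x8000000f: the cycle counter bit plus one
 * bit per implemented event counter.
 */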
#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_pauth_reg(CPUState *cs, uint8_t *buf, int reg);
int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_tag_ctl_reg(CPUState *cs, uint8_t *buf, int reg);
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
#endif

/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);

/*
 * Return a pointer to the location where we currently store the
 * stack pointer for the requested security state and thread mode.
 * This pointer will become invalid if the CPU state is updated
 * such that the stack pointers are switched around (e.g. by changing
 * the SPSEL control bit).
 */
uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure,
                             bool threadmode, bool spsel);

bool el_is_in_host(CPUARMState *env, int el);

void aa32_max_features(ARMCPU *cpu);
int exception_target_el(CPUARMState *env);
bool arm_singlestep_active(CPUARMState *env);
bool arm_generate_debug_exceptions(CPUARMState *env);

/**
 * pauth_ptr_mask:
 * @param: parameters defining the MMU setup
 *
 * Return a mask of the address bits that contain the authentication code,
 * given the MMU config defined by @param.
 */
static inline uint64_t pauth_ptr_mask(ARMVAParameters param)
{
    int bot_pac_bit = 64 - param.tsz;
    int top_pac_bit = 64 - 8 * param.tbi;

    return MAKE_64BIT_MASK(bot_pac_bit, top_pac_bit - bot_pac_bit);
}
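
/*
 * Worked example (sketch only; pauth_ptr_mask_demo is not part of the
 * original header): with a 48-bit VA space (param.tsz == 16) and TBI
 * enabled (param.tbi == 1), bot_pac_bit is 48 and top_pac_bit is 56,
 * so the PAC occupies bits [55:48] and the mask is 0x00ff000000000000.
 * With TBI disabled the PAC extends up through bit 63 and the mask
 * becomes 0xffff000000000000.
 */
static inline uint64_t pauth_ptr_mask_demo(void)
{
    /* only .tsz and .tbi are read by pauth_ptr_mask() */
    ARMVAParameters param = { .tsz = 16, .tbi = true };
    return pauth_ptr_mask(param); /* == MAKE_64BIT_MASK(48, 8) */
}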
/* Add the cpreg definitions for debug related system registers */
void define_debug_regs(ARMCPU *cpu);

/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);

/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
{
    return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0;
}

/* Powers of 2 for sve_vq_map et al. */
#define SVE_VQ_POW2_MAP                                 \
    ((1 << (1 - 1)) | (1 << (2 - 1)) |                  \
     (1 << (4 - 1)) | (1 << (8 - 1)) | (1 << (16 - 1)))

/*
 * Return true if it is possible to take a fine-grained-trap to EL2.
 */
static inline bool arm_fgt_active(CPUARMState *env, int el)
{
    /*
     * The Arm ARM only requires the "{E2H,TGE} != {1,1}" test for traps
     * that can affect EL0, but it is harmless to do the test also for
     * traps on registers that are only accessible at EL1 because if the test
     * returns true then we can't be executing at EL1 anyway.
     * FGT traps only happen when EL2 is enabled and EL1 is AArch64;
     * traps from AArch32 only happen in the case where EL0 is AArch32.
     */
    return cpu_isar_feature(aa64_fgt, env_archcpu(env)) &&
        el < 2 && arm_is_el2_enabled(env) &&
        arm_el_is_aa64(env, 1) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE) &&
        (!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}

void assert_hflags_rebuild_correctly(CPUARMState *env);

/*
 * Although the ARM implementation of hardware-assisted debugging
 * allows for different breakpoints per-core, the current GDB
 * interface treats them as a global pool of registers (which seems to
 * be the case for x86, ppc and s390). As a result we store one copy
 * of registers which is used for all active cores.
 *
 * Write access is serialised by virtue of the GDB protocol, which
 * performs the updates one at a time. Read access (i.e. when the
 * values are copied to the vCPU) is also gated by GDB's run control.
 *
 * This is not unreasonable, as most of the time when debugging kernels
 * you never know which core will eventually execute your function.
 */

typedef struct {
    uint64_t bcr;
    uint64_t bvr;
} HWBreakpoint;

/*
 * The watchpoint registers can cover more area than the requested
 * watchpoint, so we need to store the additional information
 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
 * when the watchpoint is hit.
 */
typedef struct {
    uint64_t wcr;
    uint64_t wvr;
    CPUWatchpoint details;
} HWWatchpoint;

/* Maximum and current break/watch point counts */
extern int max_hw_bps, max_hw_wps;
extern GArray *hw_breakpoints, *hw_watchpoints;

#define cur_hw_wps   (hw_watchpoints->len)
#define cur_hw_bps   (hw_breakpoints->len)
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))

bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
int insert_hw_breakpoint(target_ulong pc);
int delete_hw_breakpoint(target_ulong pc);

bool check_watchpoint_in_range(int i, target_ulong addr);
CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);

/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
/*
 * Return the currently applicable offset between the system counter
 * and the counter for the specified timer, as used for direct register
 * accesses.
 */
uint64_t gt_direct_access_timer_offset(CPUARMState *env, int timeridx);

/*
 * Return a mask of ARMMMUIdxBit values corresponding to an "invalidate
 * all EL1" scope; this covers stage 1 and stage 2.
 */
int alle1_tlbmask(CPUARMState *env);

/* Set the float_status behaviour to match the Arm defaults */
void arm_set_default_fp_behaviours(float_status *s);
/* Set the float_status behaviour to match Arm FPCR.AH=1 behaviour */
void arm_set_ah_fp_behaviours(float_status *s);
/* Read the float_status info and return the appropriate FPSR value */
uint32_t vfp_get_fpsr_from_host(CPUARMState *env);
/* Clear the exception status flags from all float_status fields */
void vfp_clear_float_status_exc_flags(CPUARMState *env);
/*
 * Update float_status fields to handle the bits of the FPCR
 * specified by mask changing to the values in val.
 */
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);

#endif /* TARGET_ARM_INTERNALS_H */