/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_CPU_H
#define ARM_CPU_H

#include "kvm-consts.h"
#include "hw/registerfields.h"

#if defined(TARGET_AARCH64)
/* AArch64 definitions */
# define TARGET_LONG_BITS 64
#else
# define TARGET_LONG_BITS 32
#endif

/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)

#define CPUArchState struct CPUARMState

#include "qemu-common.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
#define EXCP_DATA_ABORT 4
#define EXCP_IRQ 5
#define EXCP_FIQ 6
#define EXCP_BKPT 7
#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
#define EXCP_HVC 11 /* Hypervisor Call */
#define EXCP_HYP_TRAP 12
#define EXCP_SMC 13 /* Secure Monitor Call */
#define EXCP_VIRQ 14
#define EXCP_VFIQ 15
#define EXCP_SEMIHOST 16 /* semihosting call */
#define EXCP_NOCP 17 /* v7M NOCP UsageFault */
#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */

#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
#define ARMV7M_EXCP_HARD 3
#define ARMV7M_EXCP_MEM 4
#define ARMV7M_EXCP_BUS 5
#define ARMV7M_EXCP_USAGE 6
#define ARMV7M_EXCP_SECURE 7
#define ARMV7M_EXCP_SVC 11
#define ARMV7M_EXCP_DEBUG 12
#define ARMV7M_EXCP_PENDSV 14
#define ARMV7M_EXCP_SYSTICK 15

/* For M profile, some registers are banked secure vs non-secure;
 * these are represented as a 2-element array where the first element
 * is the non-secure copy and the second is the secure copy.
 * When the CPU does not implement the security extension then
 * only the first element is used.
 * This means that the copy for the current security state can be
 * accessed via env->registerfield[env->v7m.secure] (whether the security
 * extension is implemented or not).
 */
enum {
    M_REG_NS = 0,
    M_REG_S = 1,
    M_REG_NUM_BANKS = 2,
};
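
/* For example (an illustrative sketch, not a definition used elsewhere
 * in this header; 'faultmask' is just one such banked field): reading
 * the current security state's copy of a banked M profile register,
 * whether or not the Security Extension is implemented, looks like
 *
 *     uint32_t fm = env->v7m.faultmask[env->v7m.secure];
 *
 * while a specific bank can be named explicitly:
 *
 *     env->v7m.faultmask[M_REG_S] = 1;
 */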

/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
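
/* As an illustrative sketch: for a uint64_t system register such as
 * cp15.scr_el3 in CPUARMState below, offsetoflow32(CPUARMState,
 * cp15.scr_el3) yields the offset of the architecturally-low 32 bits
 * on either host endianness, which is what an AArch32 accessor wants
 * to hand to TCG.
 */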

/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

#define NB_MMU_MODES 8
/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 14
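
/* Illustrative sketch of the round trip described above (the variable
 * names here are hypothetical):
 *
 *     // at translate time: drop the unused upper 6 and lower 14 bits
 *     word2 = (syndrome & ARM_INSN_START_WORD2_MASK)
 *             >> ARM_INSN_START_WORD2_SHIFT;
 *     // when restoring CPU state: shift it back up
 *     syndrome = word2 << ARM_INSN_START_WORD2_SHIFT;
 */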

/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of a FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/**
 * DynamicGDBXMLInfo:
 * @desc: Contains the XML descriptions.
 * @num_cpregs: Number of coprocessor registers seen by GDB.
 * @cpregs_keys: Array of the keys of the coprocessor registers,
 * in the same order as the cpregs appear in the XML description.
 */
typedef struct DynamicGDBXMLInfo {
    char *desc;
    int num_cpregs;
    uint32_t *cpregs_keys;
} DynamicGDBXMLInfo;

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
#define GTIMER_HYP 2
#define GTIMER_SEC 3
#define NUM_GTIMERS 4

typedef struct {
    uint64_t raw_tcr;
    uint32_t mask;
    uint32_t base_mask;
} TCR;

/* Define a maximum sized vector register.
 * For 32-bit, this is a 128-bit NEON/AdvSIMD register.
 * For 64-bit, this is a 2048-bit SVE register.
 *
 * Note that the mapping between S, D, and Q views of the register bank
 * differs between AArch64 and AArch32.
 * In AArch32:
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n / 2].d[n & 1]
 *  Sn = regs[n / 4].d[n % 4 / 2],
 *       bits 31..0 for even n, and bits 63..32 for odd n
 *       (and regs[16] to regs[31] are inaccessible)
 * In AArch64:
 *  Zn = regs[n].d[*]
 *  Qn = regs[n].d[1]:regs[n].d[0]
 *  Dn = regs[n].d[0]
 *  Sn = regs[n].d[0] bits 31..0
 *  Hn = regs[n].d[0] bits 15..0
 *
 * This corresponds to the architecturally defined mapping between
 * the two execution states, and means we do not need to explicitly
 * map these registers when changing states.
 *
 * Align the data for use with TCG host vector operations.
 */

#ifdef TARGET_AARCH64
# define ARM_MAX_VQ 16
#else
# define ARM_MAX_VQ 1
#endif

typedef struct ARMVectorReg {
    uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
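
/* As a worked example of the mapping documented above (illustrative
 * only; 'vfp.zregs' is the instance of this type in CPUARMState below):
 *
 *     uint64_t d7_aa32 = env->vfp.zregs[7 / 2].d[7 & 1];  // AArch32 D7
 *     uint64_t d7_aa64 = env->vfp.zregs[7].d[0];          // AArch64 D7
 */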

/* In AArch32 mode, predicate registers do not exist at all. */
#ifdef TARGET_AARCH64
typedef struct ARMPredicateReg {
    uint64_t p[2 * ARM_MAX_VQ / 8] QEMU_ALIGNED(16);
} ARMPredicateReg;
#endif


typedef struct CPUARMState {
    /* Regs for current mode. */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode. */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits. Use cpsr_{read,write} to access
       the whole CPSR. */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers. */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12. */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is the bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined. */
    uint32_t ZF; /* Z set if zero. */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
        /* MMU translation table base control. */
        TCR tcr_el[4];
        TCR vtcr_el2; /* Virtualization Translation Control. */
        uint32_t c2_data; /* MPU data cacheable bits. */
        uint32_t c2_insn; /* MPU instruction cacheable bits. */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register. */
        union { /* Fault status registers. */
            struct {
                uint64_t ifsr_ns;
                uint64_t ifsr_s;
            };
            struct {
                uint64_t ifsr32_el2;
            };
        };
        union {
            struct {
                uint64_t _unused_dfsr;
                uint64_t dfsr_ns;
                uint64_t hsr;
                uint64_t dfsr_s;
            };
            uint64_t esr_el[4];
        };
        uint32_t c6_region[8]; /* MPU base/size registers. */
        union { /* Fault address registers. */
            struct {
                uint64_t _unused_far0;
#ifdef HOST_WORDS_BIGENDIAN
                uint32_t ifar_ns;
                uint32_t dfar_ns;
                uint32_t ifar_s;
                uint32_t dfar_s;
#else
                uint32_t dfar_ns;
                uint32_t ifar_ns;
                uint32_t dfar_s;
                uint32_t ifar_s;
#endif
                uint64_t _unused_far3;
            };
            uint64_t far_el[4];
        };
        uint64_t hpfar_el2;
        uint64_t hstr_el2;
        union { /* Translation result. */
            struct {
                uint64_t _unused_par_0;
                uint64_t par_ns;
                uint64_t _unused_par_1;
                uint64_t par_s;
            };
            uint64_t par_el[4];
        };

        uint32_t c9_insn; /* Cache lockdown registers. */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint64_t c9_pmovsr; /* perf monitor overflow status */
        uint64_t c9_pmuserenr; /* perf monitor user enable */
        uint64_t c9_pmselr; /* perf monitor counter selection register */
        uint64_t c9_pminten; /* perf monitor interrupt enables */
        union { /* Memory attribute redirection */
            struct {
#ifdef HOST_WORDS_BIGENDIAN
                uint64_t _unused_mair_0;
                uint32_t mair1_ns;
                uint32_t mair0_ns;
                uint64_t _unused_mair_1;
                uint32_t mair1_s;
                uint32_t mair0_s;
#else
                uint64_t _unused_mair_0;
                uint32_t mair0_ns;
                uint32_t mair1_ns;
                uint64_t _unused_mair_1;
                uint32_t mair0_s;
                uint32_t mair1_s;
#endif
            };
            uint64_t mair_el[4];
        };
        union { /* vector base address register */
            struct {
                uint64_t _unused_vbar;
                uint64_t vbar_ns;
                uint64_t hvbar;
                uint64_t vbar_s;
            };
            uint64_t vbar_el[4];
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        struct { /* FCSE PID. */
            uint32_t fcseidr_ns;
            uint32_t fcseidr_s;
        };
        union { /* Context ID. */
            struct {
                uint64_t _unused_contextidr_0;
                uint64_t contextidr_ns;
                uint64_t _unused_contextidr_1;
                uint64_t contextidr_s;
            };
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            struct {
                uint64_t tpidrurw_ns;
                uint64_t tpidrprw_ns;
                uint64_t htpidr;
                uint64_t _tpidr_el3;
            };
            uint64_t tpidr_el[4];
        };
        /* The secure banks of these registers don't map anywhere */
        uint64_t tpidrurw_s;
        uint64_t tpidrprw_s;
        uint64_t tpidruro_s;

        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte. */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
        uint32_t c15_threadid; /* TI debugger thread-ID. */
        uint32_t c15_config_base_address; /* SCU base address. */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        uint64_t oslsr_el1; /* OS Lock Status */
        uint64_t mdcr_el2;
        uint64_t mdcr_el3;
        /* If the counter is enabled, this stores the last time the counter
         * was reset. Otherwise it stores the counter value.
         */
        uint64_t c15_ccnt;
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
    } cp15;

    struct {
        /* M profile has up to 4 stack pointers:
         * a Main Stack Pointer and a Process Stack Pointer for each
         * of the Secure and Non-Secure states. (If the CPU doesn't support
         * the security extension then it has only two SPs.)
         * In QEMU we always store the currently active SP in regs[13],
         * and the non-active SP for the current security state in
         * v7m.other_sp. The stack pointers for the inactive security state
         * are stored in other_ss_msp and other_ss_psp.
         * switch_v7m_security_state() is responsible for rearranging them
         * when we change security state.
         */
        uint32_t other_sp;
        uint32_t other_ss_msp;
        uint32_t other_ss_psp;
        uint32_t vecbase[M_REG_NUM_BANKS];
        uint32_t basepri[M_REG_NUM_BANKS];
        uint32_t control[M_REG_NUM_BANKS];
        uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
        uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
        uint32_t hfsr; /* HardFault Status */
        uint32_t dfsr; /* Debug Fault Status Register */
        uint32_t sfsr; /* Secure Fault Status Register */
        uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
        uint32_t bfar; /* BusFault Address */
        uint32_t sfar; /* Secure Fault Address Register */
        unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
        int exception;
        uint32_t primask[M_REG_NUM_BANKS];
        uint32_t faultmask[M_REG_NUM_BANKS];
        uint32_t aircr; /* only holds r/w state if security extn implemented */
        uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
        uint32_t csselr[M_REG_NUM_BANKS];
        uint32_t scr[M_REG_NUM_BANKS];
        uint32_t msplim[M_REG_NUM_BANKS];
        uint32_t psplim[M_REG_NUM_BANKS];
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Information associated with an SError */
    struct {
        uint8_t pending;
        uint8_t has_esr;
        uint64_t esr;
    } serror;

    /* State of our input IRQ/FIQ/VIRQ/VFIQ lines */
    uint32_t irq_line_state;

    /* Thumb-2 EE state. */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state. */
    struct {
        ARMVectorReg zregs[32];

#ifdef TARGET_AARCH64
        /* Store FFR as pregs[16] to make it easier to treat as any other. */
#define FFR_PRED_NUM 16
        ARMPredicateReg pregs[17];
        /* Scratch space for aa64 sve predicate temporary. */
        ARMPredicateReg preg_tmp;
#endif

        uint32_t xregs[16];
        /* We store these fpscr fields separately for convenience. */
        int vec_len;
        int vec_stride;

        /* Scratch space for aa32 neon expansion. */
        uint32_t scratch[8];

        /* There are a number of distinct float control structures:
         *
         *  fp_status: is the "normal" fp status.
         *  fp_status_fp16: used for half-precision calculations
         *  standard_fp_status: the ARM "Standard FPSCR Value"
         *
         * Half-precision operations are governed by a separate
         * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
         * status structure to control this.
         *
         * The "Standard FPSCR", ie default-NaN, flush-to-zero,
         * round-to-nearest, is used by any operations (generally
         * Neon) which the architecture defines as controlled by the
         * standard FPSCR value rather than the FPSCR.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the three fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status fp_status_f16;
        float_status standard_fp_status;

        /* ZCR_EL[1-3] */
        uint64_t zcr_el[4];
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;

    /* iwMMXt coprocessor state. */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation. */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    CPU_COMMON

    /* Fields after CPU_COMMON are preserved across CPU reset. */

    /* Internal CPU feature flags. */
    uint64_t features;

    /* PMSAv7 MPU */
    struct {
        uint32_t *drbar;
        uint32_t *drsr;
        uint32_t *dracr;
        uint32_t rnr[M_REG_NUM_BANKS];
    } pmsav7;

    /* PMSAv8 MPU */
    struct {
        /* The PMSAv8 implementation also shares some PMSAv7 config
         * and state:
         *  pmsav7.rnr (region number register)
         *  pmsav7_dregion (number of configured regions)
         */
        uint32_t *rbar[M_REG_NUM_BANKS];
        uint32_t *rlar[M_REG_NUM_BANKS];
        uint32_t mair0[M_REG_NUM_BANKS];
        uint32_t mair1[M_REG_NUM_BANKS];
    } pmsav8;

    /* v8M SAU */
    struct {
        uint32_t *rbar;
        uint32_t *rlar;
        uint32_t rnr;
        uint32_t ctrl;
    } sau;

    void *nvic;
    const struct arm_boot_info *boot_info;
    /* Store GICv3CPUState to access from this struct */
    void *gicv3state;
} CPUARMState;

/**
 * ARMELChangeHookFn:
 * type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
typedef struct ARMELChangeHook ARMELChangeHook;
struct ARMELChangeHook {
    ARMELChangeHookFn *hook;
    void *opaque;
    QLIST_ENTRY(ARMELChangeHook) node;
};

/* These values map onto the return values for
 * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;

typedef struct ARMISARegisters ARMISARegisters;

/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ARMCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    DynamicGDBXMLInfo dyn_xml;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;
    /* GPIO output for the PMU interrupt */
    qemu_irq pmu_interrupt;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* For v8M, pointer to the IDAU interface provided by board/SoC */
    Object *idau;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;

    /* Should CPU start in PSCI powered-off state? */
    bool start_powered_off;

    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;
    /* v8M SAU number of supported regions */
    uint32_t sau_sregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* For v8M, initial value of the Secure VTOR */
    uint32_t init_svtor;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* True if we tried kvm_arm_host_cpu_features() during CPU instance_init
     * and the probe failed (so we need to report the error in realize)
     */
    bool host_cpu_probe_failed;

    /* Specify the number of cores in this CPU cluster. Used for the L2CTLR
     * register.
     */
    int32_t core_count;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     * Some of these registers are split out into a substructure that
     * is shared with the translators to control the ISA.
     */
    struct ARMISARegisters {
        uint32_t id_isar0;
        uint32_t id_isar1;
        uint32_t id_isar2;
        uint32_t id_isar3;
        uint32_t id_isar4;
        uint32_t id_isar5;
        uint32_t id_isar6;
        uint32_t mvfr0;
        uint32_t mvfr1;
        uint32_t mvfr2;
        uint64_t id_aa64isar0;
        uint64_t id_aa64isar1;
        uint64_t id_aa64pfr0;
        uint64_t id_aa64pfr1;
    } isar;
    uint32_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint32_t ctr;
    uint32_t reset_sctlr;
    uint32_t id_pfr0;
    uint32_t id_pfr1;
    uint32_t id_dfr0;
    uint32_t pmceid0;
    uint32_t pmceid1;
    uint32_t id_afr0;
    uint32_t id_mmfr0;
    uint32_t id_mmfr1;
    uint32_t id_mmfr2;
    uint32_t id_mmfr3;
    uint32_t id_mmfr4;
    uint64_t id_aa64dfr0;
    uint64_t id_aa64dfr1;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t id_aa64mmfr0;
    uint64_t id_aa64mmfr1;
    uint32_t dbgdidr;
    uint32_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */

    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint32_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;
    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint32_t dcz_blocksize;
    uint64_t rvbar;

    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode). This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
    QLIST_HEAD(, ARMELChangeHook) el_change_hooks;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;

    /* Used to set the maximum vector length the cpu will support. */
    uint32_t sve_max_vq;
};

static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
{
    return container_of(env, ARMCPU, env);
}

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);

#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))

#define ENV_OFFSET offsetof(ARMCPU, env)

#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

/* Dynamically generates for the GDB stub an XML description of the
 * sysregs from the cp_regs hashtable. Returns the number of registered
 * sysregs.
 */
int arm_gen_dynamic_xml(CPUState *cpu);

/* Returns the dynamically generated XML for the GDB stub.
 * Returns a pointer to the XML contents for the specified XML file or NULL
 * if the XML name doesn't match the predefined one.
 */
const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);

#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64);
#else
static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
static inline void aarch64_sve_change_el(CPUARMState *env, int o,
                                         int n, bool a)
{ }
#endif

target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

int fp_exception_el(CPUARMState *env, int cur_el);
int sve_exception_el(CPUARMState *env, int cur_el);
uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* You can call this signal handler from your SIGBUS and SIGSEGV
 * signal handlers to inform the virtual CPU of exceptions. Non-zero
 * is returned if the signal was handled by the virtual CPU.
 */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/**
 * pmccntr_sync
 * @env: CPUARMState
 *
 * Synchronises the counter in the PMCCNTR. This must always be called twice,
 * once before any action that might affect the timer and again afterwards.
 * The function is used to swap the state of the register if required.
 * This only happens when not in user mode (!CONFIG_USER_ONLY).
 */
void pmccntr_sync(CPUARMState *env);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M (1U << 0)
#define SCTLR_A (1U << 1)
#define SCTLR_C (1U << 2)
#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA (1U << 3)
#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD (1U << 7) /* v8 onward */
#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED (1U << 8) /* v8 onward */
#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F (1U << 10) /* up to v6 */
#define SCTLR_SW (1U << 10) /* v7 onward */
#define SCTLR_Z (1U << 11)
#define SCTLR_I (1U << 12)
#define SCTLR_V (1U << 13)
#define SCTLR_RR (1U << 14) /* up to v7 */
#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI (1U << 16) /* v8 onward */
#define SCTLR_HA (1U << 17)
#define SCTLR_BR (1U << 17) /* PMSA only */
#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE (1U << 18) /* v8 onward */
#define SCTLR_WXN (1U << 19)
#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN (1U << 20) /* v7 onward */
#define SCTLR_FI (1U << 21)
#define SCTLR_U (1U << 22)
#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_VE (1U << 24) /* up to v7 */
#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE (1U << 25)
#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI (1U << 27)
#define SCTLR_TRE (1U << 28)
#define SCTLR_AFE (1U << 29)
#define SCTLR_TE (1U << 30)

#define CPTR_TCPAC (1U << 31)
#define CPTR_TTA (1U << 20)
#define CPTR_TFP (1U << 10)
#define CPTR_TZ (1U << 8) /* CPTR_EL2 */
#define CPTR_EZ (1U << 8) /* CPTR_EL3 */

#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
#define MDCR_SPME (1U << 17)
#define MDCR_SDD (1U << 16)
#define MDCR_SPD (3U << 14)
#define MDCR_TDRA (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA (1U << 9)
#define MDCR_TDE (1U << 8)
#define MDCR_HPME (1U << 7)
#define MDCR_TPM (1U << 6)
#define MDCR_TPMCR (1U << 5)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
                          | CPSR_NZCV)
/* Bits writable in user mode. */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits. MRS read as zero, MSR writes ignored. */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)

/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
#define XPSR_EXCP 0x1ffU
#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
#define XPSR_IT_2_7 CPSR_IT_2_7
#define XPSR_GE CPSR_GE
#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
#define XPSR_T (1U << 24) /* Not the same as CPSR_T! */
#define XPSR_IT_0_1 CPSR_IT_0_1
#define XPSR_Q CPSR_Q
#define XPSR_V CPSR_V
#define XPSR_C CPSR_C
#define XPSR_Z CPSR_Z
#define XPSR_N CPSR_N
#define XPSR_NZCV CPSR_NZCV
#define XPSR_IT CPSR_IT

#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0 (1U << 4)
#define TTBCR_PD1 (1U << 5)
#define TTBCR_EPD0 (1U << 7)
#define TTBCR_IRGN0 (3U << 8)
#define TTBCR_ORGN0 (3U << 10)
#define TTBCR_SH0 (3U << 12)
#define TTBCR_T1SZ (3U << 16)
#define TTBCR_A1 (1U << 22)
#define TTBCR_EPD1 (1U << 23)
#define TTBCR_IRGN1 (3U << 24)
#define TTBCR_ORGN1 (3U << 26)
#define TTBCR_SH1 (1U << 28)
#define TTBCR_EAE (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Write a new value to v7m.exception, thus transitioning into or out
 * of Handler mode; this may result in a change of active stack pointer.
 */
void write_v7m_exception(CPUARMState *env, uint32_t new_exc);

/* Map EL and handler into a PSTATE_MODE. */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}
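
/* For example, aarch64_pstate_mode(1, true) == PSTATE_MODE_EL1h and
 * aarch64_pstate_mode(3, false) == PSTATE_MODE_EL3t: the EL goes in
 * bits [3:2] and the SP selection ("h" vs "t") in bit 0.
 */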

/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif;
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}
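
/* Illustrative round trip through the cached fields (a sketch, not
 * code used elsewhere in QEMU):
 *
 *     pstate_write(env, PSTATE_MODE_EL1h | PSTATE_N | PSTATE_DAIF);
 *     assert(pstate_read(env) & PSTATE_N);              // via env->NF
 *     assert((pstate_read(env) & PSTATE_M) == PSTATE_MODE_EL1h);
 */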

/* Return the current CPSR value. */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,             /* trust values, do not switch reg banks */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);

/* Return the current xPSR value. */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | env->v7m.exception;
}

/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & XPSR_NZCV) {
        env->ZF = (~val) & XPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & XPSR_Q) {
        env->QF = ((val & XPSR_Q) != 0);
    }
    if (mask & XPSR_T) {
        env->thumb = ((val & XPSR_T) != 0);
    }
    if (mask & XPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & XPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & XPSR_EXCP) {
        /* Note that this only happens on exception exit */
        write_v7m_exception(env, val & XPSR_EXCP);
    }
}

#define HCR_VM (1ULL << 0)
#define HCR_SWIO (1ULL << 1)
#define HCR_PTW (1ULL << 2)
#define HCR_FMO (1ULL << 3)
#define HCR_IMO (1ULL << 4)
#define HCR_AMO (1ULL << 5)
#define HCR_VF (1ULL << 6)
#define HCR_VI (1ULL << 7)
#define HCR_VSE (1ULL << 8)
#define HCR_FB (1ULL << 9)
#define HCR_BSU_MASK (3ULL << 10)
#define HCR_DC (1ULL << 12)
#define HCR_TWI (1ULL << 13)
#define HCR_TWE (1ULL << 14)
#define HCR_TID0 (1ULL << 15)
#define HCR_TID1 (1ULL << 16)
#define HCR_TID2 (1ULL << 17)
#define HCR_TID3 (1ULL << 18)
#define HCR_TSC (1ULL << 19)
#define HCR_TIDCP (1ULL << 20)
#define HCR_TACR (1ULL << 21)
#define HCR_TSW (1ULL << 22)
#define HCR_TPC (1ULL << 23)
#define HCR_TPU (1ULL << 24)
#define HCR_TTLB (1ULL << 25)
#define HCR_TVM (1ULL << 26)
#define HCR_TGE (1ULL << 27)
#define HCR_TDZ (1ULL << 28)
#define HCR_HCD (1ULL << 29)
#define HCR_TRVM (1ULL << 30)
#define HCR_RW (1ULL << 31)
#define HCR_CD (1ULL << 32)
#define HCR_ID (1ULL << 33)
#define HCR_E2H (1ULL << 34)
/*
 * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
 * HCR_MASK and then clear it again if the feature bit is not set in
 * hcr_write().
 */
#define HCR_MASK ((1ULL << 34) - 1)

#define SCR_NS (1U << 0)
#define SCR_IRQ (1U << 1)
#define SCR_FIQ (1U << 2)
#define SCR_EA (1U << 3)
#define SCR_FW (1U << 4)
#define SCR_AW (1U << 5)
#define SCR_NET (1U << 6)
#define SCR_SMD (1U << 7)
#define SCR_HCE (1U << 8)
#define SCR_SIF (1U << 9)
#define SCR_RW (1U << 10)
#define SCR_ST (1U << 11)
#define SCR_TWI (1U << 12)
#define SCR_TWE (1U << 13)
#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)

/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* FPCR, Floating Point Control Register
 * FPSR, Floating Point Status Register
 *
 * For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07ff9f00

#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
#define FPCR_DN (1 << 25) /* Default NaN enable bit */

static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers. */
#define ARM_VFP_FPSID 0
#define ARM_VFP_FPSCR 1
#define ARM_VFP_MVFR2 5
#define ARM_VFP_MVFR1 6
#define ARM_VFP_MVFR0 7
#define ARM_VFP_FPEXC 8
#define ARM_VFP_FPINST 9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers. */
#define ARM_IWMMXT_wCID 0
#define ARM_IWMMXT_wCon 1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, USERSETMPEND, 1, 1)
FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1)
FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)
FIELD(V7M_CCR, BP, 18, 1)

/* V7M SCR bits */
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
FIELD(V7M_SCR, SLEEPDEEP, 2, 1)
FIELD(V7M_SCR, SLEEPDEEPS, 3, 1)
FIELD(V7M_SCR, SEVONPEND, 4, 1)

/* V7M AIRCR bits */
FIELD(V7M_AIRCR, VECTRESET, 0, 1)
FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
FIELD(V7M_AIRCR, PRIS, 14, 1)
FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
FIELD(V7M_AIRCR, VECTKEY, 16, 16)

/* V7M CFSR bits for MMFSR */
FIELD(V7M_CFSR, IACCVIOL, 0, 1)
FIELD(V7M_CFSR, DACCVIOL, 1, 1)
FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
FIELD(V7M_CFSR, MSTKERR, 4, 1)
FIELD(V7M_CFSR, MLSPERR, 5, 1)
FIELD(V7M_CFSR, MMARVALID, 7, 1)

/* V7M CFSR bits for BFSR */
FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)

/* V7M CFSR bits for UFSR */
FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)

/* V7M CFSR bit masks covering all of the subregister bits */
FIELD(V7M_CFSR, MMFSR, 0, 8)
FIELD(V7M_CFSR, BFSR, 8, 8)
FIELD(V7M_CFSR, UFSR, 16, 16)
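
/* These FIELD() definitions are used with the extract/deposit helpers
 * from "hw/registerfields.h"; for instance (an illustrative sketch,
 * assuming FIELD_EX32() from that header), reading the UsageFault
 * subregister out of the combined CFSR:
 *
 *     uint32_t ufsr = FIELD_EX32(env->v7m.cfsr[M_REG_NS], V7M_CFSR, UFSR);
 */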

/* V7M HFSR bits */
FIELD(V7M_HFSR, VECTTBL, 1, 1)
FIELD(V7M_HFSR, FORCED, 30, 1)
FIELD(V7M_HFSR, DEBUGEVT, 31, 1)

/* V7M DFSR bits */
FIELD(V7M_DFSR, HALTED, 0, 1)
FIELD(V7M_DFSR, BKPT, 1, 1)
FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)

/* V7M SFSR bits */
FIELD(V7M_SFSR, INVEP, 0, 1)
FIELD(V7M_SFSR, INVIS, 1, 1)
FIELD(V7M_SFSR, INVER, 2, 1)
FIELD(V7M_SFSR, AUVIOL, 3, 1)
FIELD(V7M_SFSR, INVTRAN, 4, 1)
FIELD(V7M_SFSR, LSPERR, 5, 1)
FIELD(V7M_SFSR, SFARVALID, 6, 1)
FIELD(V7M_SFSR, LSERR, 7, 1)

/* v7M MPU_CTRL bits */
FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)

/* v7M CLIDR bits */
FIELD(V7M_CLIDR, CTYPE_ALL, 0, 21)
FIELD(V7M_CLIDR, LOUIS, 21, 3)
FIELD(V7M_CLIDR, LOC, 24, 3)
FIELD(V7M_CLIDR, LOUU, 27, 3)
FIELD(V7M_CLIDR, ICB, 30, 2)

FIELD(V7M_CSSELR, IND, 0, 1)
FIELD(V7M_CSSELR, LEVEL, 1, 3)
/* We use the combination of InD and Level to index into cpu->ccsidr[];
 * define a mask for this and check that it doesn't permit running off
 * the end of the array.
 */
FIELD(V7M_CSSELR, INDEX, 0, 4)

/*
 * System register ID fields.
 */
FIELD(ID_ISAR0, SWAP, 0, 4)
FIELD(ID_ISAR0, BITCOUNT, 4, 4)
FIELD(ID_ISAR0, BITFIELD, 8, 4)
FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
FIELD(ID_ISAR0, COPROC, 16, 4)
FIELD(ID_ISAR0, DEBUG, 20, 4)
FIELD(ID_ISAR0, DIVIDE, 24, 4)

FIELD(ID_ISAR1, ENDIAN, 0, 4)
FIELD(ID_ISAR1, EXCEPT, 4, 4)
FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
FIELD(ID_ISAR1, EXTEND, 12, 4)
FIELD(ID_ISAR1, IFTHEN, 16, 4)
FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
FIELD(ID_ISAR1, INTERWORK, 24, 4)
FIELD(ID_ISAR1, JAZELLE, 28, 4)

FIELD(ID_ISAR2, LOADSTORE, 0, 4)
FIELD(ID_ISAR2, MEMHINT, 4, 4)
FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
FIELD(ID_ISAR2, MULT, 12, 4)
FIELD(ID_ISAR2, MULTS, 16, 4)
FIELD(ID_ISAR2, MULTU, 20, 4)
FIELD(ID_ISAR2, PSR_AR, 24, 4)
FIELD(ID_ISAR2, REVERSAL, 28, 4)

FIELD(ID_ISAR3, SATURATE, 0, 4)
FIELD(ID_ISAR3, SIMD, 4, 4)
FIELD(ID_ISAR3, SVC, 8, 4)
FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
FIELD(ID_ISAR3, TABBRANCH, 16, 4)
FIELD(ID_ISAR3, T32COPY, 20, 4)
FIELD(ID_ISAR3, TRUENOP, 24, 4)
FIELD(ID_ISAR3, T32EE, 28, 4)

FIELD(ID_ISAR4, UNPRIV, 0, 4)
FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
FIELD(ID_ISAR4, WRITEBACK, 8, 4)
FIELD(ID_ISAR4, SMC, 12, 4)
FIELD(ID_ISAR4, BARRIER, 16, 4)
FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
FIELD(ID_ISAR4, PSR_M, 24, 4)
FIELD(ID_ISAR4, SWP_FRAC, 28, 4)

FIELD(ID_ISAR5, SEVL, 0, 4)
FIELD(ID_ISAR5, AES, 4, 4)
FIELD(ID_ISAR5, SHA1, 8, 4)
FIELD(ID_ISAR5, SHA2, 12, 4)
FIELD(ID_ISAR5, CRC32, 16, 4)
FIELD(ID_ISAR5, RDM, 24, 4)
FIELD(ID_ISAR5, VCMA, 28, 4)

FIELD(ID_ISAR6, JSCVT, 0, 4)
FIELD(ID_ISAR6, DP, 4, 4)
FIELD(ID_ISAR6, FHM, 8, 4)
FIELD(ID_ISAR6, SB, 12, 4)
FIELD(ID_ISAR6, SPECRES, 16, 4)

FIELD(ID_AA64ISAR0, AES, 4, 4)
FIELD(ID_AA64ISAR0, SHA1, 8, 4)
FIELD(ID_AA64ISAR0, SHA2, 12, 4)
FIELD(ID_AA64ISAR0, CRC32, 16, 4)
FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
FIELD(ID_AA64ISAR0, RDM, 28, 4)
FIELD(ID_AA64ISAR0, SHA3, 32, 4)
FIELD(ID_AA64ISAR0, SM3, 36, 4)
FIELD(ID_AA64ISAR0, SM4, 40, 4)
FIELD(ID_AA64ISAR0, DP, 44, 4)
FIELD(ID_AA64ISAR0, FHM, 48, 4)
FIELD(ID_AA64ISAR0, TS, 52, 4)
FIELD(ID_AA64ISAR0, TLB, 56, 4)
FIELD(ID_AA64ISAR0, RNDR, 60, 4)

FIELD(ID_AA64ISAR1, DPB, 0, 4)
FIELD(ID_AA64ISAR1, APA, 4, 4)
FIELD(ID_AA64ISAR1, API, 8, 4)
FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
FIELD(ID_AA64ISAR1, FCMA, 16, 4)
FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
FIELD(ID_AA64ISAR1, GPA, 24, 4)
FIELD(ID_AA64ISAR1, GPI, 28, 4)
FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
FIELD(ID_AA64ISAR1, SB, 36, 4)
FIELD(ID_AA64ISAR1, SPECRES, 40, 4)

FIELD(ID_AA64PFR0, EL0, 0, 4)
FIELD(ID_AA64PFR0, EL1, 4, 4)
FIELD(ID_AA64PFR0, EL2, 8, 4)
FIELD(ID_AA64PFR0, EL3, 12, 4)
FIELD(ID_AA64PFR0, FP, 16, 4)
FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
FIELD(ID_AA64PFR0, GIC, 24, 4)
FIELD(ID_AA64PFR0, RAS, 28, 4)
FIELD(ID_AA64PFR0, SVE, 32, 4)

QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);

/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_VFP,
    ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_PMSA, /* no MMU; may have Memory Protection Unit */
    ARM_FEATURE_VFP3,
    ARM_FEATURE_VFP_FP16,
    ARM_FEATURE_NEON,
    ARM_FEATURE_M, /* Microcontroller profile. */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
    ARM_FEATURE_PMU, /* has PMU support */
    ARM_FEATURE_VBAR, /* has cp15 VBAR */
    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
    ARM_FEATURE_M_MAIN, /* M profile Main Extension */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to that level.
 * Unlike arm_is_secure() (which is always a question about the
 * _current_ state of the CPU) this doesn't care about the current
 * EL or mode.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return !(env->cp15.scr_el3 & SCR_NS);
    } else {
        /* If EL3 is not supported then the secure state is implementation
         * defined, in which case QEMU defaults to non-secure.
         */
        return false;
    }
}

/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                   (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return false;
}

/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
    if (arm_is_el3_or_mon(env)) {
        return true;
    }
    return arm_is_secure_below_el3(env);
}

#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}
#endif
 */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /* The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                    \
        if (_secure) {                                      \
            (_env)->cp15._regname##_s = (_val);             \
        } else {                                            \
            (_env)->cp15._regname##_ns = (_val);            \
        }                                                   \
    } while (0)

/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system.  These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)          \
    A32_BANKED_REG_GET((_env), _regname,                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                    \
    A32_BANKED_REG_SET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))
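/* Illustrative sketch (not part of the QEMU API): how the banked-register
 * macros above are typically used. This assumes a cp15 field following the
 * _s/_ns suffix convention; 'vbar' is used here purely as an example name.
 */
static inline uint64_t example_read_current_vbar(CPUARMState *env)
{
    /* Selects cp15.vbar_s or cp15.vbar_ns according to the current
     * security state (and whether EL3 is AArch32).
     */
    return A32_BANKED_CURRENT_REG_GET(env, vbar);
}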
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure);

/* Interface between CPU and Interrupt controller.  */
#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_can_take_pending_exception(void *opaque);
#else
static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    return true;
}
#endif
/**
 * armv7m_nvic_set_pending: mark the specified exception as pending
 * @opaque: the NVIC
 * @irq: the exception number to mark pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Marks the specified exception as pending. Note that we will assert()
 * if @secure is true and @irq does not specify one of the fixed set
 * of architecturally banked exceptions.
 */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_set_pending_derived: mark this derived exception as pending
 * @opaque: the NVIC
 * @irq: the exception number to mark pending
 * @secure: false for non-banked exceptions or for the nonsecure
 * version of a banked exception, true for the secure version of a banked
 * exception.
 *
 * Similar to armv7m_nvic_set_pending(), but specifically for derived
 * exceptions (exceptions generated in the course of trying to take
 * a different exception).
 */
void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_get_pending_irq_info: return highest priority pending
 *    exception, and whether it targets Secure state
 * @opaque: the NVIC
 * @pirq: set to pending exception number
 * @ptargets_secure: set to whether pending exception targets Secure
 *
 * This function writes the number of the highest priority pending
 * exception (the one which would be made active by
 * armv7m_nvic_acknowledge_irq()) to @pirq, and sets @ptargets_secure
 * to true if the current highest priority pending exception should
 * be taken to Secure state, false for NS.
 */
void armv7m_nvic_get_pending_irq_info(void *opaque, int *pirq,
                                      bool *ptargets_secure);
/**
 * armv7m_nvic_acknowledge_irq: make highest priority pending exception active
 * @opaque: the NVIC
 *
 * Move the current highest priority pending exception from the pending
 * state to the active state, and update v7m.exception to indicate that
 * it is the exception currently being handled.
 */
void armv7m_nvic_acknowledge_irq(void *opaque);
/**
 * armv7m_nvic_complete_irq: complete specified interrupt or exception
 * @opaque: the NVIC
 * @irq: the exception number to complete
 * @secure: true if this exception was secure
 *
 * Returns: -1 if the irq was not active
 *           1 if completing this irq brought us back to base (no active irqs)
 *           0 if there is still an irq active after this one was completed
 * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
 */
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure);
/**
 * armv7m_nvic_raw_execution_priority: return the raw execution priority
 * @opaque: the NVIC
 *
 * Returns: the raw execution priority as defined by the v8M architecture.
 * This is the execution priority minus the effects of AIRCR.PRIS,
 * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
 * (v8M ARM ARM I_PKLD.)
 */
int armv7m_nvic_raw_execution_priority(void *opaque);
/**
 * armv7m_nvic_neg_prio_requested: return true if the requested execution
 * priority is negative for the specified security state.
 * @opaque: the NVIC
 * @secure: the security state to test
 *
 * This corresponds to the pseudocode IsReqExecPriNeg().
 */
#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure);
#else
static inline bool armv7m_nvic_neg_prio_requested(void *opaque, bool secure)
{
    return false;
}
#endif
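/* Illustrative sketch (not a real QEMU code path): the expected lifecycle of
 * an M-profile exception through the interface above. 'nvic' stands in for
 * the opaque NVIC pointer (env->nvic in practice), and the @secure argument
 * to the complete step is approximated from the pending-irq query.
 */
static inline void example_nvic_exception_lifecycle(void *nvic)
{
    int irq;
    bool targets_secure;

    /* 1. Something pends an exception, eg a MemManage fault: */
    armv7m_nvic_set_pending(nvic, ARMV7M_EXCP_MEM, false);

    /* 2. When the CPU takes it, it queries and acknowledges it: */
    armv7m_nvic_get_pending_irq_info(nvic, &irq, &targets_secure);
    armv7m_nvic_acknowledge_irq(nvic);

    /* 3. On exception return, the exception is completed: */
    armv7m_nvic_complete_irq(nvic, irq, targets_secure);
}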
/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/* To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)

#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)   \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |   \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))

/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /* KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}
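/* Illustrative sketch: building a hashtable key for the AArch32 CNTFRQ
 * encoding (cp15, crn=14, crm=0, opc1=0, opc2=0), non-secure bank, 32-bit
 * access. For any key built this way, cpreg_to_kvm_id(kvm_to_cpreg_id(id))
 * is expected to round-trip.
 */
static inline uint32_t example_cntfrq_key(void)
{
    return ENCODE_CP_REG(15, 0, 1, 14, 0, 0, 0);
}

/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [11..8] indicate what behaviour
 * it has.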
Otherwise it is a simple cp reg, where CONST indicates that 1943 * TCG can assume the value to be constant (ie load at translate time) 1944 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END 1945 * indicates that the TB should not be ended after a write to this register 1946 * (the default is that the TB ends after cp writes). OVERRIDE permits 1947 * a register definition to override a previous definition for the 1948 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the 1949 * old must have the OVERRIDE bit set. 1950 * ALIAS indicates that this register is an alias view of some underlying 1951 * state which is also visible via another register, and that the other 1952 * register is handling migration and reset; registers marked ALIAS will not be 1953 * migrated but may have their state set by syncing of register state from KVM. 1954 * NO_RAW indicates that this register has no underlying state and does not 1955 * support raw access for state saving/loading; it will not be used for either 1956 * migration or KVM state synchronization. (Typically this is for "registers" 1957 * which are actually used as instructions for cache maintenance and so on.) 1958 * IO indicates that this register does I/O and therefore its accesses 1959 * need to be surrounded by gen_io_start()/gen_io_end(). In particular, 1960 * registers which implement clocks or timers require this. 1961 */ 1962 #define ARM_CP_SPECIAL 0x0001 1963 #define ARM_CP_CONST 0x0002 1964 #define ARM_CP_64BIT 0x0004 1965 #define ARM_CP_SUPPRESS_TB_END 0x0008 1966 #define ARM_CP_OVERRIDE 0x0010 1967 #define ARM_CP_ALIAS 0x0020 1968 #define ARM_CP_IO 0x0040 1969 #define ARM_CP_NO_RAW 0x0080 1970 #define ARM_CP_NOP (ARM_CP_SPECIAL | 0x0100) 1971 #define ARM_CP_WFI (ARM_CP_SPECIAL | 0x0200) 1972 #define ARM_CP_NZCV (ARM_CP_SPECIAL | 0x0300) 1973 #define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | 0x0400) 1974 #define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | 0x0500) 1975 #define ARM_LAST_SPECIAL ARM_CP_DC_ZVA 1976 #define ARM_CP_FPU 0x1000 1977 #define ARM_CP_SVE 0x2000 1978 #define ARM_CP_NO_GDB 0x4000 1979 /* Used only as a terminator for ARMCPRegInfo lists */ 1980 #define ARM_CP_SENTINEL 0xffff 1981 /* Mask of only the flag bits in a type field */ 1982 #define ARM_CP_FLAG_MASK 0x70ff 1983 1984 /* Valid values for ARMCPRegInfo state field, indicating which of 1985 * the AArch32 and AArch64 execution states this register is visible in. 1986 * If the reginfo doesn't explicitly specify then it is AArch32 only. 1987 * If the reginfo is declared to be visible in both states then a second 1988 * reginfo is synthesised for the AArch32 view of the AArch64 register, 1989 * such that the AArch32 view is the lower 32 bits of the AArch64 one. 1990 * Note that we rely on the values of these enums as we iterate through 1991 * the various states in some places. 1992 */ 1993 enum { 1994 ARM_CP_STATE_AA32 = 0, 1995 ARM_CP_STATE_AA64 = 1, 1996 ARM_CP_STATE_BOTH = 2, 1997 }; 1998 1999 /* ARM CP register secure state flags. These flags identify security state 2000 * attributes for a given CP register entry. 2001 * The existence of both or neither secure and non-secure flags indicates that 2002 * the register has both a secure and non-secure hash entry. A single one of 2003 * these flags causes the register to only be hashed for the specified 2004 * security state. 2005 * Although definitions may have any combination of the S/NS bits, each 2006 * registered entry will only have one to identify whether the entry is secure 2007 * or non-secure. 
2008 */ 2009 enum { 2010 ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */ 2011 ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */ 2012 }; 2013 2014 /* Return true if cptype is a valid type field. This is used to try to 2015 * catch errors where the sentinel has been accidentally left off the end 2016 * of a list of registers. 2017 */ 2018 static inline bool cptype_valid(int cptype) 2019 { 2020 return ((cptype & ~ARM_CP_FLAG_MASK) == 0) 2021 || ((cptype & ARM_CP_SPECIAL) && 2022 ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL)); 2023 } 2024 2025 /* Access rights: 2026 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM 2027 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and 2028 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1 2029 * (ie any of the privileged modes in Secure state, or Monitor mode). 2030 * If a register is accessible in one privilege level it's always accessible 2031 * in higher privilege levels too. Since "Secure PL1" also follows this rule 2032 * (ie anything visible in PL2 is visible in S-PL1, some things are only 2033 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the 2034 * terminology a little and call this PL3. 2035 * In AArch64 things are somewhat simpler as the PLx bits line up exactly 2036 * with the ELx exception levels. 2037 * 2038 * If access permissions for a register are more complex than can be 2039 * described with these bits, then use a laxer set of restrictions, and 2040 * do the more restrictive/complex check inside a helper function. 2041 */ 2042 #define PL3_R 0x80 2043 #define PL3_W 0x40 2044 #define PL2_R (0x20 | PL3_R) 2045 #define PL2_W (0x10 | PL3_W) 2046 #define PL1_R (0x08 | PL2_R) 2047 #define PL1_W (0x04 | PL2_W) 2048 #define PL0_R (0x02 | PL1_R) 2049 #define PL0_W (0x01 | PL1_W) 2050 2051 #define PL3_RW (PL3_R | PL3_W) 2052 #define PL2_RW (PL2_R | PL2_W) 2053 #define PL1_RW (PL1_R | PL1_W) 2054 #define PL0_RW (PL0_R | PL0_W) 2055 2056 /* Return the highest implemented Exception Level */ 2057 static inline int arm_highest_el(CPUARMState *env) 2058 { 2059 if (arm_feature(env, ARM_FEATURE_EL3)) { 2060 return 3; 2061 } 2062 if (arm_feature(env, ARM_FEATURE_EL2)) { 2063 return 2; 2064 } 2065 return 1; 2066 } 2067 2068 /* Return true if a v7M CPU is in Handler mode */ 2069 static inline bool arm_v7m_is_handler_mode(CPUARMState *env) 2070 { 2071 return env->v7m.exception != 0; 2072 } 2073 2074 /* Return the current Exception Level (as per ARMv8; note that this differs 2075 * from the ARMv7 Privilege Level). 
2076 */ 2077 static inline int arm_current_el(CPUARMState *env) 2078 { 2079 if (arm_feature(env, ARM_FEATURE_M)) { 2080 return arm_v7m_is_handler_mode(env) || 2081 !(env->v7m.control[env->v7m.secure] & 1); 2082 } 2083 2084 if (is_a64(env)) { 2085 return extract32(env->pstate, 2, 2); 2086 } 2087 2088 switch (env->uncached_cpsr & 0x1f) { 2089 case ARM_CPU_MODE_USR: 2090 return 0; 2091 case ARM_CPU_MODE_HYP: 2092 return 2; 2093 case ARM_CPU_MODE_MON: 2094 return 3; 2095 default: 2096 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 2097 /* If EL3 is 32-bit then all secure privileged modes run in 2098 * EL3 2099 */ 2100 return 3; 2101 } 2102 2103 return 1; 2104 } 2105 } 2106 2107 typedef struct ARMCPRegInfo ARMCPRegInfo; 2108 2109 typedef enum CPAccessResult { 2110 /* Access is permitted */ 2111 CP_ACCESS_OK = 0, 2112 /* Access fails due to a configurable trap or enable which would 2113 * result in a categorized exception syndrome giving information about 2114 * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6, 2115 * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or 2116 * PL1 if in EL0, otherwise to the current EL). 2117 */ 2118 CP_ACCESS_TRAP = 1, 2119 /* Access fails and results in an exception syndrome 0x0 ("uncategorized"). 2120 * Note that this is not a catch-all case -- the set of cases which may 2121 * result in this failure is specifically defined by the architecture. 2122 */ 2123 CP_ACCESS_TRAP_UNCATEGORIZED = 2, 2124 /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */ 2125 CP_ACCESS_TRAP_EL2 = 3, 2126 CP_ACCESS_TRAP_EL3 = 4, 2127 /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */ 2128 CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5, 2129 CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6, 2130 /* Access fails and results in an exception syndrome for an FP access, 2131 * trapped directly to EL2 or EL3 2132 */ 2133 CP_ACCESS_TRAP_FP_EL2 = 7, 2134 CP_ACCESS_TRAP_FP_EL3 = 8, 2135 } CPAccessResult; 2136 2137 /* Access functions for coprocessor registers. These cannot fail and 2138 * may not raise exceptions. 2139 */ 2140 typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); 2141 typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, 2142 uint64_t value); 2143 /* Access permission check functions for coprocessor registers. */ 2144 typedef CPAccessResult CPAccessFn(CPUARMState *env, 2145 const ARMCPRegInfo *opaque, 2146 bool isread); 2147 /* Hook function for register reset */ 2148 typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); 2149 2150 #define CP_ANY 0xff 2151 2152 /* Definition of an ARM coprocessor register */ 2153 struct ARMCPRegInfo { 2154 /* Name of register (useful mainly for debugging, need not be unique) */ 2155 const char *name; 2156 /* Location of register: coprocessor number and (crn,crm,opc1,opc2) 2157 * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a 2158 * 'wildcard' field -- any value of that field in the MRC/MCR insn 2159 * will be decoded to this register. The register read and write 2160 * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2 2161 * used by the program, so it is possible to register a wildcard and 2162 * then behave differently on read/write if necessary. 2163 * For 64 bit registers, only crm and opc1 are relevant; crn and opc2 2164 * must both be zero. 2165 * For AArch64-visible registers, opc0 is also used. 
2166 * Since there are no "coprocessors" in AArch64, cp is purely used as a 2167 * way to distinguish (for KVM's benefit) guest-visible system registers 2168 * from demuxed ones provided to preserve the "no side effects on 2169 * KVM register read/write from QEMU" semantics. cp==0x13 is guest 2170 * visible (to match KVM's encoding); cp==0 will be converted to 2171 * cp==0x13 when the ARMCPRegInfo is registered, for convenience. 2172 */ 2173 uint8_t cp; 2174 uint8_t crn; 2175 uint8_t crm; 2176 uint8_t opc0; 2177 uint8_t opc1; 2178 uint8_t opc2; 2179 /* Execution state in which this register is visible: ARM_CP_STATE_* */ 2180 int state; 2181 /* Register type: ARM_CP_* bits/values */ 2182 int type; 2183 /* Access rights: PL*_[RW] */ 2184 int access; 2185 /* Security state: ARM_CP_SECSTATE_* bits/values */ 2186 int secure; 2187 /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when 2188 * this register was defined: can be used to hand data through to the 2189 * register read/write functions, since they are passed the ARMCPRegInfo*. 2190 */ 2191 void *opaque; 2192 /* Value of this register, if it is ARM_CP_CONST. Otherwise, if 2193 * fieldoffset is non-zero, the reset value of the register. 2194 */ 2195 uint64_t resetvalue; 2196 /* Offset of the field in CPUARMState for this register. 2197 * 2198 * This is not needed if either: 2199 * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs 2200 * 2. both readfn and writefn are specified 2201 */ 2202 ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */ 2203 2204 /* Offsets of the secure and non-secure fields in CPUARMState for the 2205 * register if it is banked. These fields are only used during the static 2206 * registration of a register. During hashing the bank associated 2207 * with a given security state is copied to fieldoffset which is used from 2208 * there on out. 2209 * 2210 * It is expected that register definitions use either fieldoffset or 2211 * bank_fieldoffsets in the definition but not both. It is also expected 2212 * that both bank offsets are set when defining a banked register. This 2213 * use indicates that a register is banked. 2214 */ 2215 ptrdiff_t bank_fieldoffsets[2]; 2216 2217 /* Function for making any access checks for this register in addition to 2218 * those specified by the 'access' permissions bits. If NULL, no extra 2219 * checks required. The access check is performed at runtime, not at 2220 * translate time. 2221 */ 2222 CPAccessFn *accessfn; 2223 /* Function for handling reads of this register. If NULL, then reads 2224 * will be done by loading from the offset into CPUARMState specified 2225 * by fieldoffset. 2226 */ 2227 CPReadFn *readfn; 2228 /* Function for handling writes of this register. If NULL, then writes 2229 * will be done by writing to the offset into CPUARMState specified 2230 * by fieldoffset. 2231 */ 2232 CPWriteFn *writefn; 2233 /* Function for doing a "raw" read; used when we need to copy 2234 * coprocessor state to the kernel for KVM or out for 2235 * migration. This only needs to be provided if there is also a 2236 * readfn and it has side effects (for instance clear-on-read bits). 2237 */ 2238 CPReadFn *raw_readfn; 2239 /* Function for doing a "raw" write; used when we need to copy KVM 2240 * kernel coprocessor state into userspace, or for inbound 2241 * migration. This only needs to be provided if there is also a 2242 * writefn and it masks out "unwritable" bits or has write-one-to-clear 2243 * or similar behaviour. 
 */
    CPWriteFn *raw_writefn;
    /* Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;
};

/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, 0);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
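/* Illustrative sketch (hypothetical register encoding): defining and
 * registering a trivial constant cp register via the interface above.
 * A real definition would use a genuine encoding, and non-CONST registers
 * would supply a fieldoffset or read/write functions.
 */
static inline void example_define_regs(ARMCPU *cpu)
{
    static const ARMCPRegInfo example_cp_reginfo[] = {
        { .name = "EXAMPLE", .cp = 15, .crn = 13, .crm = 0,
          .opc1 = 0, .opc2 = 7, .access = PL1_R,
          .type = ARM_CP_CONST, .resetvalue = 0 },
        REGINFO_SENTINEL /* terminator; see cptype_valid() above */
    };

    define_arm_cp_regs(cpu, example_cp_reginfo);
}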
/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non-zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}

/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);

/**
 * write_list_to_cpustate:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the CPUARMState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the CPUARMState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu);

#define ARM_CPUID_TI915T      0x54029152
#define ARM_CPUID_TI925T      0x54029252

#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
 * have to support 1K tiny pages.
 */
#define TARGET_PAGE_BITS_VARY
#define TARGET_PAGE_BITS_MIN 10
#endif

#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

/**
 * arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
 * Depending on the values of HCR_EL2.E2H and TGE, this may be
 * "behaves as 1 for all purposes other than direct read/write" or
 * "behaves as 0 for all purposes other than direct read/write"
 */
static inline bool arm_hcr_el2_imo(CPUARMState *env)
{
    switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
    case HCR_TGE:
        return true;
    case HCR_TGE | HCR_E2H:
        return false;
    default:
        return env->cp15.hcr_el2 & HCR_IMO;
    }
}

/**
 * arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
 */
static inline bool arm_hcr_el2_fmo(CPUARMState *env)
{
    switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
    case HCR_TGE:
        return true;
    case HCR_TGE | HCR_E2H:
        return false;
    default:
        return env->cp15.hcr_el2 & HCR_FMO;
    }
}

/**
 * arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
 */
static inline bool arm_hcr_el2_amo(CPUARMState *env)
{
    switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
    case HCR_TGE:
        return true;
    case HCR_TGE | HCR_E2H:
        return false;
    default:
        return env->cp15.hcr_el2 & HCR_AMO;
    }
}

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool pstate_unmasked;
    int8_t unmasked = 0;

    /* Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken but left
     * pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when virtualized (EL2 in use)
             * and non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when virtualized (EL2 in use)
             * and non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /* Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure) {
                unmasked = 1;
            }
        } else {
            /* The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /* If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = arm_hcr_el2_fmo(env);
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /* When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /* When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = arm_hcr_el2_imo(env);
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = 1;
            }
        }
    }

    /* The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}

#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_ARM_CPU

#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
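/* Illustrative sketch (a simplified version of the sort of check done at
 * interrupt delivery time, not a drop-in implementation): a physical IRQ
 * is delivered only if it is unmasked at its routed target EL.
 */
static inline bool example_should_take_irq(CPUState *cs)
{
    CPUARMState *env = cs->env_ptr;
    uint32_t target_el = arm_phys_excp_target_el(cs, EXCP_IRQ,
                                                 arm_current_el(env),
                                                 arm_is_secure(env));

    return arm_excp_unmasked(cs, EXCP_IRQ, target_el);
}

/* Note also that ARM_CPU_TYPE_NAME("cortex-a15"), for example, expands to
 * the QOM type name "cortex-a15-" TYPE_ARM_CPU.
 */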
The only way to do a stage 1 translation without 2545 * the immediate stage 2 translation is via the ATS or AT system insns, 2546 * which can be slow-pathed and always do a page table walk. 2547 * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3" 2548 * translation regimes, because they map reasonably well to each other 2549 * and they can't both be active at the same time. 2550 * This gives us the following list of mmu_idx values: 2551 * 2552 * NS EL0 (aka NS PL0) stage 1+2 2553 * NS EL1 (aka NS PL1) stage 1+2 2554 * NS EL2 (aka NS PL2) 2555 * S EL3 (aka S PL1) 2556 * S EL0 (aka S PL0) 2557 * S EL1 (not used if EL3 is 32 bit) 2558 * NS EL0+1 stage 2 2559 * 2560 * (The last of these is an mmu_idx because we want to be able to use the TLB 2561 * for the accesses done as part of a stage 1 page table walk, rather than 2562 * having to walk the stage 2 page table over and over.) 2563 * 2564 * R profile CPUs have an MPU, but can use the same set of MMU indexes 2565 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and 2566 * NS EL2 if we ever model a Cortex-R52). 2567 * 2568 * M profile CPUs are rather different as they do not have a true MMU. 2569 * They have the following different MMU indexes: 2570 * User 2571 * Privileged 2572 * User, execution priority negative (ie the MPU HFNMIENA bit may apply) 2573 * Privileged, execution priority negative (ditto) 2574 * If the CPU supports the v8M Security Extension then there are also: 2575 * Secure User 2576 * Secure Privileged 2577 * Secure User, execution priority negative 2578 * Secure Privileged, execution priority negative 2579 * 2580 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code 2581 * are not quite the same -- different CPU types (most notably M profile 2582 * vs A/R profile) would like to use MMU indexes with different semantics, 2583 * but since we don't ever need to use all of those in a single CPU we 2584 * can avoid setting NB_MMU_MODES to more than 8. The lower bits of 2585 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always 2586 * the same for any particular CPU. 2587 * Variables of type ARMMUIdx are always full values, and the core 2588 * index values are in variables of type 'int'. 2589 * 2590 * Our enumeration includes at the end some entries which are not "true" 2591 * mmu_idx values in that they don't have corresponding TLBs and are only 2592 * valid for doing slow path page table walks. 2593 * 2594 * The constant names here are patterned after the general style of the names 2595 * of the AT/ATS operations. 2596 * The values used are carefully arranged to make mmu_idx => EL lookup easy. 2597 * For M profile we arrange them to have a bit for priv, a bit for negpri 2598 * and a bit for secure. 
 */
#define ARM_MMU_IDX_A 0x10 /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */

/* meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
#define ARM_MMU_IDX_M_S 0x4

#define ARM_MMU_IDX_TYPE_MASK (~0x7)
#define ARM_MMU_IDX_COREIDX_MASK 0x7

typedef enum ARMMMUIdx {
    ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
    ARMMMUIdx_MUserNegPri = 2 | ARM_MMU_IDX_M,
    ARMMMUIdx_MPrivNegPri = 3 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSUser = 4 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSPriv = 5 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSUserNegPri = 6 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSPrivNegPri = 7 | ARM_MMU_IDX_M,
    /* Indexes below here don't have TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
} ARMMMUIdx;

/* Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
typedef enum ARMMMUIdxBit {
    ARMMMUIdxBit_S12NSE0 = 1 << 0,
    ARMMMUIdxBit_S12NSE1 = 1 << 1,
    ARMMMUIdxBit_S1E2 = 1 << 2,
    ARMMMUIdxBit_S1E3 = 1 << 3,
    ARMMMUIdxBit_S1SE0 = 1 << 4,
    ARMMMUIdxBit_S1SE1 = 1 << 5,
    ARMMMUIdxBit_S2NS = 1 << 6,
    ARMMMUIdxBit_MUser = 1 << 0,
    ARMMMUIdxBit_MPriv = 1 << 1,
    ARMMMUIdxBit_MUserNegPri = 1 << 2,
    ARMMMUIdxBit_MPrivNegPri = 1 << 3,
    ARMMMUIdxBit_MSUser = 1 << 4,
    ARMMMUIdxBit_MSPriv = 1 << 5,
    ARMMMUIdxBit_MSUserNegPri = 1 << 6,
    ARMMMUIdxBit_MSPrivNegPri = 1 << 7,
} ARMMMUIdxBit;

#define MMU_USER_IDX 0

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
    case ARM_MMU_IDX_A:
        return mmu_idx & 3;
    case ARM_MMU_IDX_M:
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    default:
        g_assert_not_reached();
    }
}

/* Return the MMU index for a v7M CPU in the specified security and
 * privilege state
 */
static inline ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                              bool secstate,
                                                              bool priv)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
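/* Illustrative sketch: converting an ARMMMUIdx to the core QEMU TLB index
 * using the helpers above. ARMMMUIdx_S1E2 is 2 | ARM_MMU_IDX_A, so its core
 * index is 2; the corresponding ARMMMUIdxBit_S1E2 value is what would be
 * passed to tlb_flush_by_mmuidx() and friends.
 */
static inline int example_core_idx_for_el2(void)
{
    return arm_to_core_mmu_idx(ARMMMUIdx_S1E2);   /* yields 2 */
}

/* Return the MMU index for a v7M CPU in the specified security state */
static inline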
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, 2711 bool secstate) 2712 { 2713 bool priv = arm_current_el(env) != 0; 2714 2715 return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv); 2716 } 2717 2718 /* Determine the current mmu_idx to use for normal loads/stores */ 2719 static inline int cpu_mmu_index(CPUARMState *env, bool ifetch) 2720 { 2721 int el = arm_current_el(env); 2722 2723 if (arm_feature(env, ARM_FEATURE_M)) { 2724 ARMMMUIdx mmu_idx = arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 2725 2726 return arm_to_core_mmu_idx(mmu_idx); 2727 } 2728 2729 if (el < 2 && arm_is_secure_below_el3(env)) { 2730 return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el); 2731 } 2732 return el; 2733 } 2734 2735 /* Indexes used when registering address spaces with cpu_address_space_init */ 2736 typedef enum ARMASIdx { 2737 ARMASIdx_NS = 0, 2738 ARMASIdx_S = 1, 2739 } ARMASIdx; 2740 2741 /* Return the Exception Level targeted by debug exceptions. */ 2742 static inline int arm_debug_target_el(CPUARMState *env) 2743 { 2744 bool secure = arm_is_secure(env); 2745 bool route_to_el2 = false; 2746 2747 if (arm_feature(env, ARM_FEATURE_EL2) && !secure) { 2748 route_to_el2 = env->cp15.hcr_el2 & HCR_TGE || 2749 env->cp15.mdcr_el2 & MDCR_TDE; 2750 } 2751 2752 if (route_to_el2) { 2753 return 2; 2754 } else if (arm_feature(env, ARM_FEATURE_EL3) && 2755 !arm_el_is_aa64(env, 3) && secure) { 2756 return 3; 2757 } else { 2758 return 1; 2759 } 2760 } 2761 2762 static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu) 2763 { 2764 /* If all the CLIDR.Ctypem bits are 0 there are no caches, and 2765 * CSSELR is RAZ/WI. 2766 */ 2767 return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0; 2768 } 2769 2770 /* See AArch64.GenerateDebugExceptionsFrom() in ARM ARM pseudocode */ 2771 static inline bool aa64_generate_debug_exceptions(CPUARMState *env) 2772 { 2773 int cur_el = arm_current_el(env); 2774 int debug_el; 2775 2776 if (cur_el == 3) { 2777 return false; 2778 } 2779 2780 /* MDCR_EL3.SDD disables debug events from Secure state */ 2781 if (arm_is_secure_below_el3(env) 2782 && extract32(env->cp15.mdcr_el3, 16, 1)) { 2783 return false; 2784 } 2785 2786 /* 2787 * Same EL to same EL debug exceptions need MDSCR_KDE enabled 2788 * while not masking the (D)ebug bit in DAIF. 2789 */ 2790 debug_el = arm_debug_target_el(env); 2791 2792 if (cur_el == debug_el) { 2793 return extract32(env->cp15.mdscr_el1, 13, 1) 2794 && !(env->daif & PSTATE_D); 2795 } 2796 2797 /* Otherwise the debug target needs to be a higher EL */ 2798 return debug_el > cur_el; 2799 } 2800 2801 static inline bool aa32_generate_debug_exceptions(CPUARMState *env) 2802 { 2803 int el = arm_current_el(env); 2804 2805 if (el == 0 && arm_el_is_aa64(env, 1)) { 2806 return aa64_generate_debug_exceptions(env); 2807 } 2808 2809 if (arm_is_secure(env)) { 2810 int spd; 2811 2812 if (el == 0 && (env->cp15.sder & 1)) { 2813 /* SDER.SUIDEN means debug exceptions from Secure EL0 2814 * are always enabled. Otherwise they are controlled by 2815 * SDCR.SPD like those from other Secure ELs. 2816 */ 2817 return true; 2818 } 2819 2820 spd = extract32(env->cp15.mdcr_el3, 14, 2); 2821 switch (spd) { 2822 case 1: 2823 /* SPD == 0b01 is reserved, but behaves as 0b00. */ 2824 case 0: 2825 /* For 0b00 we return true if external secure invasive debug 2826 * is enabled. On real hardware this is controlled by external 2827 * signals to the core. QEMU always permits debug, and behaves 2828 * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high. 
2829 */ 2830 return true; 2831 case 2: 2832 return false; 2833 case 3: 2834 return true; 2835 } 2836 } 2837 2838 return el != 2; 2839 } 2840 2841 /* Return true if debugging exceptions are currently enabled. 2842 * This corresponds to what in ARM ARM pseudocode would be 2843 * if UsingAArch32() then 2844 * return AArch32.GenerateDebugExceptions() 2845 * else 2846 * return AArch64.GenerateDebugExceptions() 2847 * We choose to push the if() down into this function for clarity, 2848 * since the pseudocode has it at all callsites except for the one in 2849 * CheckSoftwareStep(), where it is elided because both branches would 2850 * always return the same value. 2851 */ 2852 static inline bool arm_generate_debug_exceptions(CPUARMState *env) 2853 { 2854 if (env->aarch64) { 2855 return aa64_generate_debug_exceptions(env); 2856 } else { 2857 return aa32_generate_debug_exceptions(env); 2858 } 2859 } 2860 2861 /* Is single-stepping active? (Note that the "is EL_D AArch64?" check 2862 * implicitly means this always returns false in pre-v8 CPUs.) 2863 */ 2864 static inline bool arm_singlestep_active(CPUARMState *env) 2865 { 2866 return extract32(env->cp15.mdscr_el1, 0, 1) 2867 && arm_el_is_aa64(env, arm_debug_target_el(env)) 2868 && arm_generate_debug_exceptions(env); 2869 } 2870 2871 static inline bool arm_sctlr_b(CPUARMState *env) 2872 { 2873 return 2874 /* We need not implement SCTLR.ITD in user-mode emulation, so 2875 * let linux-user ignore the fact that it conflicts with SCTLR_B. 2876 * This lets people run BE32 binaries with "-cpu any". 2877 */ 2878 #ifndef CONFIG_USER_ONLY 2879 !arm_feature(env, ARM_FEATURE_V7) && 2880 #endif 2881 (env->cp15.sctlr_el[1] & SCTLR_B) != 0; 2882 } 2883 2884 /* Return true if the processor is in big-endian mode. */ 2885 static inline bool arm_cpu_data_is_big_endian(CPUARMState *env) 2886 { 2887 int cur_el; 2888 2889 /* In 32bit endianness is determined by looking at CPSR's E bit */ 2890 if (!is_a64(env)) { 2891 return 2892 #ifdef CONFIG_USER_ONLY 2893 /* In system mode, BE32 is modelled in line with the 2894 * architecture (as word-invariant big-endianness), where loads 2895 * and stores are done little endian but from addresses which 2896 * are adjusted by XORing with the appropriate constant. So the 2897 * endianness to use for the raw data access is not affected by 2898 * SCTLR.B. 2899 * In user mode, however, we model BE32 as byte-invariant 2900 * big-endianness (because user-only code cannot tell the 2901 * difference), and so we need to use a data access endianness 2902 * that depends on SCTLR.B. 2903 */ 2904 arm_sctlr_b(env) || 2905 #endif 2906 ((env->uncached_cpsr & CPSR_E) ? 1 : 0); 2907 } 2908 2909 cur_el = arm_current_el(env); 2910 2911 if (cur_el == 0) { 2912 return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0; 2913 } 2914 2915 return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0; 2916 } 2917 2918 #include "exec/cpu-all.h" 2919 2920 /* Bit usage in the TB flags field: bit 31 indicates whether we are 2921 * in 32 or 64 bit mode. The meaning of the other bits depends on that. 2922 * We put flags which are shared between 32 and 64 bit mode at the top 2923 * of the word, and flags which apply to only one mode at the bottom. 
2924 */ 2925 #define ARM_TBFLAG_AARCH64_STATE_SHIFT 31 2926 #define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT) 2927 #define ARM_TBFLAG_MMUIDX_SHIFT 28 2928 #define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT) 2929 #define ARM_TBFLAG_SS_ACTIVE_SHIFT 27 2930 #define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT) 2931 #define ARM_TBFLAG_PSTATE_SS_SHIFT 26 2932 #define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT) 2933 /* Target EL if we take a floating-point-disabled exception */ 2934 #define ARM_TBFLAG_FPEXC_EL_SHIFT 24 2935 #define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT) 2936 2937 /* Bit usage when in AArch32 state: */ 2938 #define ARM_TBFLAG_THUMB_SHIFT 0 2939 #define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT) 2940 #define ARM_TBFLAG_VECLEN_SHIFT 1 2941 #define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT) 2942 #define ARM_TBFLAG_VECSTRIDE_SHIFT 4 2943 #define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT) 2944 #define ARM_TBFLAG_VFPEN_SHIFT 7 2945 #define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT) 2946 #define ARM_TBFLAG_CONDEXEC_SHIFT 8 2947 #define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT) 2948 #define ARM_TBFLAG_SCTLR_B_SHIFT 16 2949 #define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT) 2950 /* We store the bottom two bits of the CPAR as TB flags and handle 2951 * checks on the other bits at runtime 2952 */ 2953 #define ARM_TBFLAG_XSCALE_CPAR_SHIFT 17 2954 #define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT) 2955 /* Indicates whether cp register reads and writes by guest code should access 2956 * the secure or nonsecure bank of banked registers; note that this is not 2957 * the same thing as the current security state of the processor! 
2958 */ 2959 #define ARM_TBFLAG_NS_SHIFT 19 2960 #define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT) 2961 #define ARM_TBFLAG_BE_DATA_SHIFT 20 2962 #define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT) 2963 /* For M profile only, Handler (ie not Thread) mode */ 2964 #define ARM_TBFLAG_HANDLER_SHIFT 21 2965 #define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT) 2966 /* For M profile only, whether we should generate stack-limit checks */ 2967 #define ARM_TBFLAG_STACKCHECK_SHIFT 22 2968 #define ARM_TBFLAG_STACKCHECK_MASK (1 << ARM_TBFLAG_STACKCHECK_SHIFT) 2969 2970 /* Bit usage when in AArch64 state */ 2971 #define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */ 2972 #define ARM_TBFLAG_TBI0_MASK (0x1ull << ARM_TBFLAG_TBI0_SHIFT) 2973 #define ARM_TBFLAG_TBI1_SHIFT 1 /* TBI1 for EL0/1 */ 2974 #define ARM_TBFLAG_TBI1_MASK (0x1ull << ARM_TBFLAG_TBI1_SHIFT) 2975 #define ARM_TBFLAG_SVEEXC_EL_SHIFT 2 2976 #define ARM_TBFLAG_SVEEXC_EL_MASK (0x3 << ARM_TBFLAG_SVEEXC_EL_SHIFT) 2977 #define ARM_TBFLAG_ZCR_LEN_SHIFT 4 2978 #define ARM_TBFLAG_ZCR_LEN_MASK (0xf << ARM_TBFLAG_ZCR_LEN_SHIFT) 2979 2980 /* some convenience accessor macros */ 2981 #define ARM_TBFLAG_AARCH64_STATE(F) \ 2982 (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT) 2983 #define ARM_TBFLAG_MMUIDX(F) \ 2984 (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT) 2985 #define ARM_TBFLAG_SS_ACTIVE(F) \ 2986 (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT) 2987 #define ARM_TBFLAG_PSTATE_SS(F) \ 2988 (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT) 2989 #define ARM_TBFLAG_FPEXC_EL(F) \ 2990 (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT) 2991 #define ARM_TBFLAG_THUMB(F) \ 2992 (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT) 2993 #define ARM_TBFLAG_VECLEN(F) \ 2994 (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT) 2995 #define ARM_TBFLAG_VECSTRIDE(F) \ 2996 (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT) 2997 #define ARM_TBFLAG_VFPEN(F) \ 2998 (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT) 2999 #define ARM_TBFLAG_CONDEXEC(F) \ 3000 (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT) 3001 #define ARM_TBFLAG_SCTLR_B(F) \ 3002 (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT) 3003 #define ARM_TBFLAG_XSCALE_CPAR(F) \ 3004 (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT) 3005 #define ARM_TBFLAG_NS(F) \ 3006 (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT) 3007 #define ARM_TBFLAG_BE_DATA(F) \ 3008 (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT) 3009 #define ARM_TBFLAG_HANDLER(F) \ 3010 (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT) 3011 #define ARM_TBFLAG_STACKCHECK(F) \ 3012 (((F) & ARM_TBFLAG_STACKCHECK_MASK) >> ARM_TBFLAG_STACKCHECK_SHIFT) 3013 #define ARM_TBFLAG_TBI0(F) \ 3014 (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT) 3015 #define ARM_TBFLAG_TBI1(F) \ 3016 (((F) & ARM_TBFLAG_TBI1_MASK) >> ARM_TBFLAG_TBI1_SHIFT) 3017 #define ARM_TBFLAG_SVEEXC_EL(F) \ 3018 (((F) & ARM_TBFLAG_SVEEXC_EL_MASK) >> ARM_TBFLAG_SVEEXC_EL_SHIFT) 3019 #define ARM_TBFLAG_ZCR_LEN(F) \ 3020 (((F) & ARM_TBFLAG_ZCR_LEN_MASK) >> ARM_TBFLAG_ZCR_LEN_SHIFT) 3021 3022 static inline bool bswap_code(bool sctlr_b) 3023 { 3024 #ifdef CONFIG_USER_ONLY 3025 /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian. 
3026 * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0 3027 * would also end up as a mixed-endian mode with BE code, LE data. 3028 */ 3029 return 3030 #ifdef TARGET_WORDS_BIGENDIAN 3031 1 ^ 3032 #endif 3033 sctlr_b; 3034 #else 3035 /* All code access in ARM is little endian, and there are no loaders 3036 * doing swaps that need to be reversed 3037 */ 3038 return 0; 3039 #endif 3040 } 3041 3042 #ifdef CONFIG_USER_ONLY 3043 static inline bool arm_cpu_bswap_data(CPUARMState *env) 3044 { 3045 return 3046 #ifdef TARGET_WORDS_BIGENDIAN 3047 1 ^ 3048 #endif 3049 arm_cpu_data_is_big_endian(env); 3050 } 3051 #endif 3052 3053 #ifndef CONFIG_USER_ONLY 3054 /** 3055 * arm_regime_tbi0: 3056 * @env: CPUARMState 3057 * @mmu_idx: MMU index indicating required translation regime 3058 * 3059 * Extracts the TBI0 value from the appropriate TCR for the current EL 3060 * 3061 * Returns: the TBI0 value. 3062 */ 3063 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx); 3064 3065 /** 3066 * arm_regime_tbi1: 3067 * @env: CPUARMState 3068 * @mmu_idx: MMU index indicating required translation regime 3069 * 3070 * Extracts the TBI1 value from the appropriate TCR for the current EL 3071 * 3072 * Returns: the TBI1 value. 3073 */ 3074 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx); 3075 #else 3076 /* We can't handle tagged addresses properly in user-only mode */ 3077 static inline uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx) 3078 { 3079 return 0; 3080 } 3081 3082 static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx) 3083 { 3084 return 0; 3085 } 3086 #endif 3087 3088 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 3089 target_ulong *cs_base, uint32_t *flags); 3090 3091 enum { 3092 QEMU_PSCI_CONDUIT_DISABLED = 0, 3093 QEMU_PSCI_CONDUIT_SMC = 1, 3094 QEMU_PSCI_CONDUIT_HVC = 2, 3095 }; 3096 3097 #ifndef CONFIG_USER_ONLY 3098 /* Return the address space index to use for a memory access */ 3099 static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) 3100 { 3101 return attrs.secure ? ARMASIdx_S : ARMASIdx_NS; 3102 } 3103 3104 /* Return the AddressSpace to use for a memory access 3105 * (which depends on whether the access is S or NS, and whether 3106 * the board gave us a separate AddressSpace for S accesses). 3107 */ 3108 static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs) 3109 { 3110 return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs)); 3111 } 3112 #endif 3113 3114 /** 3115 * arm_register_pre_el_change_hook: 3116 * Register a hook function which will be called immediately before this 3117 * CPU changes exception level or mode. The hook function will be 3118 * passed a pointer to the ARMCPU and the opaque data pointer passed 3119 * to this function when the hook was registered. 3120 * 3121 * Note that if a pre-change hook is called, any registered post-change hooks 3122 * are guaranteed to subsequently be called. 3123 */ 3124 void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, 3125 void *opaque); 3126 /** 3127 * arm_register_el_change_hook: 3128 * Register a hook function which will be called immediately after this 3129 * CPU changes exception level or mode. The hook function will be 3130 * passed a pointer to the ARMCPU and the opaque data pointer passed 3131 * to this function when the hook was registered. 3132 * 3133 * Note that any registered hooks registered here are guaranteed to be called 3134 * if pre-change hooks have been. 
 */
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque);

/**
 * aa32_vfp_dreg:
 * Return a pointer to the Dn register within env in 32-bit mode.
 */
static inline uint64_t *aa32_vfp_dreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno >> 1].d[regno & 1];
}

/**
 * aa32_vfp_qreg:
 * Return a pointer to the Qn register within env in 32-bit mode.
 */
static inline uint64_t *aa32_vfp_qreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno].d[0];
}

/**
 * aa64_vfp_qreg:
 * Return a pointer to the Qn register within env in 64-bit mode.
 */
static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
{
    return &env->vfp.zregs[regno].d[0];
}

/* Shared between translate-sve.c and sve_helper.c.  */
extern const uint64_t pred_esz_masks[4];

/*
 * 32-bit feature tests via id registers.
 */
static inline bool isar_feature_thumb_div(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
}

static inline bool isar_feature_arm_div(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
}

static inline bool isar_feature_jazelle(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
}

static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
}

static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
}

static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
}

static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
}

static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
}

static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
}

static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
}

static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
{
    return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
}

static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
{
    /*
     * This is a placeholder for use by VCMA until the rest of
     * the ARMv8.2-FP16 extension is implemented for aa32 mode,
     * at which point we can properly set and check MVFR1.FPHP.
     */
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
}
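/* Illustrative sketch: the 32-bit tests above take a pointer to the
 * ARMISARegisters within an ARMCPU (see the cpu_isar_feature() macro at
 * the end of this header for the usual shorthand).
 */
static inline bool example_aa32_has_pmull(ARMCPU *cpu)
{
    return isar_feature_aa32_pmull(&cpu->isar);
}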
/*
 * 64-bit feature tests via id registers.
 */
static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
}

static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
}

static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
}

static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
}

static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
}

static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
}

static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
}

static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
}

static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
}

static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
}

static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
}

static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
}

static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
}

static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
{
    /* We always set the AdvSIMD and FP fields identically wrt FP16.  */
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
}

static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
}

static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}

/*
 * Forward to the above feature tests given an ARMCPU pointer.
 */
#define cpu_isar_feature(name, cpu) \
    ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })
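/* Illustrative sketch (hypothetical helper, not part of this header's API):
 * the usual way a feature check is written given an ARMCPU pointer, using
 * the macro above.
 */
static inline bool example_cpu_has_sve(ARMCPU *cpu)
{
    return cpu_isar_feature(aa64_sve, cpu);
}

#endif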