/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef ARM_CPU_H
#define ARM_CPU_H

#include "kvm-consts.h"
#include "hw/registerfields.h"

#if defined(TARGET_AARCH64)
/* AArch64 definitions */
# define TARGET_LONG_BITS 64
#else
# define TARGET_LONG_BITS 32
#endif

/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)

#define CPUArchState struct CPUARMState

#include "qemu-common.h"
#include "cpu-qom.h"
#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_IRQ             5
#define EXCP_FIQ             6
#define EXCP_BKPT            7
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_VIRQ           14
#define EXCP_VFIQ           15
#define EXCP_SEMIHOST       16   /* semihosting call */
#define EXCP_NOCP           17   /* v7M NOCP UsageFault */
#define EXCP_INVSTATE       18   /* v7M INVSTATE UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */

#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15

/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3

/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
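/* Illustrative sketch (not part of the original header): reading the
 * AArch32 (low-half) view of a 64-bit system register field through the
 * endian-aware offsets above. The helper name is hypothetical.
 */
static inline uint32_t example_read_low32(void *env_ptr, size_t low32_off)
{
    /* low32_off would come from offsetoflow32(CPUARMState, cp15.<field>) */
    return *(uint32_t *)((char *)env_ptr + low32_off);
}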
/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3

#define NB_MMU_MODES 7
/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 14
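/* Illustrative sketch (hypothetical helpers, not in the original header):
 * how a syndrome word would be packed before being handed to TCG, and
 * unpacked again on restore, per the comment above.
 */
static inline uint32_t example_pack_word2(uint32_t syndrome)
{
    /* Drop the unused upper 6 bits, then the unused lower 14, so the
     * sleb128 encoder sees a small value.
     */
    return (syndrome & ARM_INSN_START_WORD2_MASK) >> ARM_INSN_START_WORD2_SHIFT;
}

static inline uint32_t example_unpack_word2(uint32_t word2)
{
    /* Shift the stored value back up into its architectural position */
    return word2 << ARM_INSN_START_WORD2_SHIFT;
}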
/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of an FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */

/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
#define GTIMER_HYP  2
#define GTIMER_SEC  3
#define NUM_GTIMERS 4

typedef struct {
    uint64_t raw_tcr;
    uint32_t mask;
    uint32_t base_mask;
} TCR;

typedef struct CPUARMState {
    /* Regs for current mode.  */
    uint32_t regs[16];

    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    uint64_t xregs[32];
    uint64_t pc;
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */

    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits.  Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;
    uint32_t spsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */

    /* System control coprocessor (cp15) */
    struct {
        uint32_t c0_cpuid;
        union { /* Cache size selection */
            struct {
                uint64_t _unused_csselr0;
                uint64_t csselr_ns;
                uint64_t _unused_csselr1;
                uint64_t csselr_s;
            };
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            struct {
                uint64_t _unused_sctlr;
                uint64_t sctlr_ns;
                uint64_t hsctlr;
                uint64_t sctlr_s;
            };
            uint64_t sctlr_el[4];
        };
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            struct {
                uint64_t _unused_ttbr0_0;
                uint64_t ttbr0_ns;
                uint64_t _unused_ttbr0_1;
                uint64_t ttbr0_s;
            };
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            struct {
                uint64_t _unused_ttbr1_0;
                uint64_t ttbr1_ns;
                uint64_t _unused_ttbr1_1;
                uint64_t ttbr1_s;
            };
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base.  */
        /* MMU translation table base control. */
        TCR tcr_el[4];
        TCR vtcr_el2; /* Virtualization Translation Control.  */
        uint32_t c2_data; /* MPU data cacheable bits.  */
        uint32_t c2_insn; /* MPU instruction cacheable bits.  */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
            struct {
                uint64_t dacr_ns;
                uint64_t dacr_s;
            };
            struct {
                uint64_t dacr32_el2;
            };
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        union { /* Fault status registers.  */
            struct {
                uint64_t ifsr_ns;
                uint64_t ifsr_s;
            };
            struct {
                uint64_t ifsr32_el2;
            };
        };
        union {
            struct {
                uint64_t _unused_dfsr;
                uint64_t dfsr_ns;
                uint64_t hsr;
                uint64_t dfsr_s;
            };
            uint64_t esr_el[4];
        };
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        union { /* Fault address registers. */
            struct {
                uint64_t _unused_far0;
#ifdef HOST_WORDS_BIGENDIAN
                uint32_t ifar_ns;
                uint32_t dfar_ns;
                uint32_t ifar_s;
                uint32_t dfar_s;
#else
                uint32_t dfar_ns;
                uint32_t ifar_ns;
                uint32_t dfar_s;
                uint32_t ifar_s;
#endif
                uint64_t _unused_far3;
            };
            uint64_t far_el[4];
        };
        uint64_t hpfar_el2;
        uint64_t hstr_el2;
        union { /* Translation result. */
            struct {
                uint64_t _unused_par_0;
                uint64_t par_ns;
                uint64_t _unused_par_1;
                uint64_t par_s;
            };
            uint64_t par_el[4];
        };

        uint32_t c6_rgnr;

        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint32_t c9_data;
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint32_t c9_pmovsr; /* perf monitor overflow status */
        uint32_t c9_pmuserenr; /* perf monitor user enable */
        uint64_t c9_pmselr; /* perf monitor counter selection register */
        uint64_t c9_pminten; /* perf monitor interrupt enables */
        union { /* Memory attribute redirection */
            struct {
#ifdef HOST_WORDS_BIGENDIAN
                uint64_t _unused_mair_0;
                uint32_t mair1_ns;
                uint32_t mair0_ns;
                uint64_t _unused_mair_1;
                uint32_t mair1_s;
                uint32_t mair0_s;
#else
                uint64_t _unused_mair_0;
                uint32_t mair0_ns;
                uint32_t mair1_ns;
                uint64_t _unused_mair_1;
                uint32_t mair0_s;
                uint32_t mair1_s;
#endif
            };
            uint64_t mair_el[4];
        };
        union { /* vector base address register */
            struct {
                uint64_t _unused_vbar;
                uint64_t vbar_ns;
                uint64_t hvbar;
                uint64_t vbar_s;
            };
            uint64_t vbar_el[4];
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        struct { /* FCSE PID. */
            uint32_t fcseidr_ns;
            uint32_t fcseidr_s;
        };
        union { /* Context ID. */
            struct {
                uint64_t _unused_contextidr_0;
                uint64_t contextidr_ns;
                uint64_t _unused_contextidr_1;
                uint64_t contextidr_s;
            };
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            struct {
                uint64_t tpidrurw_ns;
                uint64_t tpidrprw_ns;
                uint64_t htpidr;
                uint64_t _tpidr_el3;
            };
            uint64_t tpidr_el[4];
        };
        /* The secure banks of these registers don't map anywhere */
        uint64_t tpidrurw_s;
        uint64_t tpidrprw_s;
        uint64_t tpidruro_s;

        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t mdscr_el1;
        uint64_t oslsr_el1; /* OS Lock Status */
        uint64_t mdcr_el2;
        uint64_t mdcr_el3;
        /* If the counter is enabled, this stores the last time the counter
         * was reset. Otherwise it stores the counter value.
         */
        uint64_t c15_ccnt;
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
    } cp15;

    struct {
        uint32_t other_sp;
        uint32_t vecbase;
        uint32_t basepri;
        uint32_t control;
        uint32_t ccr; /* Configuration and Control */
        uint32_t cfsr; /* Configurable Fault Status */
        uint32_t hfsr; /* HardFault Status */
        uint32_t dfsr; /* Debug Fault Status Register */
        uint32_t mmfar; /* MemManage Fault Address */
        uint32_t bfar; /* BusFault Address */
        unsigned mpu_ctrl; /* MPU_CTRL (some bits kept in sctlr_el[1]) */
        int exception;
    } v7m;

    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;

    /* Thumb-2 EE state.  */
    uint32_t teecr;
    uint32_t teehbr;

    /* VFP coprocessor state.  */
    struct {
        /* VFP/Neon register state. Note that the mapping between S, D and Q
         * views of the register bank differs between AArch64 and AArch32:
         * In AArch32:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[n]
         *  Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
         *  (and regs[32] to regs[63] are inaccessible)
         * In AArch64:
         *  Qn = regs[2n+1]:regs[2n]
         *  Dn = regs[2n]
         *  Sn = regs[2n] bits 31..0
         * This corresponds to the architecturally defined mapping between
         * the two execution states, and means we do not need to explicitly
         * map these registers when changing states. (See the illustrative
         * sketch following this struct.)
         */
        float64 regs[64];

        uint32_t xregs[16];
        /* We store these fpcsr fields separately for convenience.  */
        int vec_len;
        int vec_stride;

        /* scratch space when Tn are not sufficient.  */
        uint32_t scratch[8];

        /* fp_status is the "normal" fp status. standard_fp_status retains
         * values corresponding to the ARM "Standard FPSCR Value", ie
         * default-NaN, flush-to-zero, round-to-nearest and is used by
         * any operations (generally Neon) which the architecture defines
         * as controlled by the standard FPSCR value rather than the FPSCR.
         *
         * To avoid having to transfer exception bits around, we simply
         * say that the FPSCR cumulative exception flags are the logical
         * OR of the flags in the two fp statuses. This relies on the
         * only thing which needs to read the exception flags being
         * an explicit FPSCR read.
         */
        float_status fp_status;
        float_status standard_fp_status;
    } vfp;
    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;
    /* iwMMXt coprocessor state.  */
    struct {
        uint64_t regs[16];
        uint64_t val;

        uint32_t cregs[16];
    } iwmmxt;

#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation.  */
    int eabi;
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    CPU_COMMON

    /* Fields after CPU_COMMON are preserved across CPU reset. */

    /* Internal CPU feature flags.  */
    uint64_t features;

    /* PMSAv7 MPU */
    struct {
        uint32_t *drbar;
        uint32_t *drsr;
        uint32_t *dracr;
    } pmsav7;

    void *nvic;
    const struct arm_boot_info *boot_info;
    /* Store GICv3CPUState to access from this struct */
    void *gicv3state;
} CPUARMState;
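/* Illustrative sketch (hypothetical helper, not QEMU API): reading the
 * AArch32 S<n> view out of the D register file, following the vfp.regs
 * mapping described above; this assumes float64 can be treated as a raw
 * 64-bit bit pattern.
 */
static inline uint32_t example_vfp_get_sreg(CPUARMState *env, unsigned n)
{
    uint64_t d = *(uint64_t *)&env->vfp.regs[n / 2];

    /* Even n: bits [31:0] of D<n/2>; odd n: bits [63:32] */
    return (n & 1) ? (uint32_t)(d >> 32) : (uint32_t)d;
}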
/**
 * ARMELChangeHook:
 * type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque);

/* These values map onto the return values for
 * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;

/**
 * ARMCPU:
 * @env: #CPUARMState
 *
 * An ARM CPU core.
 */
struct ARMCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;

    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;

    /* Should CPU start in PSCI powered-off state? */
    bool start_powered_off;

    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;

    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     */
    uint32_t midr;
    uint32_t revidr;
    uint32_t reset_fpsid;
    uint32_t mvfr0;
    uint32_t mvfr1;
    uint32_t mvfr2;
    uint32_t ctr;
    uint32_t reset_sctlr;
    uint32_t id_pfr0;
    uint32_t id_pfr1;
    uint32_t id_dfr0;
    uint32_t pmceid0;
    uint32_t pmceid1;
    uint32_t id_afr0;
    uint32_t id_mmfr0;
    uint32_t id_mmfr1;
    uint32_t id_mmfr2;
    uint32_t id_mmfr3;
    uint32_t id_mmfr4;
    uint32_t id_isar0;
    uint32_t id_isar1;
    uint32_t id_isar2;
    uint32_t id_isar3;
    uint32_t id_isar4;
    uint32_t id_isar5;
    uint64_t id_aa64pfr0;
    uint64_t id_aa64pfr1;
    uint64_t id_aa64dfr0;
    uint64_t id_aa64dfr1;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t id_aa64isar0;
    uint64_t id_aa64isar1;
    uint64_t id_aa64mmfr0;
    uint64_t id_aa64mmfr1;
    uint32_t dbgdidr;
    uint32_t clidr;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint32_t ccsidr[16];
    uint64_t reset_cbar;
    uint32_t reset_auxcr;
    bool reset_hivecs;
    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint32_t dcz_blocksize;
    uint64_t rvbar;

    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode).  This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    ARMELChangeHook *el_change_hook;
    void *el_change_hook_opaque;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;
};

static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
{
    return container_of(env, ARMCPU, env);
}

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);

#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))

#define ENV_OFFSET offsetof(ARMCPU, env)

#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);

#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
#endif

ARMCPU *cpu_arm_init(const char *cpu_model);
target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);

static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}

/* You can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU.  */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/**
 * pmccntr_sync
 * @env: CPUARMState
 *
 * Synchronises the counter in the PMCCNTR. This must always be called twice,
 * once before any action that might affect the timer and again afterwards.
 * The function is used to swap the state of the register if required.
 * This only happens when not in user mode (!CONFIG_USER_ONLY)
 */
void pmccntr_sync(CPUARMState *env);

/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M       (1U << 0)
#define SCTLR_A       (1U << 1)
#define SCTLR_C       (1U << 2)
#define SCTLR_W       (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA      (1U << 3)
#define SCTLR_P       (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_SA0     (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D       (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L       (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_B       (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD     (1U << 7) /* v8 onward */
#define SCTLR_S       (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED     (1U << 8) /* v8 onward */
#define SCTLR_R       (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA     (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F       (1U << 10) /* up to v6 */
#define SCTLR_SW      (1U << 10) /* v7 onward */
#define SCTLR_Z       (1U << 11)
#define SCTLR_I       (1U << 12)
#define SCTLR_V       (1U << 13)
#define SCTLR_RR      (1U << 14) /* up to v7 */
#define SCTLR_DZE     (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4      (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT     (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT      (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI    (1U << 16) /* v8 onward */
#define SCTLR_HA      (1U << 17)
#define SCTLR_BR      (1U << 17) /* PMSA only */
#define SCTLR_IT      (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE    (1U << 18) /* v8 onward */
#define SCTLR_WXN     (1U << 19)
#define SCTLR_ST      (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN    (1U << 20) /* v7 onward */
#define SCTLR_FI      (1U << 21)
#define SCTLR_U       (1U << 22)
#define SCTLR_XP      (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_VE      (1U << 24) /* up to v7 */
#define SCTLR_E0E     (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE      (1U << 25)
#define SCTLR_L2      (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI     (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI    (1U << 27)
#define SCTLR_TRE     (1U << 28)
#define SCTLR_AFE     (1U << 29)
#define SCTLR_TE      (1U << 30)

#define CPTR_TCPAC    (1U << 31)
#define CPTR_TTA      (1U << 20)
#define CPTR_TFP      (1U << 10)

#define MDCR_EPMAD    (1U << 21)
#define MDCR_EDAD     (1U << 20)
#define MDCR_SPME     (1U << 17)
#define MDCR_SDD      (1U << 16)
#define MDCR_SPD      (3U << 14)
#define MDCR_TDRA     (1U << 11)
#define MDCR_TDOSA    (1U << 10)
#define MDCR_TDA      (1U << 9)
#define MDCR_TDE      (1U << 8)
#define MDCR_HPME     (1U << 7)
#define MDCR_TPM      (1U << 6)
#define MDCR_TPMCR    (1U << 5)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)

#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
    | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits.  MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)

#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)

/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0

/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}

/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif;
}

static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}
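/* Illustrative sketch (hypothetical helper, not part of the original
 * header): the accessors above keep the split NZCV/DAIF caches coherent,
 * so callers should round-trip through them rather than poke env->pstate
 * directly, e.g. to mask IRQs:
 */
static inline void example_pstate_mask_irq(CPUARMState *env)
{
    /* Setting PSTATE.I lands in env->daif via pstate_write() */
    pstate_write(env, pstate_read(env) | PSTATE_I);
}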
/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,             /* trust values, do not switch reg banks */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/* Set the CPSR.  Note that some bits of mask must be all-set or all-clear. */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);

/* Return the current xPSR value.  */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | env->v7m.exception;
}

/* Set the xPSR.  Note that some bits of mask must be all-set or all-clear. */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & (1 << 24)) {
        env->thumb = ((val & (1 << 24)) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & 0x1ff) {
        env->v7m.exception = val & 0x1ff;
    }
}

#define HCR_VM        (1ULL << 0)
#define HCR_SWIO      (1ULL << 1)
#define HCR_PTW       (1ULL << 2)
#define HCR_FMO       (1ULL << 3)
#define HCR_IMO       (1ULL << 4)
#define HCR_AMO       (1ULL << 5)
#define HCR_VF        (1ULL << 6)
#define HCR_VI        (1ULL << 7)
#define HCR_VSE       (1ULL << 8)
#define HCR_FB        (1ULL << 9)
#define HCR_BSU_MASK  (3ULL << 10)
#define HCR_DC        (1ULL << 12)
#define HCR_TWI       (1ULL << 13)
#define HCR_TWE       (1ULL << 14)
#define HCR_TID0      (1ULL << 15)
#define HCR_TID1      (1ULL << 16)
#define HCR_TID2      (1ULL << 17)
#define HCR_TID3      (1ULL << 18)
#define HCR_TSC       (1ULL << 19)
#define HCR_TIDCP     (1ULL << 20)
#define HCR_TACR      (1ULL << 21)
#define HCR_TSW       (1ULL << 22)
#define HCR_TPC       (1ULL << 23)
#define HCR_TPU       (1ULL << 24)
#define HCR_TTLB      (1ULL << 25)
#define HCR_TVM       (1ULL << 26)
#define HCR_TGE       (1ULL << 27)
#define HCR_TDZ       (1ULL << 28)
#define HCR_HCD       (1ULL << 29)
#define HCR_TRVM      (1ULL << 30)
#define HCR_RW        (1ULL << 31)
#define HCR_CD        (1ULL << 32)
#define HCR_ID        (1ULL << 33)
#define HCR_MASK      ((1ULL << 34) - 1)

#define SCR_NS                (1U << 0)
#define SCR_IRQ               (1U << 1)
#define SCR_FIQ               (1U << 2)
#define SCR_EA                (1U << 3)
#define SCR_FW                (1U << 4)
#define SCR_AW                (1U << 5)
#define SCR_NET               (1U << 6)
#define SCR_SMD               (1U << 7)
#define SCR_HCE               (1U << 8)
#define SCR_SIF               (1U << 9)
#define SCR_RW                (1U << 10)
#define SCR_ST                (1U << 11)
#define SCR_TWI               (1U << 12)
#define SCR_TWE               (1U << 13)
#define SCR_AARCH32_MASK      (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK      (0x3fff & ~SCR_NET)

/* Return the current FPSCR value.  */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07f79f00
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};

/* VFP system registers.  */
#define ARM_VFP_FPSID   0
#define ARM_VFP_FPSCR   1
#define ARM_VFP_MVFR2   5
#define ARM_VFP_MVFR1   6
#define ARM_VFP_MVFR0   7
#define ARM_VFP_FPEXC   8
#define ARM_VFP_FPINST  9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers.  */
#define ARM_IWMMXT_wCID  0
#define ARM_IWMMXT_wCon  1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11

/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, USERSETMPEND, 1, 1)
FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)

/* V7M CFSR bits for MMFSR */
FIELD(V7M_CFSR, IACCVIOL, 0, 1)
FIELD(V7M_CFSR, DACCVIOL, 1, 1)
FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
FIELD(V7M_CFSR, MSTKERR, 4, 1)
FIELD(V7M_CFSR, MLSPERR, 5, 1)
FIELD(V7M_CFSR, MMARVALID, 7, 1)

/* V7M CFSR bits for BFSR */
FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)

/* V7M CFSR bits for UFSR */
FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)

/* V7M HFSR bits */
FIELD(V7M_HFSR, VECTTBL, 1, 1)
FIELD(V7M_HFSR, FORCED, 30, 1)
FIELD(V7M_HFSR, DEBUGEVT, 31, 1)

/* V7M DFSR bits */
FIELD(V7M_DFSR, HALTED, 0, 1)
FIELD(V7M_DFSR, BKPT, 1, 1)
FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)
/* v7M MPU_CTRL bits */
FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
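/* Illustrative sketch (hypothetical helper), assuming the FIELD_EX32
 * accessor from "hw/registerfields.h": extracting one of the V7M fields
 * declared above out of the stored register value.
 */
static inline bool example_v7m_stkalign(CPUARMState *env)
{
    /* Pulls the STKALIGN bit out of the v7M CCR value */
    return FIELD_EX32(env->v7m.ccr, V7M_CCR, STKALIGN);
}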
/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_VFP,
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
    ARM_FEATURE_V6,
    ARM_FEATURE_V6K,
    ARM_FEATURE_V7,
    ARM_FEATURE_THUMB2,
    ARM_FEATURE_PMSA,   /* no MMU; may have Memory Protection Unit */
    ARM_FEATURE_VFP3,
    ARM_FEATURE_VFP_FP16,
    ARM_FEATURE_NEON,
    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
    ARM_FEATURE_M, /* Microcontroller profile.  */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
    ARM_FEATURE_V4T,
    ARM_FEATURE_V5,
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_V8,
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
    ARM_FEATURE_PMU, /* has PMU support */
    ARM_FEATURE_VBAR, /* has cp15 VBAR */
};

static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}

#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to that level.
 * Unlike arm_is_secure() (which is always a question about the
 * _current_ state of the CPU) this doesn't care about the current
 * EL or mode.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return !(env->cp15.scr_el3 & SCR_NS);
    } else {
        /* If EL3 is not supported then the secure state is implementation
         * defined, in which case QEMU defaults to non-secure.
         */
        return false;
    }
}

/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return false;
}

/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
    if (arm_is_el3_or_mon(env)) {
        return true;
    }
    return arm_is_secure_below_el3(env);
}

#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}
#endif

/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /* The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == 3) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}

/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}

/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                    \
        if (_secure) {                                      \
            (_env)->cp15._regname##_s = (_val);             \
        } else {                                            \
            (_env)->cp15._regname##_ns = (_val);            \
        }                                                   \
    } while (0)
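/* Illustrative sketch (hypothetical helper, not QEMU API): accessing an
 * explicitly-chosen bank of a register that is defined with _s/_ns
 * suffixes, here the FCSE PID from the cp15 struct above.
 */
static inline uint32_t example_get_fcseidr(CPUARMState *env, bool secure)
{
    /* Expands to env->cp15.fcseidr_s or env->cp15.fcseidr_ns */
    return A32_BANKED_REG_GET(env, fcseidr, secure);
}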
/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system.  These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)        \
    A32_BANKED_REG_GET((_env), _regname,                  \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                    \
    A32_BANKED_REG_SET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure);

/* Interface between CPU and Interrupt controller.  */
#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_can_take_pending_exception(void *opaque);
#else
static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    return true;
}
#endif
void armv7m_nvic_set_pending(void *opaque, int irq);
void armv7m_nvic_acknowledge_irq(void *opaque);
/**
 * armv7m_nvic_complete_irq: complete specified interrupt or exception
 * @opaque: the NVIC
 * @irq: the exception number to complete
 *
 * Returns: -1 if the irq was not active
 *           1 if completing this irq brought us back to base (no active irqs)
 *           0 if there is still an irq active after this one was completed
 * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
 */
int armv7m_nvic_complete_irq(void *opaque, int irq);

/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/* To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)

#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)      \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
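/* Illustrative example (the encoding values are architectural, the helper
 * itself is hypothetical): the AArch32 SCTLR lives at cp15, crn=1, crm=0,
 * opc1=0, opc2=0 and is accessed via MRC/MCR (is64 = 0), so its
 * non-secure hashtable key would be built as:
 */
static inline uint32_t example_sctlr_key(void)
{
    /* Arguments: cp, is64, ns, crn, crm, opc1, opc2 */
    return ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0);
}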
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /* KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}

/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [15..8] indicate what behaviour
 * it has. Otherwise it is a simple cp reg, where CONST indicates that
 * TCG can assume the value to be constant (ie load at translate time)
 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
 * indicates that the TB should not be ended after a write to this register
 * (the default is that the TB ends after cp writes). OVERRIDE permits
 * a register definition to override a previous definition for the
 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
 * old must have the OVERRIDE bit set.
 * ALIAS indicates that this register is an alias view of some underlying
 * state which is also visible via another register, and that the other
 * register is handling migration and reset; registers marked ALIAS will not be
 * migrated but may have their state set by syncing of register state from KVM.
 * NO_RAW indicates that this register has no underlying state and does not
 * support raw access for state saving/loading; it will not be used for either
 * migration or KVM state synchronization. (Typically this is for "registers"
 * which are actually used as instructions for cache maintenance and so on.)
 * IO indicates that this register does I/O and therefore its accesses
 * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
 * registers which implement clocks or timers require this.
 */
#define ARM_CP_SPECIAL 1
#define ARM_CP_CONST 2
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
#define ARM_CP_ALIAS 32
#define ARM_CP_IO 64
#define ARM_CP_NO_RAW 128
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0xff

/* Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
};

/* ARM CP register secure state flags.  These flags identify security state
 * attributes for a given CP register entry.
 * The existence of both or neither secure and non-secure flags indicates that
 * the register has both a secure and non-secure hash entry.  A single one of
 * these flags causes the register to only be hashed for the specified
 * security state.
 * Although definitions may have any combination of the S/NS bits, each
 * registered entry will only have one to identify whether the entry is secure
 * or non-secure.
 */
enum {
    ARM_CP_SECSTATE_S =  (1 << 0), /* bit[0]: Secure state register */
    ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
};

/* Return true if cptype is a valid type field. This is used to try to
 * catch errors where the sentinel has been accidentally left off the end
 * of a list of registers.
 */
static inline bool cptype_valid(int cptype)
{
    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
        || ((cptype & ARM_CP_SPECIAL) &&
            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
}

/* Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
#define PL3_R 0x80
#define PL3_W 0x40
#define PL2_R (0x20 | PL3_R)
#define PL2_W (0x10 | PL3_W)
#define PL1_R (0x08 | PL2_R)
#define PL1_W (0x04 | PL2_W)
#define PL0_R (0x02 | PL1_R)
#define PL0_W (0x01 | PL1_W)

#define PL3_RW (PL3_R | PL3_W)
#define PL2_RW (PL2_R | PL2_W)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)
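/* Illustrative sketch (hypothetical helper): because each PLx_R/W value
 * ORs in the bits of the higher privilege levels, a single mask test
 * answers "readable at PL0?", which by construction implies readability
 * at PL1, PL2 and PL3 as well.
 */
static inline bool example_readable_at_pl0(int access_bits)
{
    /* PL0_R already has PL1_R, PL2_R and PL3_R folded in */
    return (access_bits & PL0_R) == PL0_R;
}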
/* Return the highest implemented Exception Level */
static inline int arm_highest_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return 3;
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        return 2;
    }
    return 1;
}

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return !((env->v7m.exception == 0) && (env->v7m.control & 1));
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}

typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
     * PL1 if in EL0, otherwise to the current EL).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_EL2 = 3,
    CP_ACCESS_TRAP_EL3 = 4,
    /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
    /* Access fails and results in an exception syndrome for an FP access,
     * trapped directly to EL2 or EL3
     */
    CP_ACCESS_TRAP_FP_EL2 = 7,
    CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers.  */
/* Return the highest implemented Exception Level */
static inline int arm_highest_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return 3;
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        return 2;
    }
    return 1;
}

/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return !((env->v7m.exception == 0) && (env->v7m.control & 1));
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}

typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
     * PL1 if in EL0, otherwise to the current EL).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_EL2 = 3,
    CP_ACCESS_TRAP_EL3 = 4,
    /* As CP_ACCESS_TRAP_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
    /* Access fails and results in an exception syndrome for an FP access,
     * trapped directly to EL2 or EL3
     */
    CP_ACCESS_TRAP_FP_EL2 = 7,
    CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;

/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env,
                                  const ARMCPRegInfo *opaque,
                                  bool isread);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);

#define CP_ANY 0xff

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
    /* Execution state in which this register is visible: ARM_CP_STATE_* */
    int state;
    /* Register type: ARM_CP_* bits/values */
    int type;
    /* Access rights: PL*_[RW] */
    int access;
    /* Security state: ARM_CP_SECSTATE_* bits/values */
    int secure;
    /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
     * this register was defined: can be used to hand data through to the
     * register read/write functions, since they are passed the ARMCPRegInfo*.
     */
    void *opaque;
    /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
     * fieldoffset is non-zero, the reset value of the register.
     */
    uint64_t resetvalue;
    /* Offset of the field in CPUARMState for this register.
     *
     * This is not needed if either:
     *  1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
     *  2. both readfn and writefn are specified
     */
    ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */

    /* Offsets of the secure and non-secure fields in CPUARMState for the
     * register if it is banked. These fields are only used during the static
     * registration of a register. During hashing the bank associated
     * with a given security state is copied to fieldoffset which is used from
     * there on out.
     *
     * It is expected that register definitions use either fieldoffset or
     * bank_fieldoffsets in the definition but not both. It is also expected
     * that both bank offsets are set when defining a banked register. This
     * use indicates that a register is banked.
     */
    ptrdiff_t bank_fieldoffsets[2];

    /* Function for making any access checks for this register in addition to
     * those specified by the 'access' permissions bits. If NULL, no extra
     * checks required. The access check is performed at runtime, not at
     * translate time.
     */
    CPAccessFn *accessfn;
    /* Function for handling reads of this register. If NULL, then reads
     * will be done by loading from the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPReadFn *readfn;
    /* Function for handling writes of this register. If NULL, then writes
     * will be done by writing to the offset into CPUARMState specified
     * by fieldoffset.
     */
    CPWriteFn *writefn;
    /* Function for doing a "raw" read; used when we need to copy
     * coprocessor state to the kernel for KVM or out for
     * migration. This only needs to be provided if there is also a
     * readfn and it has side effects (for instance clear-on-read bits).
     */
    CPReadFn *raw_readfn;
    /* Function for doing a "raw" write; used when we need to copy KVM
     * kernel coprocessor state into userspace, or for inbound
     * migration. This only needs to be provided if there is also a
     * writefn and it masks out "unwritable" bits or has write-one-to-clear
     * or similar behaviour.
     */
    CPWriteFn *raw_writefn;
    /* Function for resetting the register. If NULL, then reset will be done
     * by writing resetvalue to the field specified in fieldoffset. If
     * fieldoffset is 0 then no reset will be done.
     */
    CPResetFn *resetfn;
};
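
/* Illustrative sketch (hypothetical helper, not a QEMU function): a minimal
 * accessfn which denies EL0 accesses and permits everything else; real
 * implementations usually also consult enable bits in system registers:
 *
 *     static CPAccessResult access_el0_denied(CPUARMState *env,
 *                                             const ARMCPRegInfo *ri,
 *                                             bool isread)
 *     {
 *         if (arm_current_el(env) == 0) {
 *             return CP_ACCESS_TRAP;  // trap to the usual target EL
 *         }
 *         return CP_ACCESS_OK;
 *     }
 */
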
/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))

#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, NULL);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, NULL);
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
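
/* Illustrative sketch (hypothetical register, not defined by QEMU): a
 * sentinel-terminated list as consumed by define_arm_cp_regs(), typically
 * called from a CPU init function:
 *
 *     static const ARMCPRegInfo demo_cp_reginfo[] = {
 *         { .name = "DEMO_CONST", .cp = 15,
 *           .crn = 0, .crm = 1, .opc1 = 0, .opc2 = 0,
 *           .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0x1234 },
 *         REGINFO_SENTINEL
 *     };
 *
 *     define_arm_cp_regs(cpu, demo_cp_reginfo);
 */
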
/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}
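
/* Illustrative sketch (editorial example): how cp_access_ok() decodes the
 * PL bits for a register declared with .access = PL1_RW (0xfc):
 *
 *     cp_access_ok(0, ri, 1);   // EL0 read:  bit 1 is 0 -> denied
 *     cp_access_ok(1, ri, 1);   // EL1 read:  bit 3 is 1 -> allowed
 *     cp_access_ok(2, ri, 0);   // EL2 write: bit 4 is 1 -> allowed
 */
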
/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);

/**
 * write_list_to_cpustate:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the CPUARMState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the CPUARMState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu);

#define ARM_CPUID_TI915T      0x54029152
#define ARM_CPUID_TI925T      0x54029252

#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
 * have to support 1K tiny pages.
 */
#define TARGET_PAGE_BITS_VARY
#define TARGET_PAGE_BITS_MIN 10
#endif

#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool pstate_unmasked;
    int8_t unmasked = 0;

    /* Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken but left
     * pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;

    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;

    case EXCP_VFIQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
            /* VFIQs are only taken when virtualized (EL2 enabled) and
             * non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
            /* VIRQs are only taken when virtualized (EL2 enabled) and
             * non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /* Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure) {
                unmasked = 1;
            }
        } else {
            /* The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /* If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = (env->cp15.hcr_el2 & HCR_FMO);
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /* When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /* When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = (env->cp15.hcr_el2 & HCR_IMO);
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = 1;
            }
        }
    }

    /* The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}
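
/* Illustrative sketch (simplified; the real logic lives in the CPU
 * exec-interrupt path in cpu.c): interrupt delivery first routes the
 * exception to a target EL and only then asks arm_excp_unmasked()
 * whether it may actually be taken now:
 *
 *     uint32_t target_el = arm_phys_excp_target_el(cs, EXCP_IRQ,
 *                                                  cur_el, secure);
 *     if (arm_excp_unmasked(cs, EXCP_IRQ, target_el)) {
 *         // take the interrupt
 *     }
 */
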
#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))

#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

/* ARM has the following "translation regimes" (as the ARM ARM calls them):
 *
 * If EL3 is 64-bit:
 *  + NonSecure EL1 & 0 stage 1
 *  + NonSecure EL1 & 0 stage 2
 *  + NonSecure EL2
 *  + Secure EL1 & EL0
 *  + Secure EL3
 * If EL3 is 32-bit:
 *  + NonSecure PL1 & 0 stage 1
 *  + NonSecure PL1 & 0 stage 2
 *  + NonSecure PL2
 *  + Secure PL0 & PL1
 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
 *  1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
 *     may differ in access permissions even if the VA->PA map is the same
 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *     translation, which means that we have one mmu_idx that deals with two
 *     concatenated translation regimes [this sort of combined s1+2 TLB is
 *     architecturally permitted]
 *  3. we don't need to allocate an mmu_idx to translations that we won't be
 *     handling via the TLB. The only way to do a stage 1 translation without
 *     the immediate stage 2 translation is via the ATS or AT system insns,
 *     which can be slow-pathed and always do a page table walk.
 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *     translation regimes, because they map reasonably well to each other
 *     and they can't both be active at the same time.
 * This gives us the following list of mmu_idx values:
 *
 * NS EL0 (aka NS PL0) stage 1+2
 * NS EL1 (aka NS PL1) stage 1+2
 * NS EL2 (aka NS PL2)
 * S EL3 (aka S PL1)
 * S EL0 (aka S PL0)
 * S EL1 (not used if EL3 is 32 bit)
 * NS EL0+1 stage 2
 *
 * (The last of these is an mmu_idx because we want to be able to use the TLB
 * for the accesses done as part of a stage 1 page table walk, rather than
 * having to walk the stage 2 page table over and over.)
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
 * NS EL2 if we ever model a Cortex-R52).
 *
 * M profile CPUs are rather different as they do not have a true MMU.
 * They have the following different MMU indexes:
 *  User
 *  Privileged
 *  Execution priority negative (this is like privileged, but the
 *  MPU HFNMIENA bit means that it may have different access permission
 *  check results to normal privileged code, so can't share a TLB).
 *
 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
 * are not quite the same -- different CPU types (most notably M profile
 * vs A/R profile) would like to use MMU indexes with different semantics,
 * but since we don't ever need to use all of those in a single CPU we
 * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
 * the same for any particular CPU.
 * Variables of type ARMMMUIdx are always full values, and the core
 * index values are in variables of type 'int'.
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 */
#define ARM_MMU_IDX_A 0x10 /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */

#define ARM_MMU_IDX_TYPE_MASK (~0x7)
#define ARM_MMU_IDX_COREIDX_MASK 0x7

typedef enum ARMMMUIdx {
    ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
    ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M,
    /* Indexes below here don't have TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
} ARMMMUIdx;

/* Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
typedef enum ARMMMUIdxBit {
    ARMMMUIdxBit_S12NSE0 = 1 << 0,
    ARMMMUIdxBit_S12NSE1 = 1 << 1,
    ARMMMUIdxBit_S1E2 = 1 << 2,
    ARMMMUIdxBit_S1E3 = 1 << 3,
    ARMMMUIdxBit_S1SE0 = 1 << 4,
    ARMMMUIdxBit_S1SE1 = 1 << 5,
    ARMMMUIdxBit_S2NS = 1 << 6,
    ARMMMUIdxBit_MUser = 1 << 0,
    ARMMMUIdxBit_MPriv = 1 << 1,
    ARMMMUIdxBit_MNegPri = 1 << 2,
} ARMMMUIdxBit;

#define MMU_USER_IDX 0

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
    case ARM_MMU_IDX_A:
        return mmu_idx & 3;
    case ARM_MMU_IDX_M:
        return mmu_idx == ARMMMUIdx_MUser ? 0 : 1;
    default:
        g_assert_not_reached();
    }
}
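
/* Illustrative sketch (values follow directly from the definitions above):
 * how an ARMMMUIdx splits into type bits and a core TLB index:
 *
 *     ARMMMUIdx idx = ARMMMUIdx_S1E2;       // 0x12: ARM_MMU_IDX_A | 2
 *     int core = arm_to_core_mmu_idx(idx);  // 2: what the core TLB code sees
 *     int el = arm_mmu_idx_to_el(idx);      // 2: the EL encoded in the low bits
 *
 * core_to_arm_mmu_idx(env, 2) re-attaches ARM_MMU_IDX_A (or ARM_MMU_IDX_M
 * on an M-profile CPU) to recover the full value.
 */
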
/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    int el = arm_current_el(env);

    if (arm_feature(env, ARM_FEATURE_M)) {
        ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv;

        /* Execution priority is negative if FAULTMASK is set or
         * we're in a HardFault or NMI handler.
         */
        if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
            || (env->daif & PSTATE_F)) {
            return arm_to_core_mmu_idx(ARMMMUIdx_MNegPri);
        }

        return arm_to_core_mmu_idx(mmu_idx);
    }

    if (el < 2 && arm_is_secure_below_el3(env)) {
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
    }
    return el;
}

/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
    ARMASIdx_NS = 0,
    ARMASIdx_S = 1,
} ARMASIdx;

/* Return the Exception Level targeted by debug exceptions. */
static inline int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
        route_to_el2 = (env->cp15.hcr_el2 & HCR_TGE) ||
                       (env->cp15.mdcr_el2 & (1 << 8)); /* MDCR_EL2.TDE */
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    if (arm_is_secure(env)) {
        /* MDCR_EL3.SDD disables debug events from Secure state */
        if (extract32(env->cp15.mdcr_el3, 16, 1) != 0
            || arm_current_el(env) == 3) {
            return false;
        }
    }

    if (arm_current_el(env) == arm_debug_target_el(env)) {
        if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
            || (env->daif & PSTATE_D)) {
            return false;
        }
    }
    return true;
}

static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /* SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
            /* fall through */
        case 0:
            /* For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}

/* Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *   if UsingAArch32() then
 *       return AArch32.GenerateDebugExceptions()
 *   else
 *       return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 *
 * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
 * don't yet implement those exception levels or their associated trap bits.
 */
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (env->aarch64) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
static inline bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

static inline bool arm_sctlr_b(CPUARMState *env)
{
    return
        /* We need not implement SCTLR.ITD in user-mode emulation, so
         * let linux-user ignore the fact that it conflicts with SCTLR_B.
         * This lets people run BE32 binaries with "-cpu any".
         */
#ifndef CONFIG_USER_ONLY
        !arm_feature(env, ARM_FEATURE_V7) &&
#endif
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    int cur_el;

    /* In 32-bit state, endianness is determined by looking at CPSR's E bit */
    if (!is_a64(env)) {
        return
#ifdef CONFIG_USER_ONLY
            /* In system mode, BE32 is modelled in line with the
             * architecture (as word-invariant big-endianness), where loads
             * and stores are done little endian but from addresses which
             * are adjusted by XORing with the appropriate constant. So the
             * endianness to use for the raw data access is not affected by
             * SCTLR.B.
             * In user mode, however, we model BE32 as byte-invariant
             * big-endianness (because user-only code cannot tell the
             * difference), and so we need to use a data access endianness
             * that depends on SCTLR.B.
             */
            arm_sctlr_b(env) ||
#endif
            ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
    }

    cur_el = arm_current_el(env);

    if (cur_el == 0) {
        return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
    }

    return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
}
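
/* Illustrative sketch (editorial example, not QEMU code): the word-invariant
 * address adjustment mentioned above. With 32-bit words, a BE32 byte access
 * at address A is modelled as a little-endian access at A ^ 3, a halfword
 * access at A ^ 2, and an aligned word access is unchanged:
 *
 *     static inline uint32_t be32_xor_adjust(uint32_t addr, int size)
 *     {
 *         switch (size) {
 *         case 1:  return addr ^ 3;  // byte
 *         case 2:  return addr ^ 2;  // halfword
 *         default: return addr;      // word
 *         }
 *     }
 */
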
#include "exec/cpu-all.h"

/* Bit usage in the TB flags field: bit 31 indicates whether we are
 * in 32 or 64 bit mode. The meaning of the other bits depends on that.
 * We put flags which are shared between 32 and 64 bit mode at the top
 * of the word, and flags which apply to only one mode at the bottom.
 */
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK  (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX_SHIFT 28
#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE_SHIFT 27
#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS_SHIFT 26
#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
/* Target EL if we take a floating-point-disabled exception */
#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)

/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN_SHIFT 1
#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT 7
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_SCTLR_B_SHIFT 16
#define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime
 */
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 17
#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
/* Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
#define ARM_TBFLAG_NS_SHIFT 19
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
#define ARM_TBFLAG_BE_DATA_SHIFT 20
#define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
/* For M profile only, Handler (ie not Thread) mode */
#define ARM_TBFLAG_HANDLER_SHIFT 21
#define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT)

/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */
#define ARM_TBFLAG_TBI0_MASK (0x1ull << ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1_SHIFT 1 /* TBI1 for EL0/1 */
#define ARM_TBFLAG_TBI1_MASK (0x1ull << ARM_TBFLAG_TBI1_SHIFT)

/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
    (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX(F) \
    (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_FPEXC_EL(F) \
    (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
    (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
    (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
    (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
    (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
    (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_SCTLR_B(F) \
    (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_NS(F) \
    (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
#define ARM_TBFLAG_BE_DATA(F) \
    (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
#define ARM_TBFLAG_HANDLER(F) \
    (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
#define ARM_TBFLAG_TBI0(F) \
    (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1(F) \
    (((F) & ARM_TBFLAG_TBI1_MASK) >> ARM_TBFLAG_TBI1_SHIFT)
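
/* Illustrative sketch (editorial example): flags are composed with the
 * *_SHIFT values and read back with the accessors, e.g. for an AArch32 TB:
 *
 *     uint32_t flags = 0;
 *     flags |= 1 << ARM_TBFLAG_THUMB_SHIFT;
 *     flags |= 3 << ARM_TBFLAG_MMUIDX_SHIFT;
 *     assert(ARM_TBFLAG_THUMB(flags) == 1);
 *     assert(ARM_TBFLAG_MMUIDX(flags) == 3);
 *     assert(ARM_TBFLAG_AARCH64_STATE(flags) == 0);
 */
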
static inline bool bswap_code(bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
     * would also end up as a mixed-endian mode with BE code, LE data.
     */
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        sctlr_b;
#else
    /* All code access in ARM is little endian, and there are no loaders
     * doing swaps that need to be reversed
     */
    return 0;
#endif
}

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }

    return 0;
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        arm_cpu_data_is_big_endian(env);
}
#endif

#ifndef CONFIG_USER_ONLY
/**
 * arm_regime_tbi0:
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 *
 * Extracts the TBI0 value from the appropriate TCR for the current EL
 *
 * Returns: the TBI0 value.
 */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx);

/**
 * arm_regime_tbi1:
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 *
 * Extracts the TBI1 value from the appropriate TCR for the current EL
 *
 * Returns: the TBI1 value.
 */
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx);
#else
/* We can't handle tagged addresses properly in user-only mode */
static inline uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return 0;
}

static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return 0;
}
#endif

static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    if (is_a64(env)) {
        *pc = env->pc;
        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        *flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        *flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
    } else {
        *pc = env->regs[15];
        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            *flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            *flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0          x         Inactive (the TB flag for SS is always 0)
     *     1          0         Active-pending
     *     1          1         Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        *flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (env->v7m.exception != 0) {
        *flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    *cs_base = 0;
}

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};

#ifndef CONFIG_USER_ONLY
/* Return the address space index to use for a memory access */
static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
}

/* Return the AddressSpace to use for a memory access
 * (which depends on whether the access is S or NS, and whether
 * the board gave us a separate AddressSpace for S accesses).
 */
static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
}
#endif
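
/* Illustrative sketch (editorial example): callers pass the transaction
 * attributes and get back the matching address space:
 *
 *     MemTxAttrs attrs = { .secure = 1 };
 *     assert(arm_asidx_from_attrs(cs, attrs) == ARMASIdx_S);
 *     AddressSpace *as = arm_addressspace(cs, attrs);
 */
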
/**
 * arm_register_el_change_hook:
 * Register a hook function which will be called back whenever this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that we currently only support registering a single hook function,
 * and will assert if this function is called twice.
 * This facility is intended for the use of the GICv3 emulation.
 */
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
                                 void *opaque);

/**
 * arm_get_el_change_hook_opaque:
 * Return the opaque data that will be used by the el_change_hook
 * for this CPU.
 */
static inline void *arm_get_el_change_hook_opaque(ARMCPU *cpu)
{
    return cpu->el_change_hook_opaque;
}

#endif