/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC    1
#define BANK_ABT    2
#define BANK_UND    3
#define BANK_IRQ    4
#define BANK_FIQ    5
#define BANK_HYP    6
#define BANK_MON    7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum value which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe
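/*
 * Purely as a worked example of the EXCRET fields above (an
 * illustrative decode, not an exhaustive list of valid values): the
 * classic v7M EXC_RETURN value 0xfffffffd has MODE = 1 (return to
 * Thread mode), SPSEL = 1 (use the process stack pointer) and
 * FTYPE = 1 (standard stack frame, no FP state), with every bit of
 * the RES1 field set as required.
 */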
/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/* Switch the CPU's banked registers to those for the specified mode */
void switch_mode(CPUARMState *env, int mode);
/* Register gdbstub register sets appropriate to the CPU's feature bits */
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
/* One-time initialization of the TCG translator */
void arm_translate_init(void);

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

/* Convert an arm_fprounding value to the corresponding softfloat
 * float_round_* rounding mode.
 */
int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error.
     */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}
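/*
 * For example (reading straight off pamax_map above, no further
 * assumptions): a CPU reporting PARange == 2 in ID_AA64MMFR0_EL1 has
 * arm_pamax() == 40, ie a 1TB physical address space.
 */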
/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Valid Syndrome Register EC field values */
enum arm_exception_class {
    EC_UNCATEGORIZED          = 0x00,
    EC_WFX_TRAP               = 0x01,
    EC_CP15RTTRAP             = 0x03,
    EC_CP15RRTTRAP            = 0x04,
    EC_CP14RTTRAP             = 0x05,
    EC_CP14DTTRAP             = 0x06,
    EC_ADVSIMDFPACCESSTRAP    = 0x07,
    EC_FPIDTRAP               = 0x08,
    EC_CP14RRTTRAP            = 0x0c,
    EC_ILLEGALSTATE           = 0x0e,
    EC_AA32_SVC               = 0x11,
    EC_AA32_HVC               = 0x12,
    EC_AA32_SMC               = 0x13,
    EC_AA64_SVC               = 0x15,
    EC_AA64_HVC               = 0x16,
    EC_AA64_SMC               = 0x17,
    EC_SYSTEMREGISTERTRAP     = 0x18,
    EC_INSNABORT              = 0x20,
    EC_INSNABORT_SAME_EL      = 0x21,
    EC_PCALIGNMENT            = 0x22,
    EC_DATAABORT              = 0x24,
    EC_DATAABORT_SAME_EL      = 0x25,
    EC_SPALIGNMENT            = 0x26,
    EC_AA32_FPTRAP            = 0x28,
    EC_AA64_FPTRAP            = 0x2c,
    EC_SERROR                 = 0x2f,
    EC_BREAKPOINT             = 0x30,
    EC_BREAKPOINT_SAME_EL     = 0x31,
    EC_SOFTWARESTEP           = 0x32,
    EC_SOFTWARESTEP_SAME_EL   = 0x33,
    EC_WATCHPOINT             = 0x34,
    EC_WATCHPOINT_SAME_EL     = 0x35,
    EC_AA32_BKPT              = 0x38,
    EC_VECTORCATCH            = 0x3a,
    EC_AA64_BKPT              = 0x3c,
};

#define ARM_EL_EC_SHIFT 26
#define ARM_EL_IL_SHIFT 25
#define ARM_EL_ISV_SHIFT 24
#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)

/* Utility functions for constructing various kinds of syndrome value.
 * Note that in general we follow the AArch64 syndrome values; in a
 * few cases the value in HSR for exceptions taken to AArch32 Hyp
 * mode differs slightly, so if we ever implemented Hyp mode then the
 * syndrome value would need some massaging on exception entry.
 * (One example of this is that AArch64 defaults to IL bit set for
 * exceptions which don't specifically indicate information about the
 * trapping instruction, whereas AArch32 defaults to IL bit clear.)
 */
static inline uint32_t syn_uncategorized(void)
{
    return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_svc(uint32_t imm16)
{
    return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_hvc(uint32_t imm16)
{
    return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa64_smc(uint32_t imm16)
{
    return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
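/*
 * Worked example, derived purely from the definitions above: an SVC
 * taken from AArch64 with a zero immediate encodes as
 *     syn_aa64_svc(0) == 0x56000000
 * ie EC = 0x15 in bits [31:26] and the IL bit (25) set, since an
 * AArch64 SVC is always a 32-bit instruction.
 */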
static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa32_hvc(uint32_t imm16)
{
    return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_smc(void)
{
    return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
{
    return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}

static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
    return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
        | (is_16bit ? 0 : ARM_EL_IL);
}

static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
                                           int crn, int crm, int rt,
                                           int isread)
{
    return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
        | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
        | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
                                        int crn, int crm, int rt, int isread,
                                        bool is_16bit)
{
    return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
        | (crn << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
                                         int rt, int rt2, int isread,
                                         bool is_16bit)
{
    return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | (opc1 << 16)
        | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}

static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
    return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20);
}
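/*
 * Note on the cv/cond arguments to the trap constructors above: they
 * fill the CV and COND fields of the ISS for conditional A32/T32
 * instructions. As an illustration, an FP access trap where there is
 * no trapping condition to report (eg from AArch64) would be built as
 * syn_fp_access_trap(1, 0xe, false): CV = 1 with the "always"
 * condition code 0xe.
 */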
static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
{
    return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
}

static inline uint32_t syn_data_abort_no_iss(int same_el,
                                             int ea, int cm, int s1ptw,
                                             int wnr, int fsc)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL
        | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

static inline uint32_t syn_data_abort_with_iss(int same_el,
                                               int sas, int sse, int srt,
                                               int sf, int ar,
                                               int ea, int cm, int s1ptw,
                                               int wnr, int fsc,
                                               bool is_16bit)
{
    return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
        | (sf << 15) | (ar << 14)
        | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
}

/* The 0x22 in the low bits of the debug exception syndromes below is
 * the IFSC/DFSC fault status code meaning "debug exception".
 */
static inline uint32_t syn_swstep(int same_el, int isv, int ex)
{
    return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
}

static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
{
    return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
}

static inline uint32_t syn_breakpoint(int same_el)
{
    return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
        | ARM_EL_IL | 0x22;
}

static inline uint32_t syn_wfx(int cv, int cond, int ti, bool is_16bit)
{
    return (EC_WFX_TRAP << ARM_EL_EC_SHIFT)
        | (is_16bit ? 0 : ARM_EL_IL)
        | (cv << 24) | (cond << 20) | ti;
}
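/*
 * Another worked example from the encodings above: a level-3
 * translation fault on a data access taken from a lower exception
 * level, with no instruction syndrome available, is
 *     syn_data_abort_no_iss(0, 0, 0, 0, 0, 0x7) == 0x92000007
 * ie EC = 0x24, IL set, DFSC = 0x07.
 */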
/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#ifdef CONFIG_USER_ONLY
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
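/*
 * For instance (values read off the switch above, purely
 * illustrative): a level-1 translation fault in domain 5 comes out of
 * arm_fi_to_sfsc() as 0x55, ie FSC = 0x5 with the domain in bits [7:4].
 */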
/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
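/*
 * Continuing the example above: the same level-1 translation fault
 * encoded for a long-descriptor regime is arm_fi_to_lfsc() == 0x205,
 * ie FSC = 0x05 plus the always-set LPAE bit (9). Long-format status
 * codes have no domain field.
 */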
static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

/* Do a page table walk and add page to TLB if possible */
bool arm_tlb_fill(CPUState *cpu, vaddr address,
                  MMUAccessType access_type, int mmu_idx,
                  ARMMMUFaultInfo *fi);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr);

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call the EL change hook if one has been registered */
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    if (cpu->el_change_hook) {
        cpu->el_change_hook(cpu, cpu->el_change_hook_opaque);
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_S12NSE0:
    case ARMMMUIdx_S12NSE1:
    case ARMMMUIdx_S1NSE0:
    case ARMMMUIdx_S1NSE1:
    case ARMMMUIdx_S1E2:
    case ARMMMUIdx_S2NS:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_S1E3:
    case ARMMMUIdx_S1SE0:
    case ARMMMUIdx_S1SE1:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

#endif