1 #include "qemu/osdep.h" 2 #include "trace.h" 3 #include "cpu.h" 4 #include "internals.h" 5 #include "exec/gdbstub.h" 6 #include "exec/helper-proto.h" 7 #include "qemu/host-utils.h" 8 #include "sysemu/arch_init.h" 9 #include "sysemu/sysemu.h" 10 #include "qemu/bitops.h" 11 #include "qemu/crc32c.h" 12 #include "exec/exec-all.h" 13 #include "exec/cpu_ldst.h" 14 #include "arm_ldst.h" 15 #include <zlib.h> /* For crc32 */ 16 #include "exec/semihost.h" 17 #include "sysemu/kvm.h" 18 19 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 20 21 #ifndef CONFIG_USER_ONLY 22 /* Cacheability and shareability attributes for a memory access */ 23 typedef struct ARMCacheAttrs { 24 unsigned int attrs:8; /* as in the MAIR register encoding */ 25 unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */ 26 } ARMCacheAttrs; 27 28 static bool get_phys_addr(CPUARMState *env, target_ulong address, 29 MMUAccessType access_type, ARMMMUIdx mmu_idx, 30 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 31 target_ulong *page_size, 32 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 33 34 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 35 MMUAccessType access_type, ARMMMUIdx mmu_idx, 36 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 37 target_ulong *page_size_ptr, 38 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 39 40 /* Security attributes for an address, as returned by v8m_security_lookup. */ 41 typedef struct V8M_SAttributes { 42 bool ns; 43 bool nsc; 44 uint8_t sregion; 45 bool srvalid; 46 uint8_t iregion; 47 bool irvalid; 48 } V8M_SAttributes; 49 50 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 51 MMUAccessType access_type, ARMMMUIdx mmu_idx, 52 V8M_SAttributes *sattrs); 53 54 /* Definitions for the PMCCNTR and PMCR registers */ 55 #define PMCRD 0x8 56 #define PMCRC 0x4 57 #define PMCRE 0x1 58 #endif 59 60 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 61 { 62 int nregs; 63 64 /* VFP data registers are always little-endian. */ 65 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 66 if (reg < nregs) { 67 stq_le_p(buf, *aa32_vfp_dreg(env, reg)); 68 return 8; 69 } 70 if (arm_feature(env, ARM_FEATURE_NEON)) { 71 /* Aliases for Q regs. */ 72 nregs += 16; 73 if (reg < nregs) { 74 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 75 stq_le_p(buf, q[0]); 76 stq_le_p(buf + 8, q[1]); 77 return 16; 78 } 79 } 80 switch (reg - nregs) { 81 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4; 82 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4; 83 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4; 84 } 85 return 0; 86 } 87 88 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 89 { 90 int nregs; 91 92 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 93 if (reg < nregs) { 94 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); 95 return 8; 96 } 97 if (arm_feature(env, ARM_FEATURE_NEON)) { 98 nregs += 16; 99 if (reg < nregs) { 100 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 101 q[0] = ldq_le_p(buf); 102 q[1] = ldq_le_p(buf + 8); 103 return 16; 104 } 105 } 106 switch (reg - nregs) { 107 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; 108 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4; 109 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; 110 } 111 return 0; 112 } 113 114 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 115 { 116 switch (reg) { 117 case 0 ... 
31: 118 /* 128 bit FP register */ 119 { 120 uint64_t *q = aa64_vfp_qreg(env, reg); 121 stq_le_p(buf, q[0]); 122 stq_le_p(buf + 8, q[1]); 123 return 16; 124 } 125 case 32: 126 /* FPSR */ 127 stl_p(buf, vfp_get_fpsr(env)); 128 return 4; 129 case 33: 130 /* FPCR */ 131 stl_p(buf, vfp_get_fpcr(env)); 132 return 4; 133 default: 134 return 0; 135 } 136 } 137 138 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 139 { 140 switch (reg) { 141 case 0 ... 31: 142 /* 128 bit FP register */ 143 { 144 uint64_t *q = aa64_vfp_qreg(env, reg); 145 q[0] = ldq_le_p(buf); 146 q[1] = ldq_le_p(buf + 8); 147 return 16; 148 } 149 case 32: 150 /* FPSR */ 151 vfp_set_fpsr(env, ldl_p(buf)); 152 return 4; 153 case 33: 154 /* FPCR */ 155 vfp_set_fpcr(env, ldl_p(buf)); 156 return 4; 157 default: 158 return 0; 159 } 160 } 161 162 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 163 { 164 assert(ri->fieldoffset); 165 if (cpreg_field_is_64bit(ri)) { 166 return CPREG_FIELD64(env, ri); 167 } else { 168 return CPREG_FIELD32(env, ri); 169 } 170 } 171 172 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 173 uint64_t value) 174 { 175 assert(ri->fieldoffset); 176 if (cpreg_field_is_64bit(ri)) { 177 CPREG_FIELD64(env, ri) = value; 178 } else { 179 CPREG_FIELD32(env, ri) = value; 180 } 181 } 182 183 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 184 { 185 return (char *)env + ri->fieldoffset; 186 } 187 188 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 189 { 190 /* Raw read of a coprocessor register (as needed for migration, etc). */ 191 if (ri->type & ARM_CP_CONST) { 192 return ri->resetvalue; 193 } else if (ri->raw_readfn) { 194 return ri->raw_readfn(env, ri); 195 } else if (ri->readfn) { 196 return ri->readfn(env, ri); 197 } else { 198 return raw_read(env, ri); 199 } 200 } 201 202 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 203 uint64_t v) 204 { 205 /* Raw write of a coprocessor register (as needed for migration, etc). 206 * Note that constant registers are treated as write-ignored; the 207 * caller should check for success by whether a readback gives the 208 * value written. 209 */ 210 if (ri->type & ARM_CP_CONST) { 211 return; 212 } else if (ri->raw_writefn) { 213 ri->raw_writefn(env, ri, v); 214 } else if (ri->writefn) { 215 ri->writefn(env, ri, v); 216 } else { 217 raw_write(env, ri, v); 218 } 219 } 220 221 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 222 { 223 /* Return true if the regdef would cause an assertion if you called 224 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 225 * program bug for it not to have the NO_RAW flag). 226 * NB that returning false here doesn't necessarily mean that calling 227 * read/write_raw_cp_reg() is safe, because we can't distinguish "has 228 * read/write access functions which are safe for raw use" from "has 229 * read/write access functions which have side effects but has forgotten 230 * to provide raw access functions". 231 * The tests here line up with the conditions in read/write_raw_cp_reg() 232 * and assertions in raw_read()/raw_write(). 233 */ 234 if ((ri->type & ARM_CP_CONST) || 235 ri->fieldoffset || 236 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { 237 return false; 238 } 239 return true; 240 } 241 242 bool write_cpustate_to_list(ARMCPU *cpu) 243 { 244 /* Write the coprocessor state from cpu->env to the (index,value) list. 
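 * Returns false if any index in cpreg_indexes[] no longer has a matching
 * reginfo; registers marked ARM_CP_NO_RAW are simply skipped rather than
 * treated as failures.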
*/ 245 int i; 246 bool ok = true; 247 248 for (i = 0; i < cpu->cpreg_array_len; i++) { 249 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 250 const ARMCPRegInfo *ri; 251 252 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 253 if (!ri) { 254 ok = false; 255 continue; 256 } 257 if (ri->type & ARM_CP_NO_RAW) { 258 continue; 259 } 260 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri); 261 } 262 return ok; 263 } 264 265 bool write_list_to_cpustate(ARMCPU *cpu) 266 { 267 int i; 268 bool ok = true; 269 270 for (i = 0; i < cpu->cpreg_array_len; i++) { 271 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 272 uint64_t v = cpu->cpreg_values[i]; 273 const ARMCPRegInfo *ri; 274 275 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 276 if (!ri) { 277 ok = false; 278 continue; 279 } 280 if (ri->type & ARM_CP_NO_RAW) { 281 continue; 282 } 283 /* Write value and confirm it reads back as written 284 * (to catch read-only registers and partially read-only 285 * registers where the incoming migration value doesn't match) 286 */ 287 write_raw_cp_reg(&cpu->env, ri, v); 288 if (read_raw_cp_reg(&cpu->env, ri) != v) { 289 ok = false; 290 } 291 } 292 return ok; 293 } 294 295 static void add_cpreg_to_list(gpointer key, gpointer opaque) 296 { 297 ARMCPU *cpu = opaque; 298 uint64_t regidx; 299 const ARMCPRegInfo *ri; 300 301 regidx = *(uint32_t *)key; 302 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 303 304 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 305 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); 306 /* The value array need not be initialized at this point */ 307 cpu->cpreg_array_len++; 308 } 309 } 310 311 static void count_cpreg(gpointer key, gpointer opaque) 312 { 313 ARMCPU *cpu = opaque; 314 uint64_t regidx; 315 const ARMCPRegInfo *ri; 316 317 regidx = *(uint32_t *)key; 318 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 319 320 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 321 cpu->cpreg_array_len++; 322 } 323 } 324 325 static gint cpreg_key_compare(gconstpointer a, gconstpointer b) 326 { 327 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); 328 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); 329 330 if (aidx > bidx) { 331 return 1; 332 } 333 if (aidx < bidx) { 334 return -1; 335 } 336 return 0; 337 } 338 339 void init_cpreg_list(ARMCPU *cpu) 340 { 341 /* Initialise the cpreg_tuples[] array based on the cp_regs hash. 342 * Note that we require cpreg_tuples[] to be sorted by key ID. 343 */ 344 GList *keys; 345 int arraylen; 346 347 keys = g_hash_table_get_keys(cpu->cp_regs); 348 keys = g_list_sort(keys, cpreg_key_compare); 349 350 cpu->cpreg_array_len = 0; 351 352 g_list_foreach(keys, count_cpreg, cpu); 353 354 arraylen = cpu->cpreg_array_len; 355 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 356 cpu->cpreg_values = g_new(uint64_t, arraylen); 357 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 358 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 359 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 360 cpu->cpreg_array_len = 0; 361 362 g_list_foreach(keys, add_cpreg_to_list, cpu); 363 364 assert(cpu->cpreg_array_len == arraylen); 365 366 g_list_free(keys); 367 } 368 369 /* 370 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but 371 * they are accessible when EL3 is using AArch64 regardless of EL3.NS. 372 * 373 * access_el3_aa32ns: Used to check AArch32 register views. 374 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views. 
375 */ 376 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 377 const ARMCPRegInfo *ri, 378 bool isread) 379 { 380 bool secure = arm_is_secure_below_el3(env); 381 382 assert(!arm_el_is_aa64(env, 3)); 383 if (secure) { 384 return CP_ACCESS_TRAP_UNCATEGORIZED; 385 } 386 return CP_ACCESS_OK; 387 } 388 389 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env, 390 const ARMCPRegInfo *ri, 391 bool isread) 392 { 393 if (!arm_el_is_aa64(env, 3)) { 394 return access_el3_aa32ns(env, ri, isread); 395 } 396 return CP_ACCESS_OK; 397 } 398 399 /* Some secure-only AArch32 registers trap to EL3 if used from 400 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 401 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 402 * We assume that the .access field is set to PL1_RW. 403 */ 404 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 405 const ARMCPRegInfo *ri, 406 bool isread) 407 { 408 if (arm_current_el(env) == 3) { 409 return CP_ACCESS_OK; 410 } 411 if (arm_is_secure_below_el3(env)) { 412 return CP_ACCESS_TRAP_EL3; 413 } 414 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 415 return CP_ACCESS_TRAP_UNCATEGORIZED; 416 } 417 418 /* Check for traps to "powerdown debug" registers, which are controlled 419 * by MDCR.TDOSA 420 */ 421 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 422 bool isread) 423 { 424 int el = arm_current_el(env); 425 426 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA) 427 && !arm_is_secure_below_el3(env)) { 428 return CP_ACCESS_TRAP_EL2; 429 } 430 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 431 return CP_ACCESS_TRAP_EL3; 432 } 433 return CP_ACCESS_OK; 434 } 435 436 /* Check for traps to "debug ROM" registers, which are controlled 437 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 438 */ 439 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 440 bool isread) 441 { 442 int el = arm_current_el(env); 443 444 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA) 445 && !arm_is_secure_below_el3(env)) { 446 return CP_ACCESS_TRAP_EL2; 447 } 448 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 449 return CP_ACCESS_TRAP_EL3; 450 } 451 return CP_ACCESS_OK; 452 } 453 454 /* Check for traps to general debug registers, which are controlled 455 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 456 */ 457 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 458 bool isread) 459 { 460 int el = arm_current_el(env); 461 462 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA) 463 && !arm_is_secure_below_el3(env)) { 464 return CP_ACCESS_TRAP_EL2; 465 } 466 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 467 return CP_ACCESS_TRAP_EL3; 468 } 469 return CP_ACCESS_OK; 470 } 471 472 /* Check for traps to performance monitor registers, which are controlled 473 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 
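 * For example, with MDCR_EL2.TPM and MDCR_EL3.TPM both set, a Non-secure
 * EL1 access traps to EL2 (the EL2 check is made first), while the same
 * access from Secure EL1 skips the EL2 check and traps to EL3.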
474 */ 475 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 476 bool isread) 477 { 478 int el = arm_current_el(env); 479 480 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 481 && !arm_is_secure_below_el3(env)) { 482 return CP_ACCESS_TRAP_EL2; 483 } 484 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 485 return CP_ACCESS_TRAP_EL3; 486 } 487 return CP_ACCESS_OK; 488 } 489 490 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 491 { 492 ARMCPU *cpu = arm_env_get_cpu(env); 493 494 raw_write(env, ri, value); 495 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 496 } 497 498 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 499 { 500 ARMCPU *cpu = arm_env_get_cpu(env); 501 502 if (raw_read(env, ri) != value) { 503 /* Unlike real hardware the qemu TLB uses virtual addresses, 504 * not modified virtual addresses, so this causes a TLB flush. 505 */ 506 tlb_flush(CPU(cpu)); 507 raw_write(env, ri, value); 508 } 509 } 510 511 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 512 uint64_t value) 513 { 514 ARMCPU *cpu = arm_env_get_cpu(env); 515 516 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 517 && !extended_addresses_enabled(env)) { 518 /* For VMSA (when not using the LPAE long descriptor page table 519 * format) this register includes the ASID, so do a TLB flush. 520 * For PMSA it is purely a process ID and no action is needed. 521 */ 522 tlb_flush(CPU(cpu)); 523 } 524 raw_write(env, ri, value); 525 } 526 527 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, 528 uint64_t value) 529 { 530 /* Invalidate all (TLBIALL) */ 531 ARMCPU *cpu = arm_env_get_cpu(env); 532 533 tlb_flush(CPU(cpu)); 534 } 535 536 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, 537 uint64_t value) 538 { 539 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 540 ARMCPU *cpu = arm_env_get_cpu(env); 541 542 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); 543 } 544 545 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, 546 uint64_t value) 547 { 548 /* Invalidate by ASID (TLBIASID) */ 549 ARMCPU *cpu = arm_env_get_cpu(env); 550 551 tlb_flush(CPU(cpu)); 552 } 553 554 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, 555 uint64_t value) 556 { 557 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 558 ARMCPU *cpu = arm_env_get_cpu(env); 559 560 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); 561 } 562 563 /* IS variants of TLB operations must affect all cores */ 564 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 565 uint64_t value) 566 { 567 CPUState *cs = ENV_GET_CPU(env); 568 569 tlb_flush_all_cpus_synced(cs); 570 } 571 572 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 573 uint64_t value) 574 { 575 CPUState *cs = ENV_GET_CPU(env); 576 577 tlb_flush_all_cpus_synced(cs); 578 } 579 580 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 581 uint64_t value) 582 { 583 CPUState *cs = ENV_GET_CPU(env); 584 585 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 586 } 587 588 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 589 uint64_t value) 590 { 591 CPUState *cs = ENV_GET_CPU(env); 592 593 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 594 } 595 596 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 597 uint64_t value) 598 { 599 CPUState *cs = 
ENV_GET_CPU(env); 600 601 tlb_flush_by_mmuidx(cs, 602 ARMMMUIdxBit_S12NSE1 | 603 ARMMMUIdxBit_S12NSE0 | 604 ARMMMUIdxBit_S2NS); 605 } 606 607 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 608 uint64_t value) 609 { 610 CPUState *cs = ENV_GET_CPU(env); 611 612 tlb_flush_by_mmuidx_all_cpus_synced(cs, 613 ARMMMUIdxBit_S12NSE1 | 614 ARMMMUIdxBit_S12NSE0 | 615 ARMMMUIdxBit_S2NS); 616 } 617 618 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, 619 uint64_t value) 620 { 621 /* Invalidate by IPA. This has to invalidate any structures that 622 * contain only stage 2 translation information, but does not need 623 * to apply to structures that contain combined stage 1 and stage 2 624 * translation information. 625 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 626 */ 627 CPUState *cs = ENV_GET_CPU(env); 628 uint64_t pageaddr; 629 630 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 631 return; 632 } 633 634 pageaddr = sextract64(value << 12, 0, 40); 635 636 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 637 } 638 639 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 640 uint64_t value) 641 { 642 CPUState *cs = ENV_GET_CPU(env); 643 uint64_t pageaddr; 644 645 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 646 return; 647 } 648 649 pageaddr = sextract64(value << 12, 0, 40); 650 651 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 652 ARMMMUIdxBit_S2NS); 653 } 654 655 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 656 uint64_t value) 657 { 658 CPUState *cs = ENV_GET_CPU(env); 659 660 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 661 } 662 663 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 664 uint64_t value) 665 { 666 CPUState *cs = ENV_GET_CPU(env); 667 668 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 669 } 670 671 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 672 uint64_t value) 673 { 674 CPUState *cs = ENV_GET_CPU(env); 675 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 676 677 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 678 } 679 680 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 681 uint64_t value) 682 { 683 CPUState *cs = ENV_GET_CPU(env); 684 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 685 686 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 687 ARMMMUIdxBit_S1E2); 688 } 689 690 static const ARMCPRegInfo cp_reginfo[] = { 691 /* Define the secure and non-secure FCSE identifier CP registers 692 * separately because there is no secure bank in V8 (no _EL3). This allows 693 * the secure register to be properly reset and migrated. There is also no 694 * v8 EL1 version of the register so the non-secure instance stands alone. 
695 */ 696 { .name = "FCSEIDR(NS)", 697 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 698 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 699 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), 700 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 701 { .name = "FCSEIDR(S)", 702 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 703 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 704 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), 705 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 706 /* Define the secure and non-secure context identifier CP registers 707 * separately because there is no secure bank in V8 (no _EL3). This allows 708 * the secure register to be properly reset and migrated. In the 709 * non-secure case, the 32-bit register will have reset and migration 710 * disabled during registration as it is handled by the 64-bit instance. 711 */ 712 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 713 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 714 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 715 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 716 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 717 { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32, 718 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 719 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 720 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 721 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 722 REGINFO_SENTINEL 723 }; 724 725 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 726 /* NB: Some of these registers exist in v8 but with more precise 727 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 728 */ 729 /* MMU Domain access control / MPU write buffer control */ 730 { .name = "DACR", 731 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 732 .access = PL1_RW, .resetvalue = 0, 733 .writefn = dacr_write, .raw_writefn = raw_write, 734 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 735 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 736 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 737 * For v6 and v5, these mappings are overly broad. 738 */ 739 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 740 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 741 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 742 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 743 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 744 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 745 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 746 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 747 /* Cache maintenance ops; some of this space may be overridden later. */ 748 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 749 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 750 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 751 REGINFO_SENTINEL 752 }; 753 754 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 755 /* Not all pre-v6 cores implemented this WFI, so this is slightly 756 * over-broad. 
757 */ 758 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 759 .access = PL1_W, .type = ARM_CP_WFI }, 760 REGINFO_SENTINEL 761 }; 762 763 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 764 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 765 * is UNPREDICTABLE; we choose to NOP as most implementations do). 766 */ 767 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 768 .access = PL1_W, .type = ARM_CP_WFI }, 769 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 770 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 771 * OMAPCP will override this space. 772 */ 773 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 774 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 775 .resetvalue = 0 }, 776 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 777 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 778 .resetvalue = 0 }, 779 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 780 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 781 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 782 .resetvalue = 0 }, 783 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 784 * implementing it as RAZ means the "debug architecture version" bits 785 * will read as a reserved value, which should cause Linux to not try 786 * to use the debug hardware. 787 */ 788 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 789 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 790 /* MMU TLB control. Note that the wildcarding means we cover not just 791 * the unified TLB ops but also the dside/iside/inner-shareable variants. 792 */ 793 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 794 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 795 .type = ARM_CP_NO_RAW }, 796 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 797 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 798 .type = ARM_CP_NO_RAW }, 799 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 800 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 801 .type = ARM_CP_NO_RAW }, 802 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 803 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 804 .type = ARM_CP_NO_RAW }, 805 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 806 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 807 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 808 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 809 REGINFO_SENTINEL 810 }; 811 812 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 813 uint64_t value) 814 { 815 uint32_t mask = 0; 816 817 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 818 if (!arm_feature(env, ARM_FEATURE_V8)) { 819 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 820 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 821 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 822 */ 823 if (arm_feature(env, ARM_FEATURE_VFP)) { 824 /* VFP coprocessor: cp10 & cp11 [23:20] */ 825 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 826 827 if (!arm_feature(env, ARM_FEATURE_NEON)) { 828 /* ASEDIS [31] bit is RAO/WI */ 829 value |= (1 << 31); 830 } 831 832 /* VFPv3 and upwards with NEON implement 32 double precision 833 * registers (D0-D31). 
834 */ 835 if (!arm_feature(env, ARM_FEATURE_NEON) || 836 !arm_feature(env, ARM_FEATURE_VFP3)) { 837 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 838 value |= (1 << 30); 839 } 840 } 841 value &= mask; 842 } 843 env->cp15.cpacr_el1 = value; 844 } 845 846 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 847 bool isread) 848 { 849 if (arm_feature(env, ARM_FEATURE_V8)) { 850 /* Check if CPACR accesses are to be trapped to EL2 */ 851 if (arm_current_el(env) == 1 && 852 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { 853 return CP_ACCESS_TRAP_EL2; 854 /* Check if CPACR accesses are to be trapped to EL3 */ 855 } else if (arm_current_el(env) < 3 && 856 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 857 return CP_ACCESS_TRAP_EL3; 858 } 859 } 860 861 return CP_ACCESS_OK; 862 } 863 864 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 865 bool isread) 866 { 867 /* Check if CPTR accesses are set to trap to EL3 */ 868 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 869 return CP_ACCESS_TRAP_EL3; 870 } 871 872 return CP_ACCESS_OK; 873 } 874 875 static const ARMCPRegInfo v6_cp_reginfo[] = { 876 /* prefetch by MVA in v6, NOP in v7 */ 877 { .name = "MVA_prefetch", 878 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 879 .access = PL1_W, .type = ARM_CP_NOP }, 880 /* We need to break the TB after ISB to execute self-modifying code 881 * correctly and also to take any pending interrupts immediately. 882 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 883 */ 884 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 885 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 886 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 887 .access = PL0_W, .type = ARM_CP_NOP }, 888 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 889 .access = PL0_W, .type = ARM_CP_NOP }, 890 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 891 .access = PL1_RW, 892 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 893 offsetof(CPUARMState, cp15.ifar_ns) }, 894 .resetvalue = 0, }, 895 /* Watchpoint Fault Address Register : should actually only be present 896 * for 1136, 1176, 11MPCore. 897 */ 898 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 899 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 900 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 901 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 902 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 903 .resetvalue = 0, .writefn = cpacr_write }, 904 REGINFO_SENTINEL 905 }; 906 907 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 908 bool isread) 909 { 910 /* Performance monitor registers user accessibility is controlled 911 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 912 * trapping to EL2 or EL3 for other accesses. 
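 * The PMUSERENR bits consulted by pmreg_access() and the pmreg_access_*
 * helpers below are: EN (bit 0, general EL0 access), SW (bit 1, EL0 writes
 * to PMSWINC), CR (bit 2, EL0 reads of the cycle counter) and ER (bit 3,
 * EL0 reads of the event counters and PMSELR).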
913 */ 914 int el = arm_current_el(env); 915 916 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 917 return CP_ACCESS_TRAP; 918 } 919 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 920 && !arm_is_secure_below_el3(env)) { 921 return CP_ACCESS_TRAP_EL2; 922 } 923 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 924 return CP_ACCESS_TRAP_EL3; 925 } 926 927 return CP_ACCESS_OK; 928 } 929 930 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 931 const ARMCPRegInfo *ri, 932 bool isread) 933 { 934 /* ER: event counter read trap control */ 935 if (arm_feature(env, ARM_FEATURE_V8) 936 && arm_current_el(env) == 0 937 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 938 && isread) { 939 return CP_ACCESS_OK; 940 } 941 942 return pmreg_access(env, ri, isread); 943 } 944 945 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 946 const ARMCPRegInfo *ri, 947 bool isread) 948 { 949 /* SW: software increment write trap control */ 950 if (arm_feature(env, ARM_FEATURE_V8) 951 && arm_current_el(env) == 0 952 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 953 && !isread) { 954 return CP_ACCESS_OK; 955 } 956 957 return pmreg_access(env, ri, isread); 958 } 959 960 #ifndef CONFIG_USER_ONLY 961 962 static CPAccessResult pmreg_access_selr(CPUARMState *env, 963 const ARMCPRegInfo *ri, 964 bool isread) 965 { 966 /* ER: event counter read trap control */ 967 if (arm_feature(env, ARM_FEATURE_V8) 968 && arm_current_el(env) == 0 969 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 970 return CP_ACCESS_OK; 971 } 972 973 return pmreg_access(env, ri, isread); 974 } 975 976 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 977 const ARMCPRegInfo *ri, 978 bool isread) 979 { 980 /* CR: cycle counter read trap control */ 981 if (arm_feature(env, ARM_FEATURE_V8) 982 && arm_current_el(env) == 0 983 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 984 && isread) { 985 return CP_ACCESS_OK; 986 } 987 988 return pmreg_access(env, ri, isread); 989 } 990 991 static inline bool arm_ccnt_enabled(CPUARMState *env) 992 { 993 /* This does not support checking PMCCFILTR_EL0 register */ 994 995 if (!(env->cp15.c9_pmcr & PMCRE)) { 996 return false; 997 } 998 999 return true; 1000 } 1001 1002 void pmccntr_sync(CPUARMState *env) 1003 { 1004 uint64_t temp_ticks; 1005 1006 temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1007 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1008 1009 if (env->cp15.c9_pmcr & PMCRD) { 1010 /* Increment once every 64 processor clock cycles */ 1011 temp_ticks /= 64; 1012 } 1013 1014 if (arm_ccnt_enabled(env)) { 1015 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt; 1016 } 1017 } 1018 1019 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1020 uint64_t value) 1021 { 1022 pmccntr_sync(env); 1023 1024 if (value & PMCRC) { 1025 /* The counter has been reset */ 1026 env->cp15.c15_ccnt = 0; 1027 } 1028 1029 /* only the DP, X, D and E bits are writable */ 1030 env->cp15.c9_pmcr &= ~0x39; 1031 env->cp15.c9_pmcr |= (value & 0x39); 1032 1033 pmccntr_sync(env); 1034 } 1035 1036 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1037 { 1038 uint64_t total_ticks; 1039 1040 if (!arm_ccnt_enabled(env)) { 1041 /* Counter is disabled, do not change value */ 1042 return env->cp15.c15_ccnt; 1043 } 1044 1045 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1046 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1047 1048 if (env->cp15.c9_pmcr & PMCRD) { 1049 /* Increment once every 64 processor clock cycles */ 1050 total_ticks /= 64; 1051 } 1052 return total_ticks - env->cp15.c15_ccnt; 
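    /* Note on the representation shared by pmccntr_read(), pmccntr_sync()
     * and pmccntr_write(): while the counter is enabled, c15_ccnt holds the
     * difference (scaled clock ticks - guest counter value) rather than the
     * counter itself.  For example, if the guest writes 100 when the scaled
     * clock reads 1000 ticks, c15_ccnt becomes 900, and a read 500 ticks
     * later returns 1500 - 900 = 600, i.e. the written value plus the
     * elapsed time.
     */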
1053 } 1054 1055 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1056 uint64_t value) 1057 { 1058 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and 1059 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the 1060 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are 1061 * accessed. 1062 */ 1063 env->cp15.c9_pmselr = value & 0x1f; 1064 } 1065 1066 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1067 uint64_t value) 1068 { 1069 uint64_t total_ticks; 1070 1071 if (!arm_ccnt_enabled(env)) { 1072 /* Counter is disabled, set the absolute value */ 1073 env->cp15.c15_ccnt = value; 1074 return; 1075 } 1076 1077 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1078 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1079 1080 if (env->cp15.c9_pmcr & PMCRD) { 1081 /* Increment once every 64 processor clock cycles */ 1082 total_ticks /= 64; 1083 } 1084 env->cp15.c15_ccnt = total_ticks - value; 1085 } 1086 1087 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1088 uint64_t value) 1089 { 1090 uint64_t cur_val = pmccntr_read(env, NULL); 1091 1092 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1093 } 1094 1095 #else /* CONFIG_USER_ONLY */ 1096 1097 void pmccntr_sync(CPUARMState *env) 1098 { 1099 } 1100 1101 #endif 1102 1103 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1104 uint64_t value) 1105 { 1106 pmccntr_sync(env); 1107 env->cp15.pmccfiltr_el0 = value & 0x7E000000; 1108 pmccntr_sync(env); 1109 } 1110 1111 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1112 uint64_t value) 1113 { 1114 value &= (1 << 31); 1115 env->cp15.c9_pmcnten |= value; 1116 } 1117 1118 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1119 uint64_t value) 1120 { 1121 value &= (1 << 31); 1122 env->cp15.c9_pmcnten &= ~value; 1123 } 1124 1125 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1126 uint64_t value) 1127 { 1128 env->cp15.c9_pmovsr &= ~value; 1129 } 1130 1131 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1132 uint64_t value) 1133 { 1134 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1135 * PMSELR value is equal to or greater than the number of implemented 1136 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 1137 */ 1138 if (env->cp15.c9_pmselr == 0x1f) { 1139 pmccfiltr_write(env, ri, value); 1140 } 1141 } 1142 1143 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) 1144 { 1145 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER 1146 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write(). 
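 * A PMSELR.SEL value of 0x1f selects the cycle counter, so in that case
 * PMXEVTYPER is effectively an alias of PMCCFILTR_EL0; pmxevtyper_write()
 * and the code below implement exactly that.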
1147 */ 1148 if (env->cp15.c9_pmselr == 0x1f) { 1149 return env->cp15.pmccfiltr_el0; 1150 } else { 1151 return 0; 1152 } 1153 } 1154 1155 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1156 uint64_t value) 1157 { 1158 if (arm_feature(env, ARM_FEATURE_V8)) { 1159 env->cp15.c9_pmuserenr = value & 0xf; 1160 } else { 1161 env->cp15.c9_pmuserenr = value & 1; 1162 } 1163 } 1164 1165 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1166 uint64_t value) 1167 { 1168 /* We have no event counters so only the C bit can be changed */ 1169 value &= (1 << 31); 1170 env->cp15.c9_pminten |= value; 1171 } 1172 1173 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1174 uint64_t value) 1175 { 1176 value &= (1 << 31); 1177 env->cp15.c9_pminten &= ~value; 1178 } 1179 1180 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 1181 uint64_t value) 1182 { 1183 /* Note that even though the AArch64 view of this register has bits 1184 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 1185 * architectural requirements for bits which are RES0 only in some 1186 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 1187 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 1188 */ 1189 raw_write(env, ri, value & ~0x1FULL); 1190 } 1191 1192 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 1193 { 1194 /* We only mask off bits that are RES0 both for AArch64 and AArch32. 1195 * For bits that vary between AArch32/64, code needs to check the 1196 * current execution mode before directly using the feature bit. 1197 */ 1198 uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK; 1199 1200 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1201 valid_mask &= ~SCR_HCE; 1202 1203 /* On ARMv7, SMD (or SCD as it is called in v7) is only 1204 * supported if EL2 exists. The bit is UNK/SBZP when 1205 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1206 * when EL2 is unavailable. 1207 * On ARMv8, this bit is always available. 1208 */ 1209 if (arm_feature(env, ARM_FEATURE_V7) && 1210 !arm_feature(env, ARM_FEATURE_V8)) { 1211 valid_mask &= ~SCR_SMD; 1212 } 1213 } 1214 1215 /* Clear all-context RES0 bits. 
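 * For example, on a v7 CPU without EL2 this also forces SCR.HCE and
 * SCR.SMD to zero, since those bits were removed from valid_mask above.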
*/ 1216 value &= valid_mask; 1217 raw_write(env, ri, value); 1218 } 1219 1220 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1221 { 1222 ARMCPU *cpu = arm_env_get_cpu(env); 1223 1224 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1225 * bank 1226 */ 1227 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1228 ri->secure & ARM_CP_SECSTATE_S); 1229 1230 return cpu->ccsidr[index]; 1231 } 1232 1233 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1234 uint64_t value) 1235 { 1236 raw_write(env, ri, value & 0xf); 1237 } 1238 1239 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1240 { 1241 CPUState *cs = ENV_GET_CPU(env); 1242 uint64_t ret = 0; 1243 1244 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 1245 ret |= CPSR_I; 1246 } 1247 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 1248 ret |= CPSR_F; 1249 } 1250 /* External aborts are not possible in QEMU so A bit is always clear */ 1251 return ret; 1252 } 1253 1254 static const ARMCPRegInfo v7_cp_reginfo[] = { 1255 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 1256 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 1257 .access = PL1_W, .type = ARM_CP_NOP }, 1258 /* Performance monitors are implementation defined in v7, 1259 * but with an ARM recommended set of registers, which we 1260 * follow (although we don't actually implement any counters) 1261 * 1262 * Performance registers fall into three categories: 1263 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 1264 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 1265 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 1266 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 1267 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
1268 */ 1269 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 1270 .access = PL0_RW, .type = ARM_CP_ALIAS, 1271 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1272 .writefn = pmcntenset_write, 1273 .accessfn = pmreg_access, 1274 .raw_writefn = raw_write }, 1275 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 1276 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 1277 .access = PL0_RW, .accessfn = pmreg_access, 1278 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 1279 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 1280 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 1281 .access = PL0_RW, 1282 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1283 .accessfn = pmreg_access, 1284 .writefn = pmcntenclr_write, 1285 .type = ARM_CP_ALIAS }, 1286 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 1287 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 1288 .access = PL0_RW, .accessfn = pmreg_access, 1289 .type = ARM_CP_ALIAS, 1290 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 1291 .writefn = pmcntenclr_write }, 1292 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 1293 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 1294 .accessfn = pmreg_access, 1295 .writefn = pmovsr_write, 1296 .raw_writefn = raw_write }, 1297 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 1298 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 1299 .access = PL0_RW, .accessfn = pmreg_access, 1300 .type = ARM_CP_ALIAS, 1301 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 1302 .writefn = pmovsr_write, 1303 .raw_writefn = raw_write }, 1304 /* Unimplemented so WI. */ 1305 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 1306 .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP }, 1307 #ifndef CONFIG_USER_ONLY 1308 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 1309 .access = PL0_RW, .type = ARM_CP_ALIAS, 1310 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 1311 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 1312 .raw_writefn = raw_write}, 1313 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 1314 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 1315 .access = PL0_RW, .accessfn = pmreg_access_selr, 1316 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 1317 .writefn = pmselr_write, .raw_writefn = raw_write, }, 1318 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 1319 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO, 1320 .readfn = pmccntr_read, .writefn = pmccntr_write32, 1321 .accessfn = pmreg_access_ccntr }, 1322 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 1323 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 1324 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 1325 .type = ARM_CP_IO, 1326 .readfn = pmccntr_read, .writefn = pmccntr_write, }, 1327 #endif 1328 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 1329 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 1330 .writefn = pmccfiltr_write, 1331 .access = PL0_RW, .accessfn = pmreg_access, 1332 .type = ARM_CP_IO, 1333 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 1334 .resetvalue = 0, }, 1335 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 1336 .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access, 1337 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1338 { 
.name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 1339 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 1340 .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access, 1341 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1342 /* Unimplemented, RAZ/WI. */ 1343 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 1344 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0, 1345 .accessfn = pmreg_access_xevcntr }, 1346 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 1347 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 1348 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 1349 .resetvalue = 0, 1350 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 1351 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 1352 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 1353 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 1354 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 1355 .resetvalue = 0, 1356 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 1357 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 1358 .access = PL1_RW, .accessfn = access_tpm, 1359 .type = ARM_CP_ALIAS, 1360 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 1361 .resetvalue = 0, 1362 .writefn = pmintenset_write, .raw_writefn = raw_write }, 1363 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 1364 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 1365 .access = PL1_RW, .accessfn = access_tpm, 1366 .type = ARM_CP_IO, 1367 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 1368 .writefn = pmintenset_write, .raw_writefn = raw_write, 1369 .resetvalue = 0x0 }, 1370 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 1371 .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 1372 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 1373 .writefn = pmintenclr_write, }, 1374 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 1375 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 1376 .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 1377 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 1378 .writefn = pmintenclr_write }, 1379 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 1380 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 1381 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 1382 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, 1383 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 1384 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0, 1385 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 1386 offsetof(CPUARMState, cp15.csselr_ns) } }, 1387 /* Auxiliary ID register: this actually has an IMPDEF value but for now 1388 * just RAZ for all cores: 1389 */ 1390 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 1391 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 1392 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 1393 /* Auxiliary fault status registers: these also are IMPDEF, and we 1394 * choose to RAZ/WI for all cores. 
1395 */ 1396 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 1397 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 1398 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 1399 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 1400 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 1401 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 1402 /* MAIR can just read-as-written because we don't implement caches 1403 * and so don't need to care about memory attributes. 1404 */ 1405 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 1406 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 1407 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 1408 .resetvalue = 0 }, 1409 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 1410 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 1411 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 1412 .resetvalue = 0 }, 1413 /* For non-long-descriptor page tables these are PRRR and NMRR; 1414 * regardless they still act as reads-as-written for QEMU. 1415 */ 1416 /* MAIR0/1 are defined separately from their 64-bit counterpart which 1417 * allows them to assign the correct fieldoffset based on the endianness 1418 * handled in the field definitions. 1419 */ 1420 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 1421 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, 1422 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 1423 offsetof(CPUARMState, cp15.mair0_ns) }, 1424 .resetfn = arm_cp_reset_ignore }, 1425 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 1426 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, 1427 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 1428 offsetof(CPUARMState, cp15.mair1_ns) }, 1429 .resetfn = arm_cp_reset_ignore }, 1430 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 1431 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 1432 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 1433 /* 32 bit ITLB invalidates */ 1434 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 1435 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 1436 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 1437 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 1438 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 1439 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 1440 /* 32 bit DTLB invalidates */ 1441 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 1442 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 1443 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 1444 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 1445 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 1446 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 1447 /* 32 bit TLB invalidates */ 1448 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 1449 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 1450 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 1451 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 1452 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 1453 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 1454 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, 
.opc2 = 3, 1455 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 1456 REGINFO_SENTINEL 1457 }; 1458 1459 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 1460 /* 32 bit TLB invalidates, Inner Shareable */ 1461 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 1462 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write }, 1463 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 1464 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 1465 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 1466 .type = ARM_CP_NO_RAW, .access = PL1_W, 1467 .writefn = tlbiasid_is_write }, 1468 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 1469 .type = ARM_CP_NO_RAW, .access = PL1_W, 1470 .writefn = tlbimvaa_is_write }, 1471 REGINFO_SENTINEL 1472 }; 1473 1474 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1475 uint64_t value) 1476 { 1477 value &= 1; 1478 env->teecr = value; 1479 } 1480 1481 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 1482 bool isread) 1483 { 1484 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 1485 return CP_ACCESS_TRAP; 1486 } 1487 return CP_ACCESS_OK; 1488 } 1489 1490 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 1491 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 1492 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 1493 .resetvalue = 0, 1494 .writefn = teecr_write }, 1495 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 1496 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 1497 .accessfn = teehbr_access, .resetvalue = 0 }, 1498 REGINFO_SENTINEL 1499 }; 1500 1501 static const ARMCPRegInfo v6k_cp_reginfo[] = { 1502 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 1503 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 1504 .access = PL0_RW, 1505 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 1506 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 1507 .access = PL0_RW, 1508 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 1509 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 1510 .resetfn = arm_cp_reset_ignore }, 1511 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 1512 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 1513 .access = PL0_R|PL1_W, 1514 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 1515 .resetvalue = 0}, 1516 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 1517 .access = PL0_R|PL1_W, 1518 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 1519 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 1520 .resetfn = arm_cp_reset_ignore }, 1521 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 1522 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 1523 .access = PL1_RW, 1524 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 1525 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 1526 .access = PL1_RW, 1527 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 1528 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 1529 .resetvalue = 0 }, 1530 REGINFO_SENTINEL 1531 }; 1532 1533 #ifndef CONFIG_USER_ONLY 1534 1535 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 1536 bool isread) 1537 { 1538 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 
1539 * Writable only at the highest implemented exception level. 1540 */ 1541 int el = arm_current_el(env); 1542 1543 switch (el) { 1544 case 0: 1545 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) { 1546 return CP_ACCESS_TRAP; 1547 } 1548 break; 1549 case 1: 1550 if (!isread && ri->state == ARM_CP_STATE_AA32 && 1551 arm_is_secure_below_el3(env)) { 1552 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 1553 return CP_ACCESS_TRAP_UNCATEGORIZED; 1554 } 1555 break; 1556 case 2: 1557 case 3: 1558 break; 1559 } 1560 1561 if (!isread && el < arm_highest_el(env)) { 1562 return CP_ACCESS_TRAP_UNCATEGORIZED; 1563 } 1564 1565 return CP_ACCESS_OK; 1566 } 1567 1568 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 1569 bool isread) 1570 { 1571 unsigned int cur_el = arm_current_el(env); 1572 bool secure = arm_is_secure(env); 1573 1574 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */ 1575 if (cur_el == 0 && 1576 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 1577 return CP_ACCESS_TRAP; 1578 } 1579 1580 if (arm_feature(env, ARM_FEATURE_EL2) && 1581 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 1582 !extract32(env->cp15.cnthctl_el2, 0, 1)) { 1583 return CP_ACCESS_TRAP_EL2; 1584 } 1585 return CP_ACCESS_OK; 1586 } 1587 1588 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 1589 bool isread) 1590 { 1591 unsigned int cur_el = arm_current_el(env); 1592 bool secure = arm_is_secure(env); 1593 1594 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if 1595 * EL0[PV]TEN is zero. 1596 */ 1597 if (cur_el == 0 && 1598 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 1599 return CP_ACCESS_TRAP; 1600 } 1601 1602 if (arm_feature(env, ARM_FEATURE_EL2) && 1603 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 1604 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 1605 return CP_ACCESS_TRAP_EL2; 1606 } 1607 return CP_ACCESS_OK; 1608 } 1609 1610 static CPAccessResult gt_pct_access(CPUARMState *env, 1611 const ARMCPRegInfo *ri, 1612 bool isread) 1613 { 1614 return gt_counter_access(env, GTIMER_PHYS, isread); 1615 } 1616 1617 static CPAccessResult gt_vct_access(CPUARMState *env, 1618 const ARMCPRegInfo *ri, 1619 bool isread) 1620 { 1621 return gt_counter_access(env, GTIMER_VIRT, isread); 1622 } 1623 1624 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 1625 bool isread) 1626 { 1627 return gt_timer_access(env, GTIMER_PHYS, isread); 1628 } 1629 1630 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 1631 bool isread) 1632 { 1633 return gt_timer_access(env, GTIMER_VIRT, isread); 1634 } 1635 1636 static CPAccessResult gt_stimer_access(CPUARMState *env, 1637 const ARMCPRegInfo *ri, 1638 bool isread) 1639 { 1640 /* The AArch64 register view of the secure physical timer is 1641 * always accessible from EL3, and configurably accessible from 1642 * Secure EL1. 
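 * Summarising the cases handled below: EL3 accesses always succeed;
 * Secure EL1 succeeds only when SCR_EL3.ST is set (otherwise it traps
 * to EL3); Non-secure EL1 and all EL0 or EL2 accesses simply trap.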
1643 */ 1644 switch (arm_current_el(env)) { 1645 case 1: 1646 if (!arm_is_secure(env)) { 1647 return CP_ACCESS_TRAP; 1648 } 1649 if (!(env->cp15.scr_el3 & SCR_ST)) { 1650 return CP_ACCESS_TRAP_EL3; 1651 } 1652 return CP_ACCESS_OK; 1653 case 0: 1654 case 2: 1655 return CP_ACCESS_TRAP; 1656 case 3: 1657 return CP_ACCESS_OK; 1658 default: 1659 g_assert_not_reached(); 1660 } 1661 } 1662 1663 static uint64_t gt_get_countervalue(CPUARMState *env) 1664 { 1665 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; 1666 } 1667 1668 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 1669 { 1670 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 1671 1672 if (gt->ctl & 1) { 1673 /* Timer enabled: calculate and set current ISTATUS, irq, and 1674 * reset timer to when ISTATUS next has to change 1675 */ 1676 uint64_t offset = timeridx == GTIMER_VIRT ? 1677 cpu->env.cp15.cntvoff_el2 : 0; 1678 uint64_t count = gt_get_countervalue(&cpu->env); 1679 /* Note that this must be unsigned 64 bit arithmetic: */ 1680 int istatus = count - offset >= gt->cval; 1681 uint64_t nexttick; 1682 int irqstate; 1683 1684 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 1685 1686 irqstate = (istatus && !(gt->ctl & 2)); 1687 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1688 1689 if (istatus) { 1690 /* Next transition is when count rolls back over to zero */ 1691 nexttick = UINT64_MAX; 1692 } else { 1693 /* Next transition is when we hit cval */ 1694 nexttick = gt->cval + offset; 1695 } 1696 /* Note that the desired next expiry time might be beyond the 1697 * signed-64-bit range of a QEMUTimer -- in this case we just 1698 * set the timer for as far in the future as possible. When the 1699 * timer expires we will reset the timer for any remaining period. 1700 */ 1701 if (nexttick > INT64_MAX / GTIMER_SCALE) { 1702 nexttick = INT64_MAX / GTIMER_SCALE; 1703 } 1704 timer_mod(cpu->gt_timer[timeridx], nexttick); 1705 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 1706 } else { 1707 /* Timer disabled: ISTATUS and timer output always clear */ 1708 gt->ctl &= ~4; 1709 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 1710 timer_del(cpu->gt_timer[timeridx]); 1711 trace_arm_gt_recalc_disabled(timeridx); 1712 } 1713 } 1714 1715 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 1716 int timeridx) 1717 { 1718 ARMCPU *cpu = arm_env_get_cpu(env); 1719 1720 timer_del(cpu->gt_timer[timeridx]); 1721 } 1722 1723 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1724 { 1725 return gt_get_countervalue(env); 1726 } 1727 1728 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1729 { 1730 return gt_get_countervalue(env) - env->cp15.cntvoff_el2; 1731 } 1732 1733 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1734 int timeridx, 1735 uint64_t value) 1736 { 1737 trace_arm_gt_cval_write(timeridx, value); 1738 env->cp15.c14_timer[timeridx].cval = value; 1739 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1740 } 1741 1742 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 1743 int timeridx) 1744 { 1745 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0; 1746 1747 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 1748 (gt_get_countervalue(env) - offset)); 1749 } 1750 1751 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1752 int timeridx, 1753 uint64_t value) 1754 { 1755 uint64_t offset = timeridx == GTIMER_VIRT ? 
env->cp15.cntvoff_el2 : 0; 1756 1757 trace_arm_gt_tval_write(timeridx, value); 1758 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 1759 sextract64(value, 0, 32); 1760 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1761 } 1762 1763 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1764 int timeridx, 1765 uint64_t value) 1766 { 1767 ARMCPU *cpu = arm_env_get_cpu(env); 1768 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 1769 1770 trace_arm_gt_ctl_write(timeridx, value); 1771 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 1772 if ((oldval ^ value) & 1) { 1773 /* Enable toggled */ 1774 gt_recalc_timer(cpu, timeridx); 1775 } else if ((oldval ^ value) & 2) { 1776 /* IMASK toggled: don't need to recalculate, 1777 * just set the interrupt line based on ISTATUS 1778 */ 1779 int irqstate = (oldval & 4) && !(value & 2); 1780 1781 trace_arm_gt_imask_toggle(timeridx, irqstate); 1782 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1783 } 1784 } 1785 1786 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1787 { 1788 gt_timer_reset(env, ri, GTIMER_PHYS); 1789 } 1790 1791 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1792 uint64_t value) 1793 { 1794 gt_cval_write(env, ri, GTIMER_PHYS, value); 1795 } 1796 1797 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1798 { 1799 return gt_tval_read(env, ri, GTIMER_PHYS); 1800 } 1801 1802 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1803 uint64_t value) 1804 { 1805 gt_tval_write(env, ri, GTIMER_PHYS, value); 1806 } 1807 1808 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1809 uint64_t value) 1810 { 1811 gt_ctl_write(env, ri, GTIMER_PHYS, value); 1812 } 1813 1814 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1815 { 1816 gt_timer_reset(env, ri, GTIMER_VIRT); 1817 } 1818 1819 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1820 uint64_t value) 1821 { 1822 gt_cval_write(env, ri, GTIMER_VIRT, value); 1823 } 1824 1825 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1826 { 1827 return gt_tval_read(env, ri, GTIMER_VIRT); 1828 } 1829 1830 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1831 uint64_t value) 1832 { 1833 gt_tval_write(env, ri, GTIMER_VIRT, value); 1834 } 1835 1836 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1837 uint64_t value) 1838 { 1839 gt_ctl_write(env, ri, GTIMER_VIRT, value); 1840 } 1841 1842 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 1843 uint64_t value) 1844 { 1845 ARMCPU *cpu = arm_env_get_cpu(env); 1846 1847 trace_arm_gt_cntvoff_write(value); 1848 raw_write(env, ri, value); 1849 gt_recalc_timer(cpu, GTIMER_VIRT); 1850 } 1851 1852 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1853 { 1854 gt_timer_reset(env, ri, GTIMER_HYP); 1855 } 1856 1857 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1858 uint64_t value) 1859 { 1860 gt_cval_write(env, ri, GTIMER_HYP, value); 1861 } 1862 1863 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1864 { 1865 return gt_tval_read(env, ri, GTIMER_HYP); 1866 } 1867 1868 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1869 uint64_t value) 1870 { 1871 gt_tval_write(env, ri, GTIMER_HYP, value); 1872 } 1873 1874 static void gt_hyp_ctl_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 1875 uint64_t value) 1876 { 1877 gt_ctl_write(env, ri, GTIMER_HYP, value); 1878 } 1879 1880 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1881 { 1882 gt_timer_reset(env, ri, GTIMER_SEC); 1883 } 1884 1885 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1886 uint64_t value) 1887 { 1888 gt_cval_write(env, ri, GTIMER_SEC, value); 1889 } 1890 1891 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1892 { 1893 return gt_tval_read(env, ri, GTIMER_SEC); 1894 } 1895 1896 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1897 uint64_t value) 1898 { 1899 gt_tval_write(env, ri, GTIMER_SEC, value); 1900 } 1901 1902 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1903 uint64_t value) 1904 { 1905 gt_ctl_write(env, ri, GTIMER_SEC, value); 1906 } 1907 1908 void arm_gt_ptimer_cb(void *opaque) 1909 { 1910 ARMCPU *cpu = opaque; 1911 1912 gt_recalc_timer(cpu, GTIMER_PHYS); 1913 } 1914 1915 void arm_gt_vtimer_cb(void *opaque) 1916 { 1917 ARMCPU *cpu = opaque; 1918 1919 gt_recalc_timer(cpu, GTIMER_VIRT); 1920 } 1921 1922 void arm_gt_htimer_cb(void *opaque) 1923 { 1924 ARMCPU *cpu = opaque; 1925 1926 gt_recalc_timer(cpu, GTIMER_HYP); 1927 } 1928 1929 void arm_gt_stimer_cb(void *opaque) 1930 { 1931 ARMCPU *cpu = opaque; 1932 1933 gt_recalc_timer(cpu, GTIMER_SEC); 1934 } 1935 1936 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 1937 /* Note that CNTFRQ is purely reads-as-written for the benefit 1938 * of software; writing it doesn't actually change the timer frequency. 1939 * Our reset value matches the fixed frequency we implement the timer at. 1940 */ 1941 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 1942 .type = ARM_CP_ALIAS, 1943 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1944 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 1945 }, 1946 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 1947 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 1948 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1949 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 1950 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 1951 }, 1952 /* overall control: mostly access permissions */ 1953 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 1954 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 1955 .access = PL1_RW, 1956 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 1957 .resetvalue = 0, 1958 }, 1959 /* per-timer control */ 1960 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 1961 .secure = ARM_CP_SECSTATE_NS, 1962 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 1963 .accessfn = gt_ptimer_access, 1964 .fieldoffset = offsetoflow32(CPUARMState, 1965 cp15.c14_timer[GTIMER_PHYS].ctl), 1966 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 1967 }, 1968 { .name = "CNTP_CTL(S)", 1969 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 1970 .secure = ARM_CP_SECSTATE_S, 1971 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 1972 .accessfn = gt_ptimer_access, 1973 .fieldoffset = offsetoflow32(CPUARMState, 1974 cp15.c14_timer[GTIMER_SEC].ctl), 1975 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 1976 }, 1977 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 1978 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 1979 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 1980 .accessfn = gt_ptimer_access, 1981 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].ctl), 1982 .resetvalue = 0, 1983 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 1984 }, 1985 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 1986 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 1987 .accessfn = gt_vtimer_access, 1988 .fieldoffset = offsetoflow32(CPUARMState, 1989 cp15.c14_timer[GTIMER_VIRT].ctl), 1990 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 1991 }, 1992 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 1993 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 1994 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 1995 .accessfn = gt_vtimer_access, 1996 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 1997 .resetvalue = 0, 1998 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 1999 }, 2000 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2001 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2002 .secure = ARM_CP_SECSTATE_NS, 2003 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2004 .accessfn = gt_ptimer_access, 2005 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2006 }, 2007 { .name = "CNTP_TVAL(S)", 2008 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2009 .secure = ARM_CP_SECSTATE_S, 2010 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2011 .accessfn = gt_ptimer_access, 2012 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2013 }, 2014 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2015 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2016 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2017 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2018 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2019 }, 2020 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2021 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2022 .accessfn = gt_vtimer_access, 2023 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2024 }, 2025 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2026 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2027 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2028 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2029 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2030 }, 2031 /* The counter itself */ 2032 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2033 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2034 .accessfn = gt_pct_access, 2035 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2036 }, 2037 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2038 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2039 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2040 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2041 }, 2042 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2043 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2044 .accessfn = gt_vct_access, 2045 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2046 }, 2047 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2048 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2049 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2050 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2051 }, 2052 /* Comparison value, indicating when the timer goes off */ 2053 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2054 .secure = ARM_CP_SECSTATE_NS, 2055 .access = PL1_RW | 
PL0_R, 2056 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2057 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2058 .accessfn = gt_ptimer_access, 2059 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2060 }, 2061 { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2, 2062 .secure = ARM_CP_SECSTATE_S, 2063 .access = PL1_RW | PL0_R, 2064 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2065 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2066 .accessfn = gt_ptimer_access, 2067 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2068 }, 2069 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2070 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 2071 .access = PL1_RW | PL0_R, 2072 .type = ARM_CP_IO, 2073 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2074 .resetvalue = 0, .accessfn = gt_ptimer_access, 2075 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2076 }, 2077 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 2078 .access = PL1_RW | PL0_R, 2079 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2080 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2081 .accessfn = gt_vtimer_access, 2082 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2083 }, 2084 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2085 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 2086 .access = PL1_RW | PL0_R, 2087 .type = ARM_CP_IO, 2088 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2089 .resetvalue = 0, .accessfn = gt_vtimer_access, 2090 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2091 }, 2092 /* Secure timer -- this is actually restricted to only EL3 2093 * and configurably Secure-EL1 via the accessfn. 2094 */ 2095 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 2096 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 2097 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 2098 .accessfn = gt_stimer_access, 2099 .readfn = gt_sec_tval_read, 2100 .writefn = gt_sec_tval_write, 2101 .resetfn = gt_sec_timer_reset, 2102 }, 2103 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 2104 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 2105 .type = ARM_CP_IO, .access = PL1_RW, 2106 .accessfn = gt_stimer_access, 2107 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 2108 .resetvalue = 0, 2109 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2110 }, 2111 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 2112 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 2113 .type = ARM_CP_IO, .access = PL1_RW, 2114 .accessfn = gt_stimer_access, 2115 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2116 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2117 }, 2118 REGINFO_SENTINEL 2119 }; 2120 2121 #else 2122 /* In user-mode none of the generic timer registers are accessible, 2123 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs, 2124 * so instead just don't register any of them. 
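 * The reginfo list below therefore contains only the terminating REGINFO_SENTINEL entry.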
2125 */ 2126 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2127 REGINFO_SENTINEL 2128 }; 2129 2130 #endif 2131 2132 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2133 { 2134 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2135 raw_write(env, ri, value); 2136 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2137 raw_write(env, ri, value & 0xfffff6ff); 2138 } else { 2139 raw_write(env, ri, value & 0xfffff1ff); 2140 } 2141 } 2142 2143 #ifndef CONFIG_USER_ONLY 2144 /* get_phys_addr() isn't present for user-mode-only targets */ 2145 2146 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2147 bool isread) 2148 { 2149 if (ri->opc2 & 4) { 2150 /* The ATS12NSO* operations must trap to EL3 if executed in 2151 * Secure EL1 (which can only happen if EL3 is AArch64). 2152 * They are simply UNDEF if executed from NS EL1. 2153 * They function normally from EL2 or EL3. 2154 */ 2155 if (arm_current_el(env) == 1) { 2156 if (arm_is_secure_below_el3(env)) { 2157 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2158 } 2159 return CP_ACCESS_TRAP_UNCATEGORIZED; 2160 } 2161 } 2162 return CP_ACCESS_OK; 2163 } 2164 2165 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2166 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2167 { 2168 hwaddr phys_addr; 2169 target_ulong page_size; 2170 int prot; 2171 bool ret; 2172 uint64_t par64; 2173 bool format64 = false; 2174 MemTxAttrs attrs = {}; 2175 ARMMMUFaultInfo fi = {}; 2176 ARMCacheAttrs cacheattrs = {}; 2177 2178 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2179 &prot, &page_size, &fi, &cacheattrs); 2180 2181 if (is_a64(env)) { 2182 format64 = true; 2183 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2184 /* 2185 * ATS1Cxx: 2186 * * TTBCR.EAE determines whether the result is returned using the 2187 * 32-bit or the 64-bit PAR format 2188 * * Instructions executed in Hyp mode always use the 64bit format 2189 * 2190 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2191 * * The Non-secure TTBCR.EAE bit is set to 1 2192 * * The implementation includes EL2, and the value of HCR.VM is 1 2193 * 2194 * ATS1Hx always uses the 64bit format (not supported yet). 2195 */ 2196 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2197 2198 if (arm_feature(env, ARM_FEATURE_EL2)) { 2199 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2200 format64 |= env->cp15.hcr_el2 & HCR_VM; 2201 } else { 2202 format64 |= arm_current_el(env) == 2; 2203 } 2204 } 2205 } 2206 2207 if (format64) { 2208 /* Create a 64-bit PAR */ 2209 par64 = (1 << 11); /* LPAE bit always set */ 2210 if (!ret) { 2211 par64 |= phys_addr & ~0xfffULL; 2212 if (!attrs.secure) { 2213 par64 |= (1 << 9); /* NS */ 2214 } 2215 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2216 par64 |= cacheattrs.shareability << 7; /* SH */ 2217 } else { 2218 uint32_t fsr = arm_fi_to_lfsc(&fi); 2219 2220 par64 |= 1; /* F */ 2221 par64 |= (fsr & 0x3f) << 1; /* FS */ 2222 /* Note that S2WLK and FSTAGE are always zero, because we don't 2223 * implement virtualization and therefore there can't be a stage 2 2224 * fault. 2225 */ 2226 } 2227 } else { 2228 /* fsr is a DFSR/IFSR value for the short descriptor 2229 * translation table format (with WnR always clear). 2230 * Convert it to a 32-bit PAR. 
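 * On success the PAR below holds the page-aligned PA (or, for a 16MB v7 supersection, the supersection base with bit 1 set), with NS set for a Non-secure result; on a fault it packs the short-format FSR status bits alongside the F bit.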
2231 */ 2232 if (!ret) { 2233 /* We do not set any attribute bits in the PAR */ 2234 if (page_size == (1 << 24) 2235 && arm_feature(env, ARM_FEATURE_V7)) { 2236 par64 = (phys_addr & 0xff000000) | (1 << 1); 2237 } else { 2238 par64 = phys_addr & 0xfffff000; 2239 } 2240 if (!attrs.secure) { 2241 par64 |= (1 << 9); /* NS */ 2242 } 2243 } else { 2244 uint32_t fsr = arm_fi_to_sfsc(&fi); 2245 2246 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 2247 ((fsr & 0xf) << 1) | 1; 2248 } 2249 } 2250 return par64; 2251 } 2252 2253 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2254 { 2255 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2256 uint64_t par64; 2257 ARMMMUIdx mmu_idx; 2258 int el = arm_current_el(env); 2259 bool secure = arm_is_secure_below_el3(env); 2260 2261 switch (ri->opc2 & 6) { 2262 case 0: 2263 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 2264 switch (el) { 2265 case 3: 2266 mmu_idx = ARMMMUIdx_S1E3; 2267 break; 2268 case 2: 2269 mmu_idx = ARMMMUIdx_S1NSE1; 2270 break; 2271 case 1: 2272 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2273 break; 2274 default: 2275 g_assert_not_reached(); 2276 } 2277 break; 2278 case 2: 2279 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 2280 switch (el) { 2281 case 3: 2282 mmu_idx = ARMMMUIdx_S1SE0; 2283 break; 2284 case 2: 2285 mmu_idx = ARMMMUIdx_S1NSE0; 2286 break; 2287 case 1: 2288 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2289 break; 2290 default: 2291 g_assert_not_reached(); 2292 } 2293 break; 2294 case 4: 2295 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 2296 mmu_idx = ARMMMUIdx_S12NSE1; 2297 break; 2298 case 6: 2299 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 2300 mmu_idx = ARMMMUIdx_S12NSE0; 2301 break; 2302 default: 2303 g_assert_not_reached(); 2304 } 2305 2306 par64 = do_ats_write(env, value, access_type, mmu_idx); 2307 2308 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2309 } 2310 2311 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 2312 uint64_t value) 2313 { 2314 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2315 uint64_t par64; 2316 2317 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS); 2318 2319 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2320 } 2321 2322 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 2323 bool isread) 2324 { 2325 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 2326 return CP_ACCESS_TRAP; 2327 } 2328 return CP_ACCESS_OK; 2329 } 2330 2331 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 2332 uint64_t value) 2333 { 2334 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2335 ARMMMUIdx mmu_idx; 2336 int secure = arm_is_secure_below_el3(env); 2337 2338 switch (ri->opc2 & 6) { 2339 case 0: 2340 switch (ri->opc1) { 2341 case 0: /* AT S1E1R, AT S1E1W */ 2342 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2343 break; 2344 case 4: /* AT S1E2R, AT S1E2W */ 2345 mmu_idx = ARMMMUIdx_S1E2; 2346 break; 2347 case 6: /* AT S1E3R, AT S1E3W */ 2348 mmu_idx = ARMMMUIdx_S1E3; 2349 break; 2350 default: 2351 g_assert_not_reached(); 2352 } 2353 break; 2354 case 2: /* AT S1E0R, AT S1E0W */ 2355 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2356 break; 2357 case 4: /* AT S12E1R, AT S12E1W */ 2358 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 2359 break; 2360 case 6: /* AT S12E0R, AT S12E0W */ 2361 mmu_idx = secure ? 
ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 2362 break; 2363 default: 2364 g_assert_not_reached(); 2365 } 2366 2367 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 2368 } 2369 #endif 2370 2371 static const ARMCPRegInfo vapa_cp_reginfo[] = { 2372 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 2373 .access = PL1_RW, .resetvalue = 0, 2374 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 2375 offsetoflow32(CPUARMState, cp15.par_ns) }, 2376 .writefn = par_write }, 2377 #ifndef CONFIG_USER_ONLY 2378 /* This underdecoding is safe because the reginfo is NO_RAW. */ 2379 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 2380 .access = PL1_W, .accessfn = ats_access, 2381 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 2382 #endif 2383 REGINFO_SENTINEL 2384 }; 2385 2386 /* Return basic MPU access permission bits. */ 2387 static uint32_t simple_mpu_ap_bits(uint32_t val) 2388 { 2389 uint32_t ret; 2390 uint32_t mask; 2391 int i; 2392 ret = 0; 2393 mask = 3; 2394 for (i = 0; i < 16; i += 2) { 2395 ret |= (val >> i) & mask; 2396 mask <<= 2; 2397 } 2398 return ret; 2399 } 2400 2401 /* Pad basic MPU access permission bits to extended format. */ 2402 static uint32_t extended_mpu_ap_bits(uint32_t val) 2403 { 2404 uint32_t ret; 2405 uint32_t mask; 2406 int i; 2407 ret = 0; 2408 mask = 3; 2409 for (i = 0; i < 16; i += 2) { 2410 ret |= (val & mask) << i; 2411 mask <<= 2; 2412 } 2413 return ret; 2414 } 2415 2416 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2417 uint64_t value) 2418 { 2419 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 2420 } 2421 2422 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2423 { 2424 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 2425 } 2426 2427 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2428 uint64_t value) 2429 { 2430 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 2431 } 2432 2433 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2434 { 2435 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 2436 } 2437 2438 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 2439 { 2440 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2441 2442 if (!u32p) { 2443 return 0; 2444 } 2445 2446 u32p += env->pmsav7.rnr[M_REG_NS]; 2447 return *u32p; 2448 } 2449 2450 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 2451 uint64_t value) 2452 { 2453 ARMCPU *cpu = arm_env_get_cpu(env); 2454 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2455 2456 if (!u32p) { 2457 return; 2458 } 2459 2460 u32p += env->pmsav7.rnr[M_REG_NS]; 2461 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 2462 *u32p = value; 2463 } 2464 2465 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2466 uint64_t value) 2467 { 2468 ARMCPU *cpu = arm_env_get_cpu(env); 2469 uint32_t nrgs = cpu->pmsav7_dregion; 2470 2471 if (value >= nrgs) { 2472 qemu_log_mask(LOG_GUEST_ERROR, 2473 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 2474 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 2475 return; 2476 } 2477 2478 raw_write(env, ri, value); 2479 } 2480 2481 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 2482 /* Reset for all these registers is handled in arm_cpu_reset(), 2483 * because the PMSAv7 is also used by M-profile CPUs, which do 2484 * not register cpregs but still need the state to be reset. 
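 * Hence every entry below uses arm_cp_reset_ignore as its resetfn.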
2485 */ 2486 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 2487 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2488 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 2489 .readfn = pmsav7_read, .writefn = pmsav7_write, 2490 .resetfn = arm_cp_reset_ignore }, 2491 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 2492 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2493 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 2494 .readfn = pmsav7_read, .writefn = pmsav7_write, 2495 .resetfn = arm_cp_reset_ignore }, 2496 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 2497 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2498 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 2499 .readfn = pmsav7_read, .writefn = pmsav7_write, 2500 .resetfn = arm_cp_reset_ignore }, 2501 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 2502 .access = PL1_RW, 2503 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 2504 .writefn = pmsav7_rgnr_write, 2505 .resetfn = arm_cp_reset_ignore }, 2506 REGINFO_SENTINEL 2507 }; 2508 2509 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 2510 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2511 .access = PL1_RW, .type = ARM_CP_ALIAS, 2512 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2513 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 2514 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2515 .access = PL1_RW, .type = ARM_CP_ALIAS, 2516 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2517 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 2518 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 2519 .access = PL1_RW, 2520 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2521 .resetvalue = 0, }, 2522 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 2523 .access = PL1_RW, 2524 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2525 .resetvalue = 0, }, 2526 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 2527 .access = PL1_RW, 2528 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 2529 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 2530 .access = PL1_RW, 2531 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 2532 /* Protection region base and size registers */ 2533 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 2534 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2535 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 2536 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 2537 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2538 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 2539 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 2540 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2541 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 2542 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 2543 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2544 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 2545 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 2546 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2547 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 2548 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 2549 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2550 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 2551 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 2552 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2553 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 2554 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 2555 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2556 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 2557 REGINFO_SENTINEL 2558 }; 2559 2560 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 2561 uint64_t value) 2562 { 2563 TCR *tcr = raw_ptr(env, ri); 2564 int maskshift = extract32(value, 0, 3); 2565 2566 if (!arm_feature(env, ARM_FEATURE_V8)) { 2567 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 2568 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 2569 * using Long-descriptor translation table format */ 2570 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 2571 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 2572 /* In an implementation that includes the Security Extensions 2573 * TTBCR has additional fields PD0 [4] and PD1 [5] for 2574 * Short-descriptor translation table format. 2575 */ 2576 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 2577 } else { 2578 value &= TTBCR_N; 2579 } 2580 } 2581 2582 /* Update the masks corresponding to the TCR bank being written 2583 * Note that we always calculate mask and base_mask, but 2584 * they are only used for short-descriptor tables (ie if EAE is 0); 2585 * for long-descriptor tables the TCR fields are used differently 2586 * and the mask and base_mask values are meaningless. 2587 */ 2588 tcr->raw_tcr = value; 2589 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 2590 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 2591 } 2592 2593 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2594 uint64_t value) 2595 { 2596 ARMCPU *cpu = arm_env_get_cpu(env); 2597 2598 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2599 /* With LPAE the TTBCR could result in a change of ASID 2600 * via the TTBCR.A1 bit, so do a TLB flush. 2601 */ 2602 tlb_flush(CPU(cpu)); 2603 } 2604 vmsa_ttbcr_raw_write(env, ri, value); 2605 } 2606 2607 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2608 { 2609 TCR *tcr = raw_ptr(env, ri); 2610 2611 /* Reset both the TCR as well as the masks corresponding to the bank of 2612 * the TCR being reset. 2613 */ 2614 tcr->raw_tcr = 0; 2615 tcr->mask = 0; 2616 tcr->base_mask = 0xffffc000u; 2617 } 2618 2619 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 2620 uint64_t value) 2621 { 2622 ARMCPU *cpu = arm_env_get_cpu(env); 2623 TCR *tcr = raw_ptr(env, ri); 2624 2625 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 2626 tlb_flush(CPU(cpu)); 2627 tcr->raw_tcr = value; 2628 } 2629 2630 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2631 uint64_t value) 2632 { 2633 /* 64 bit accesses to the TTBRs can change the ASID and so we 2634 * must flush the TLB. 2635 */ 2636 if (cpreg_field_is_64bit(ri)) { 2637 ARMCPU *cpu = arm_env_get_cpu(env); 2638 2639 tlb_flush(CPU(cpu)); 2640 } 2641 raw_write(env, ri, value); 2642 } 2643 2644 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2645 uint64_t value) 2646 { 2647 ARMCPU *cpu = arm_env_get_cpu(env); 2648 CPUState *cs = CPU(cpu); 2649 2650 /* Accesses to VTTBR may change the VMID so we must flush the TLB.
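 * We only flush when the written value actually changes, and the flush below covers the Non-secure EL1&0 indexes as well as the stage 2 index.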
*/ 2651 if (raw_read(env, ri) != value) { 2652 tlb_flush_by_mmuidx(cs, 2653 ARMMMUIdxBit_S12NSE1 | 2654 ARMMMUIdxBit_S12NSE0 | 2655 ARMMMUIdxBit_S2NS); 2656 raw_write(env, ri, value); 2657 } 2658 } 2659 2660 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 2661 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2662 .access = PL1_RW, .type = ARM_CP_ALIAS, 2663 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 2664 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 2665 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2666 .access = PL1_RW, .resetvalue = 0, 2667 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 2668 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 2669 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 2670 .access = PL1_RW, .resetvalue = 0, 2671 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 2672 offsetof(CPUARMState, cp15.dfar_ns) } }, 2673 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 2674 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 2675 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 2676 .resetvalue = 0, }, 2677 REGINFO_SENTINEL 2678 }; 2679 2680 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 2681 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 2682 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 2683 .access = PL1_RW, 2684 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 2685 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 2686 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 2687 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2688 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2689 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 2690 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 2691 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 2692 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2693 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2694 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 2695 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 2696 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2697 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 2698 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 2699 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 2700 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2701 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 2702 .raw_writefn = vmsa_ttbcr_raw_write, 2703 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 2704 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 2705 REGINFO_SENTINEL 2706 }; 2707 2708 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 2709 uint64_t value) 2710 { 2711 env->cp15.c15_ticonfig = value & 0xe7; 2712 /* The OS_TYPE bit in this register changes the reported CPUID! */ 2713 env->cp15.c0_cpuid = (value & (1 << 5)) ? 
2714 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 2715 } 2716 2717 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 2718 uint64_t value) 2719 { 2720 env->cp15.c15_threadid = value & 0xffff; 2721 } 2722 2723 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 2724 uint64_t value) 2725 { 2726 /* Wait-for-interrupt (deprecated) */ 2727 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 2728 } 2729 2730 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 2731 uint64_t value) 2732 { 2733 /* On OMAP there are registers indicating the max/min index of dcache lines 2734 * containing a dirty line; cache flush operations have to reset these. 2735 */ 2736 env->cp15.c15_i_max = 0x000; 2737 env->cp15.c15_i_min = 0xff0; 2738 } 2739 2740 static const ARMCPRegInfo omap_cp_reginfo[] = { 2741 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 2742 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 2743 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 2744 .resetvalue = 0, }, 2745 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 2746 .access = PL1_RW, .type = ARM_CP_NOP }, 2747 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 2748 .access = PL1_RW, 2749 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 2750 .writefn = omap_ticonfig_write }, 2751 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 2752 .access = PL1_RW, 2753 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 2754 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 2755 .access = PL1_RW, .resetvalue = 0xff0, 2756 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 2757 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 2758 .access = PL1_RW, 2759 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 2760 .writefn = omap_threadid_write }, 2761 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 2762 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2763 .type = ARM_CP_NO_RAW, 2764 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 2765 /* TODO: Peripheral port remap register: 2766 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 2767 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 2768 * when MMU is off. 
2769 */ 2770 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 2771 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 2772 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 2773 .writefn = omap_cachemaint_write }, 2774 { .name = "C9", .cp = 15, .crn = 9, 2775 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 2776 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 2777 REGINFO_SENTINEL 2778 }; 2779 2780 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 2781 uint64_t value) 2782 { 2783 env->cp15.c15_cpar = value & 0x3fff; 2784 } 2785 2786 static const ARMCPRegInfo xscale_cp_reginfo[] = { 2787 { .name = "XSCALE_CPAR", 2788 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2789 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 2790 .writefn = xscale_cpar_write, }, 2791 { .name = "XSCALE_AUXCR", 2792 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 2793 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 2794 .resetvalue = 0, }, 2795 /* XScale specific cache-lockdown: since we have no cache we NOP these 2796 * and hope the guest does not really rely on cache behaviour. 2797 */ 2798 { .name = "XSCALE_LOCK_ICACHE_LINE", 2799 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 2800 .access = PL1_W, .type = ARM_CP_NOP }, 2801 { .name = "XSCALE_UNLOCK_ICACHE", 2802 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 2803 .access = PL1_W, .type = ARM_CP_NOP }, 2804 { .name = "XSCALE_DCACHE_LOCK", 2805 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 2806 .access = PL1_RW, .type = ARM_CP_NOP }, 2807 { .name = "XSCALE_UNLOCK_DCACHE", 2808 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 2809 .access = PL1_W, .type = ARM_CP_NOP }, 2810 REGINFO_SENTINEL 2811 }; 2812 2813 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 2814 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 2815 * implementation of this implementation-defined space. 2816 * Ideally this should eventually disappear in favour of actually 2817 * implementing the correct behaviour for all cores. 
2818 */ 2819 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 2820 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 2821 .access = PL1_RW, 2822 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 2823 .resetvalue = 0 }, 2824 REGINFO_SENTINEL 2825 }; 2826 2827 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 2828 /* Cache status: RAZ because we have no cache so it's always clean */ 2829 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 2830 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2831 .resetvalue = 0 }, 2832 REGINFO_SENTINEL 2833 }; 2834 2835 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 2836 /* We never have a block transfer operation in progress */ 2837 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 2838 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2839 .resetvalue = 0 }, 2840 /* The cache ops themselves: these all NOP for QEMU */ 2841 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 2842 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2843 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 2844 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2845 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 2846 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2847 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 2848 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2849 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 2850 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2851 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 2852 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 2853 REGINFO_SENTINEL 2854 }; 2855 2856 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 2857 /* The cache test-and-clean instructions always return (1 << 30) 2858 * to indicate that there are no dirty cache lines. 2859 */ 2860 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 2861 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2862 .resetvalue = (1 << 30) }, 2863 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 2864 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 2865 .resetvalue = (1 << 30) }, 2866 REGINFO_SENTINEL 2867 }; 2868 2869 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 2870 /* Ignore ReadBuffer accesses */ 2871 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 2872 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 2873 .access = PL1_RW, .resetvalue = 0, 2874 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 2875 REGINFO_SENTINEL 2876 }; 2877 2878 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2879 { 2880 ARMCPU *cpu = arm_env_get_cpu(env); 2881 unsigned int cur_el = arm_current_el(env); 2882 bool secure = arm_is_secure(env); 2883 2884 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2885 return env->cp15.vpidr_el2; 2886 } 2887 return raw_read(env, ri); 2888 } 2889 2890 static uint64_t mpidr_read_val(CPUARMState *env) 2891 { 2892 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env)); 2893 uint64_t mpidr = cpu->mp_affinity; 2894 2895 if (arm_feature(env, ARM_FEATURE_V7MP)) { 2896 mpidr |= (1U << 31); 2897 /* Cores which are uniprocessor (non-coherent) 2898 * but still implement the MP extensions set 2899 * bit 30. (For instance, Cortex-R5).
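 * QEMU reflects this via the cpu->mp_is_up flag tested below.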
2900 */ 2901 if (cpu->mp_is_up) { 2902 mpidr |= (1u << 30); 2903 } 2904 } 2905 return mpidr; 2906 } 2907 2908 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2909 { 2910 unsigned int cur_el = arm_current_el(env); 2911 bool secure = arm_is_secure(env); 2912 2913 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2914 return env->cp15.vmpidr_el2; 2915 } 2916 return mpidr_read_val(env); 2917 } 2918 2919 static const ARMCPRegInfo mpidr_cp_reginfo[] = { 2920 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH, 2921 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 2922 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 2923 REGINFO_SENTINEL 2924 }; 2925 2926 static const ARMCPRegInfo lpae_cp_reginfo[] = { 2927 /* NOP AMAIR0/1 */ 2928 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 2929 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 2930 .access = PL1_RW, .type = ARM_CP_CONST, 2931 .resetvalue = 0 }, 2932 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 2933 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 2934 .access = PL1_RW, .type = ARM_CP_CONST, 2935 .resetvalue = 0 }, 2936 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 2937 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 2938 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 2939 offsetof(CPUARMState, cp15.par_ns)} }, 2940 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 2941 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 2942 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2943 offsetof(CPUARMState, cp15.ttbr0_ns) }, 2944 .writefn = vmsa_ttbr_write, }, 2945 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 2946 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 2947 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2948 offsetof(CPUARMState, cp15.ttbr1_ns) }, 2949 .writefn = vmsa_ttbr_write, }, 2950 REGINFO_SENTINEL 2951 }; 2952 2953 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2954 { 2955 return vfp_get_fpcr(env); 2956 } 2957 2958 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2959 uint64_t value) 2960 { 2961 vfp_set_fpcr(env, value); 2962 } 2963 2964 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2965 { 2966 return vfp_get_fpsr(env); 2967 } 2968 2969 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2970 uint64_t value) 2971 { 2972 vfp_set_fpsr(env, value); 2973 } 2974 2975 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 2976 bool isread) 2977 { 2978 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 2979 return CP_ACCESS_TRAP; 2980 } 2981 return CP_ACCESS_OK; 2982 } 2983 2984 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 2985 uint64_t value) 2986 { 2987 env->daif = value & PSTATE_DAIF; 2988 } 2989 2990 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 2991 const ARMCPRegInfo *ri, 2992 bool isread) 2993 { 2994 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 2995 * SCTLR_EL1.UCI is set. 
2996 */ 2997 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 2998 return CP_ACCESS_TRAP; 2999 } 3000 return CP_ACCESS_OK; 3001 } 3002 3003 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3004 * Page D4-1736 (DDI0487A.b) 3005 */ 3006 3007 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3008 uint64_t value) 3009 { 3010 CPUState *cs = ENV_GET_CPU(env); 3011 3012 if (arm_is_secure_below_el3(env)) { 3013 tlb_flush_by_mmuidx(cs, 3014 ARMMMUIdxBit_S1SE1 | 3015 ARMMMUIdxBit_S1SE0); 3016 } else { 3017 tlb_flush_by_mmuidx(cs, 3018 ARMMMUIdxBit_S12NSE1 | 3019 ARMMMUIdxBit_S12NSE0); 3020 } 3021 } 3022 3023 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3024 uint64_t value) 3025 { 3026 CPUState *cs = ENV_GET_CPU(env); 3027 bool sec = arm_is_secure_below_el3(env); 3028 3029 if (sec) { 3030 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3031 ARMMMUIdxBit_S1SE1 | 3032 ARMMMUIdxBit_S1SE0); 3033 } else { 3034 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3035 ARMMMUIdxBit_S12NSE1 | 3036 ARMMMUIdxBit_S12NSE0); 3037 } 3038 } 3039 3040 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3041 uint64_t value) 3042 { 3043 /* Note that the 'ALL' scope must invalidate both stage 1 and 3044 * stage 2 translations, whereas most other scopes only invalidate 3045 * stage 1 translations. 3046 */ 3047 ARMCPU *cpu = arm_env_get_cpu(env); 3048 CPUState *cs = CPU(cpu); 3049 3050 if (arm_is_secure_below_el3(env)) { 3051 tlb_flush_by_mmuidx(cs, 3052 ARMMMUIdxBit_S1SE1 | 3053 ARMMMUIdxBit_S1SE0); 3054 } else { 3055 if (arm_feature(env, ARM_FEATURE_EL2)) { 3056 tlb_flush_by_mmuidx(cs, 3057 ARMMMUIdxBit_S12NSE1 | 3058 ARMMMUIdxBit_S12NSE0 | 3059 ARMMMUIdxBit_S2NS); 3060 } else { 3061 tlb_flush_by_mmuidx(cs, 3062 ARMMMUIdxBit_S12NSE1 | 3063 ARMMMUIdxBit_S12NSE0); 3064 } 3065 } 3066 } 3067 3068 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3069 uint64_t value) 3070 { 3071 ARMCPU *cpu = arm_env_get_cpu(env); 3072 CPUState *cs = CPU(cpu); 3073 3074 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3075 } 3076 3077 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3078 uint64_t value) 3079 { 3080 ARMCPU *cpu = arm_env_get_cpu(env); 3081 CPUState *cs = CPU(cpu); 3082 3083 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3084 } 3085 3086 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3087 uint64_t value) 3088 { 3089 /* Note that the 'ALL' scope must invalidate both stage 1 and 3090 * stage 2 translations, whereas most other scopes only invalidate 3091 * stage 1 translations. 
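 * So when EL2 is implemented the Non-secure flush below also includes ARMMMUIdxBit_S2NS.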
3092 */ 3093 CPUState *cs = ENV_GET_CPU(env); 3094 bool sec = arm_is_secure_below_el3(env); 3095 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3096 3097 if (sec) { 3098 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3099 ARMMMUIdxBit_S1SE1 | 3100 ARMMMUIdxBit_S1SE0); 3101 } else if (has_el2) { 3102 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3103 ARMMMUIdxBit_S12NSE1 | 3104 ARMMMUIdxBit_S12NSE0 | 3105 ARMMMUIdxBit_S2NS); 3106 } else { 3107 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3108 ARMMMUIdxBit_S12NSE1 | 3109 ARMMMUIdxBit_S12NSE0); 3110 } 3111 } 3112 3113 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3114 uint64_t value) 3115 { 3116 CPUState *cs = ENV_GET_CPU(env); 3117 3118 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3119 } 3120 3121 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3122 uint64_t value) 3123 { 3124 CPUState *cs = ENV_GET_CPU(env); 3125 3126 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3127 } 3128 3129 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3130 uint64_t value) 3131 { 3132 /* Invalidate by VA, EL1&0 (AArch64 version). 3133 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3134 * since we don't support flush-for-specific-ASID-only or 3135 * flush-last-level-only. 3136 */ 3137 ARMCPU *cpu = arm_env_get_cpu(env); 3138 CPUState *cs = CPU(cpu); 3139 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3140 3141 if (arm_is_secure_below_el3(env)) { 3142 tlb_flush_page_by_mmuidx(cs, pageaddr, 3143 ARMMMUIdxBit_S1SE1 | 3144 ARMMMUIdxBit_S1SE0); 3145 } else { 3146 tlb_flush_page_by_mmuidx(cs, pageaddr, 3147 ARMMMUIdxBit_S12NSE1 | 3148 ARMMMUIdxBit_S12NSE0); 3149 } 3150 } 3151 3152 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3153 uint64_t value) 3154 { 3155 /* Invalidate by VA, EL2 3156 * Currently handles both VAE2 and VALE2, since we don't support 3157 * flush-last-level-only. 3158 */ 3159 ARMCPU *cpu = arm_env_get_cpu(env); 3160 CPUState *cs = CPU(cpu); 3161 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3162 3163 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3164 } 3165 3166 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3167 uint64_t value) 3168 { 3169 /* Invalidate by VA, EL3 3170 * Currently handles both VAE3 and VALE3, since we don't support 3171 * flush-last-level-only. 
3172 */ 3173 ARMCPU *cpu = arm_env_get_cpu(env); 3174 CPUState *cs = CPU(cpu); 3175 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3176 3177 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3178 } 3179 3180 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3181 uint64_t value) 3182 { 3183 ARMCPU *cpu = arm_env_get_cpu(env); 3184 CPUState *cs = CPU(cpu); 3185 bool sec = arm_is_secure_below_el3(env); 3186 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3187 3188 if (sec) { 3189 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3190 ARMMMUIdxBit_S1SE1 | 3191 ARMMMUIdxBit_S1SE0); 3192 } else { 3193 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3194 ARMMMUIdxBit_S12NSE1 | 3195 ARMMMUIdxBit_S12NSE0); 3196 } 3197 } 3198 3199 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3200 uint64_t value) 3201 { 3202 CPUState *cs = ENV_GET_CPU(env); 3203 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3204 3205 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3206 ARMMMUIdxBit_S1E2); 3207 } 3208 3209 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3210 uint64_t value) 3211 { 3212 CPUState *cs = ENV_GET_CPU(env); 3213 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3214 3215 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3216 ARMMMUIdxBit_S1E3); 3217 } 3218 3219 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3220 uint64_t value) 3221 { 3222 /* Invalidate by IPA. This has to invalidate any structures that 3223 * contain only stage 2 translation information, but does not need 3224 * to apply to structures that contain combined stage 1 and stage 2 3225 * translation information. 3226 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 3227 */ 3228 ARMCPU *cpu = arm_env_get_cpu(env); 3229 CPUState *cs = CPU(cpu); 3230 uint64_t pageaddr; 3231 3232 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3233 return; 3234 } 3235 3236 pageaddr = sextract64(value << 12, 0, 48); 3237 3238 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 3239 } 3240 3241 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3242 uint64_t value) 3243 { 3244 CPUState *cs = ENV_GET_CPU(env); 3245 uint64_t pageaddr; 3246 3247 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3248 return; 3249 } 3250 3251 pageaddr = sextract64(value << 12, 0, 48); 3252 3253 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3254 ARMMMUIdxBit_S2NS); 3255 } 3256 3257 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 3258 bool isread) 3259 { 3260 /* We don't implement EL2, so the only control on DC ZVA is the 3261 * bit in the SCTLR which can prohibit access for EL0. 
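 * That bit is SCTLR_EL1.DZE, tested below: when it is clear, EL0 accesses trap.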
3262 */ 3263 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 3264 return CP_ACCESS_TRAP; 3265 } 3266 return CP_ACCESS_OK; 3267 } 3268 3269 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 3270 { 3271 ARMCPU *cpu = arm_env_get_cpu(env); 3272 int dzp_bit = 1 << 4; 3273 3274 /* DZP indicates whether DC ZVA access is allowed */ 3275 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 3276 dzp_bit = 0; 3277 } 3278 return cpu->dcz_blocksize | dzp_bit; 3279 } 3280 3281 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 3282 bool isread) 3283 { 3284 if (!(env->pstate & PSTATE_SP)) { 3285 /* Access to SP_EL0 is undefined if it's being used as 3286 * the stack pointer. 3287 */ 3288 return CP_ACCESS_TRAP_UNCATEGORIZED; 3289 } 3290 return CP_ACCESS_OK; 3291 } 3292 3293 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 3294 { 3295 return env->pstate & PSTATE_SP; 3296 } 3297 3298 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 3299 { 3300 update_spsel(env, val); 3301 } 3302 3303 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3304 uint64_t value) 3305 { 3306 ARMCPU *cpu = arm_env_get_cpu(env); 3307 3308 if (raw_read(env, ri) == value) { 3309 /* Skip the TLB flush if nothing actually changed; Linux likes 3310 * to do a lot of pointless SCTLR writes. 3311 */ 3312 return; 3313 } 3314 3315 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 3316 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 3317 value &= ~SCTLR_M; 3318 } 3319 3320 raw_write(env, ri, value); 3321 /* ??? Lots of these bits are not implemented. */ 3322 /* This may enable/disable the MMU, so do a TLB flush. */ 3323 tlb_flush(CPU(cpu)); 3324 } 3325 3326 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 3327 bool isread) 3328 { 3329 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 3330 return CP_ACCESS_TRAP_FP_EL2; 3331 } 3332 if (env->cp15.cptr_el[3] & CPTR_TFP) { 3333 return CP_ACCESS_TRAP_FP_EL3; 3334 } 3335 return CP_ACCESS_OK; 3336 } 3337 3338 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3339 uint64_t value) 3340 { 3341 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 3342 } 3343 3344 static const ARMCPRegInfo v8_cp_reginfo[] = { 3345 /* Minimal set of EL0-visible registers. This will need to be expanded 3346 * significantly for system emulation of AArch64 CPUs. 
3347 */ 3348 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 3349 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 3350 .access = PL0_RW, .type = ARM_CP_NZCV }, 3351 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 3352 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 3353 .type = ARM_CP_NO_RAW, 3354 .access = PL0_RW, .accessfn = aa64_daif_access, 3355 .fieldoffset = offsetof(CPUARMState, daif), 3356 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 3357 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 3358 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 3359 .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 3360 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 3361 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 3362 .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 3363 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 3364 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 3365 .access = PL0_R, .type = ARM_CP_NO_RAW, 3366 .readfn = aa64_dczid_read }, 3367 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 3368 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 3369 .access = PL0_W, .type = ARM_CP_DC_ZVA, 3370 #ifndef CONFIG_USER_ONLY 3371 /* Avoid overhead of an access check that always passes in user-mode */ 3372 .accessfn = aa64_zva_access, 3373 #endif 3374 }, 3375 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 3376 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 3377 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 3378 /* Cache ops: all NOPs since we don't emulate caches */ 3379 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 3380 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3381 .access = PL1_W, .type = ARM_CP_NOP }, 3382 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 3383 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3384 .access = PL1_W, .type = ARM_CP_NOP }, 3385 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 3386 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 3387 .access = PL0_W, .type = ARM_CP_NOP, 3388 .accessfn = aa64_cacheop_access }, 3389 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 3390 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3391 .access = PL1_W, .type = ARM_CP_NOP }, 3392 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 3393 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3394 .access = PL1_W, .type = ARM_CP_NOP }, 3395 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 3396 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 3397 .access = PL0_W, .type = ARM_CP_NOP, 3398 .accessfn = aa64_cacheop_access }, 3399 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 3400 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3401 .access = PL1_W, .type = ARM_CP_NOP }, 3402 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 3403 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 3404 .access = PL0_W, .type = ARM_CP_NOP, 3405 .accessfn = aa64_cacheop_access }, 3406 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 3407 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 3408 .access = PL0_W, .type = ARM_CP_NOP, 3409 .accessfn = aa64_cacheop_access }, 3410 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 3411 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3412 .access = PL1_W, .type = ARM_CP_NOP }, 3413 /* TLBI operations */ 3414 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 3415 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 3416 .access = PL1_W, .type = ARM_CP_NO_RAW, 3417 .writefn = tlbi_aa64_vmalle1is_write }, 3418 { .name = 
"TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 3419 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 3420 .access = PL1_W, .type = ARM_CP_NO_RAW, 3421 .writefn = tlbi_aa64_vae1is_write }, 3422 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 3423 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 3424 .access = PL1_W, .type = ARM_CP_NO_RAW, 3425 .writefn = tlbi_aa64_vmalle1is_write }, 3426 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 3427 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 3428 .access = PL1_W, .type = ARM_CP_NO_RAW, 3429 .writefn = tlbi_aa64_vae1is_write }, 3430 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 3431 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3432 .access = PL1_W, .type = ARM_CP_NO_RAW, 3433 .writefn = tlbi_aa64_vae1is_write }, 3434 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 3435 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3436 .access = PL1_W, .type = ARM_CP_NO_RAW, 3437 .writefn = tlbi_aa64_vae1is_write }, 3438 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 3439 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 3440 .access = PL1_W, .type = ARM_CP_NO_RAW, 3441 .writefn = tlbi_aa64_vmalle1_write }, 3442 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 3443 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 3444 .access = PL1_W, .type = ARM_CP_NO_RAW, 3445 .writefn = tlbi_aa64_vae1_write }, 3446 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 3447 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 3448 .access = PL1_W, .type = ARM_CP_NO_RAW, 3449 .writefn = tlbi_aa64_vmalle1_write }, 3450 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 3451 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 3452 .access = PL1_W, .type = ARM_CP_NO_RAW, 3453 .writefn = tlbi_aa64_vae1_write }, 3454 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 3455 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3456 .access = PL1_W, .type = ARM_CP_NO_RAW, 3457 .writefn = tlbi_aa64_vae1_write }, 3458 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 3459 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3460 .access = PL1_W, .type = ARM_CP_NO_RAW, 3461 .writefn = tlbi_aa64_vae1_write }, 3462 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 3463 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3464 .access = PL2_W, .type = ARM_CP_NO_RAW, 3465 .writefn = tlbi_aa64_ipas2e1is_write }, 3466 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 3467 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3468 .access = PL2_W, .type = ARM_CP_NO_RAW, 3469 .writefn = tlbi_aa64_ipas2e1is_write }, 3470 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 3471 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3472 .access = PL2_W, .type = ARM_CP_NO_RAW, 3473 .writefn = tlbi_aa64_alle1is_write }, 3474 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 3475 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 3476 .access = PL2_W, .type = ARM_CP_NO_RAW, 3477 .writefn = tlbi_aa64_alle1is_write }, 3478 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 3479 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3480 .access = PL2_W, .type = ARM_CP_NO_RAW, 3481 .writefn = tlbi_aa64_ipas2e1_write }, 3482 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 3483 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3484 .access = PL2_W, .type = ARM_CP_NO_RAW, 3485 .writefn = tlbi_aa64_ipas2e1_write }, 3486 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 3487 .opc0 = 1, .opc1 = 4, .crn = 
8, .crm = 7, .opc2 = 4, 3488 .access = PL2_W, .type = ARM_CP_NO_RAW, 3489 .writefn = tlbi_aa64_alle1_write }, 3490 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 3491 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 3492 .access = PL2_W, .type = ARM_CP_NO_RAW, 3493 .writefn = tlbi_aa64_alle1is_write }, 3494 #ifndef CONFIG_USER_ONLY 3495 /* 64 bit address translation operations */ 3496 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 3497 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 3498 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3499 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 3500 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 3501 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3502 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 3503 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 3504 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3505 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 3506 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 3507 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3508 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 3509 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 3510 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3511 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 3512 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 3513 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3514 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 3515 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 3516 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3517 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 3518 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 3519 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3520 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 3521 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 3522 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 3523 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3524 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 3525 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 3526 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3527 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 3528 .type = ARM_CP_ALIAS, 3529 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 3530 .access = PL1_RW, .resetvalue = 0, 3531 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 3532 .writefn = par_write }, 3533 #endif 3534 /* TLB invalidate last level of translation table walk */ 3535 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3536 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 3537 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3538 .type = ARM_CP_NO_RAW, .access = PL1_W, 3539 .writefn = tlbimvaa_is_write }, 3540 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3541 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 3542 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3543 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 3544 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3545 .type = ARM_CP_NO_RAW, .access = PL2_W, 3546 .writefn = tlbimva_hyp_write }, 3547 { .name = "TLBIMVALHIS", 3548 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3549 .type = 
ARM_CP_NO_RAW, .access = PL2_W, 3550 .writefn = tlbimva_hyp_is_write }, 3551 { .name = "TLBIIPAS2", 3552 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3553 .type = ARM_CP_NO_RAW, .access = PL2_W, 3554 .writefn = tlbiipas2_write }, 3555 { .name = "TLBIIPAS2IS", 3556 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3557 .type = ARM_CP_NO_RAW, .access = PL2_W, 3558 .writefn = tlbiipas2_is_write }, 3559 { .name = "TLBIIPAS2L", 3560 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3561 .type = ARM_CP_NO_RAW, .access = PL2_W, 3562 .writefn = tlbiipas2_write }, 3563 { .name = "TLBIIPAS2LIS", 3564 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3565 .type = ARM_CP_NO_RAW, .access = PL2_W, 3566 .writefn = tlbiipas2_is_write }, 3567 /* 32 bit cache operations */ 3568 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3569 .type = ARM_CP_NOP, .access = PL1_W }, 3570 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 3571 .type = ARM_CP_NOP, .access = PL1_W }, 3572 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3573 .type = ARM_CP_NOP, .access = PL1_W }, 3574 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 3575 .type = ARM_CP_NOP, .access = PL1_W }, 3576 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 3577 .type = ARM_CP_NOP, .access = PL1_W }, 3578 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 3579 .type = ARM_CP_NOP, .access = PL1_W }, 3580 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3581 .type = ARM_CP_NOP, .access = PL1_W }, 3582 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3583 .type = ARM_CP_NOP, .access = PL1_W }, 3584 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 3585 .type = ARM_CP_NOP, .access = PL1_W }, 3586 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3587 .type = ARM_CP_NOP, .access = PL1_W }, 3588 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 3589 .type = ARM_CP_NOP, .access = PL1_W }, 3590 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 3591 .type = ARM_CP_NOP, .access = PL1_W }, 3592 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3593 .type = ARM_CP_NOP, .access = PL1_W }, 3594 /* MMU Domain access control / MPU write buffer control */ 3595 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 3596 .access = PL1_RW, .resetvalue = 0, 3597 .writefn = dacr_write, .raw_writefn = raw_write, 3598 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 3599 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 3600 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 3601 .type = ARM_CP_ALIAS, 3602 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 3603 .access = PL1_RW, 3604 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 3605 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 3606 .type = ARM_CP_ALIAS, 3607 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 3608 .access = PL1_RW, 3609 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 3610 /* We rely on the access checks not allowing the guest to write to the 3611 * state field when SPSel indicates that it's being used as the stack 3612 * pointer. 
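     * (Concretely: sp_el0_access() above returns CP_ACCESS_TRAP_UNCATEGORIZED
     * whenever PSTATE.SP is 0, i.e. while SP_EL0 is the stack pointer in use,
     * so explicit MRS/MSR accesses to SP_EL0 UNDEF in that situation.)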
3613 */ 3614 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 3615 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 3616 .access = PL1_RW, .accessfn = sp_el0_access, 3617 .type = ARM_CP_ALIAS, 3618 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 3619 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 3620 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 3621 .access = PL2_RW, .type = ARM_CP_ALIAS, 3622 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 3623 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 3624 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 3625 .type = ARM_CP_NO_RAW, 3626 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 3627 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 3628 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 3629 .type = ARM_CP_ALIAS, 3630 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 3631 .access = PL2_RW, .accessfn = fpexc32_access }, 3632 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 3633 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 3634 .access = PL2_RW, .resetvalue = 0, 3635 .writefn = dacr_write, .raw_writefn = raw_write, 3636 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 3637 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 3638 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 3639 .access = PL2_RW, .resetvalue = 0, 3640 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 3641 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 3642 .type = ARM_CP_ALIAS, 3643 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 3644 .access = PL2_RW, 3645 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 3646 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 3647 .type = ARM_CP_ALIAS, 3648 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 3649 .access = PL2_RW, 3650 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 3651 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 3652 .type = ARM_CP_ALIAS, 3653 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 3654 .access = PL2_RW, 3655 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 3656 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 3657 .type = ARM_CP_ALIAS, 3658 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 3659 .access = PL2_RW, 3660 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 3661 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 3662 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 3663 .resetvalue = 0, 3664 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 3665 { .name = "SDCR", .type = ARM_CP_ALIAS, 3666 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 3667 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 3668 .writefn = sdcr_write, 3669 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 3670 REGINFO_SENTINEL 3671 }; 3672 3673 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
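 * The entries below make those registers read as zero and ignore writes
 * (either via ARM_CP_CONST or arm_cp_read_zero/arm_cp_write_ignore) rather
 * than UNDEFing, so EL3 software can still touch them harmlessly.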
 */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_NO_RAW,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3742 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 3743 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3744 .resetvalue = 0 }, 3745 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 3746 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 3747 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3748 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 3749 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3750 .resetvalue = 0 }, 3751 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 3752 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 3753 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3754 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 3755 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 3756 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3757 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 3758 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 3759 .access = PL2_RW, .accessfn = access_tda, 3760 .type = ARM_CP_CONST, .resetvalue = 0 }, 3761 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 3762 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 3763 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3764 .type = ARM_CP_CONST, .resetvalue = 0 }, 3765 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 3766 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 3767 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3768 REGINFO_SENTINEL 3769 }; 3770 3771 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3772 { 3773 ARMCPU *cpu = arm_env_get_cpu(env); 3774 uint64_t valid_mask = HCR_MASK; 3775 3776 if (arm_feature(env, ARM_FEATURE_EL3)) { 3777 valid_mask &= ~HCR_HCD; 3778 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 3779 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 3780 * However, if we're using the SMC PSCI conduit then QEMU is 3781 * effectively acting like EL3 firmware and so the guest at 3782 * EL2 should retain the ability to prevent EL1 from being 3783 * able to make SMC calls into the ersatz firmware, so in 3784 * that case HCR.TSC should be read/write. 3785 */ 3786 valid_mask &= ~HCR_TSC; 3787 } 3788 3789 /* Clear RES0 bits. 
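     * For example, with EL3 implemented the guest can never set HCR_HCD here,
     * and with no EL3 and a non-SMC PSCI conduit it can never set HCR_TSC;
     * any bit outside valid_mask is always cleared before the write below.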
     */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC disables stage 1 and enables stage 2 translation
     */
    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't
cause an ASID change; 3866 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3867 */ 3868 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 3869 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 3870 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3871 .type = ARM_CP_ALIAS, 3872 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3873 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3874 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 3875 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3876 .access = PL2_RW, 3877 /* no .writefn needed as this can't cause an ASID change; 3878 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3879 */ 3880 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3881 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3882 .cp = 15, .opc1 = 6, .crm = 2, 3883 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3884 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3885 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 3886 .writefn = vttbr_write }, 3887 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3888 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3889 .access = PL2_RW, .writefn = vttbr_write, 3890 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 3891 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3892 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3893 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 3894 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 3895 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3896 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3897 .access = PL2_RW, .resetvalue = 0, 3898 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 3899 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3900 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3901 .access = PL2_RW, .resetvalue = 0, 3902 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3903 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3904 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3905 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3906 { .name = "TLBIALLNSNH", 3907 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3908 .type = ARM_CP_NO_RAW, .access = PL2_W, 3909 .writefn = tlbiall_nsnh_write }, 3910 { .name = "TLBIALLNSNHIS", 3911 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3912 .type = ARM_CP_NO_RAW, .access = PL2_W, 3913 .writefn = tlbiall_nsnh_is_write }, 3914 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 3915 .type = ARM_CP_NO_RAW, .access = PL2_W, 3916 .writefn = tlbiall_hyp_write }, 3917 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 3918 .type = ARM_CP_NO_RAW, .access = PL2_W, 3919 .writefn = tlbiall_hyp_is_write }, 3920 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 3921 .type = ARM_CP_NO_RAW, .access = PL2_W, 3922 .writefn = tlbimva_hyp_write }, 3923 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 3924 .type = ARM_CP_NO_RAW, .access = PL2_W, 3925 .writefn = tlbimva_hyp_is_write }, 3926 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 3927 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 3928 .type = ARM_CP_NO_RAW, .access = PL2_W, 3929 .writefn = tlbi_aa64_alle2_write }, 3930 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 3931 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 3932 .type = ARM_CP_NO_RAW, .access = PL2_W, 3933 .writefn = tlbi_aa64_vae2_write }, 3934 { .name = "TLBI_VALE2", .state = 
ARM_CP_STATE_AA64, 3935 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3936 .access = PL2_W, .type = ARM_CP_NO_RAW, 3937 .writefn = tlbi_aa64_vae2_write }, 3938 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 3939 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 3940 .access = PL2_W, .type = ARM_CP_NO_RAW, 3941 .writefn = tlbi_aa64_alle2is_write }, 3942 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 3943 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 3944 .type = ARM_CP_NO_RAW, .access = PL2_W, 3945 .writefn = tlbi_aa64_vae2is_write }, 3946 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 3947 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3948 .access = PL2_W, .type = ARM_CP_NO_RAW, 3949 .writefn = tlbi_aa64_vae2is_write }, 3950 #ifndef CONFIG_USER_ONLY 3951 /* Unlike the other EL2-related AT operations, these must 3952 * UNDEF from EL3 if EL2 is not implemented, which is why we 3953 * define them here rather than with the rest of the AT ops. 3954 */ 3955 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 3956 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 3957 .access = PL2_W, .accessfn = at_s1e2_access, 3958 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3959 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 3960 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 3961 .access = PL2_W, .accessfn = at_s1e2_access, 3962 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3963 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 3964 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 3965 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 3966 * to behave as if SCR.NS was 1. 3967 */ 3968 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 3969 .access = PL2_W, 3970 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 3971 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 3972 .access = PL2_W, 3973 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 3974 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 3975 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 3976 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 3977 * reset values as IMPDEF. We choose to reset to 3 to comply with 3978 * both ARMv7 and ARMv8. 
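       * (In CNTHCTL_EL2, bit 0 is EL1PCTEN and bit 1 is EL1PCEN, so resetting
       * to 3 leaves EL1/EL0 accesses to the physical counter and timer
       * registers untrapped.)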
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay.
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};

static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
     * At Secure EL1 it traps to EL3.
     */
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads.
*/ 4050 if (isread) { 4051 return CP_ACCESS_OK; 4052 } 4053 return CP_ACCESS_TRAP_UNCATEGORIZED; 4054 } 4055 4056 static const ARMCPRegInfo el3_cp_reginfo[] = { 4057 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 4058 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 4059 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 4060 .resetvalue = 0, .writefn = scr_write }, 4061 { .name = "SCR", .type = ARM_CP_ALIAS, 4062 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 4063 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4064 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 4065 .writefn = scr_write }, 4066 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 4067 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 4068 .access = PL3_RW, .resetvalue = 0, 4069 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 4070 { .name = "SDER", 4071 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 4072 .access = PL3_RW, .resetvalue = 0, 4073 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 4074 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 4075 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4076 .writefn = vbar_write, .resetvalue = 0, 4077 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 4078 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 4079 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 4080 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 4081 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 4082 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 4083 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 4084 .access = PL3_RW, 4085 /* no .writefn needed as this can't cause an ASID change; 4086 * we must provide a .raw_writefn and .resetfn because we handle 4087 * reset and migration for the AArch32 TTBCR(S), which might be 4088 * using mask and base_mask. 
4089 */ 4090 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 4091 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 4092 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 4093 .type = ARM_CP_ALIAS, 4094 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 4095 .access = PL3_RW, 4096 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 4097 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 4098 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 4099 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 4100 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 4101 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 4102 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 4103 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 4104 .type = ARM_CP_ALIAS, 4105 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 4106 .access = PL3_RW, 4107 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 4108 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 4109 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 4110 .access = PL3_RW, .writefn = vbar_write, 4111 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 4112 .resetvalue = 0 }, 4113 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 4114 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 4115 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 4116 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 4117 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 4118 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 4119 .access = PL3_RW, .resetvalue = 0, 4120 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 4121 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 4122 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 4123 .access = PL3_RW, .type = ARM_CP_CONST, 4124 .resetvalue = 0 }, 4125 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 4126 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 4127 .access = PL3_RW, .type = ARM_CP_CONST, 4128 .resetvalue = 0 }, 4129 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 4130 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 4131 .access = PL3_RW, .type = ARM_CP_CONST, 4132 .resetvalue = 0 }, 4133 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 4134 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 4135 .access = PL3_W, .type = ARM_CP_NO_RAW, 4136 .writefn = tlbi_aa64_alle3is_write }, 4137 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 4138 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 4139 .access = PL3_W, .type = ARM_CP_NO_RAW, 4140 .writefn = tlbi_aa64_vae3is_write }, 4141 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 4142 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 4143 .access = PL3_W, .type = ARM_CP_NO_RAW, 4144 .writefn = tlbi_aa64_vae3is_write }, 4145 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 4146 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 4147 .access = PL3_W, .type = ARM_CP_NO_RAW, 4148 .writefn = tlbi_aa64_alle3_write }, 4149 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 4150 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 4151 .access = PL3_W, .type = ARM_CP_NO_RAW, 4152 .writefn = tlbi_aa64_vae3_write }, 4153 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 4154 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 4155 .access = PL3_W, .type = ARM_CP_NO_RAW, 4156 .writefn = tlbi_aa64_vae3_write }, 4157 REGINFO_SENTINEL 4158 }; 4159 4160 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 
4161 bool isread) 4162 { 4163 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, 4164 * but the AArch32 CTR has its own reginfo struct) 4165 */ 4166 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 4167 return CP_ACCESS_TRAP; 4168 } 4169 return CP_ACCESS_OK; 4170 } 4171 4172 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4173 uint64_t value) 4174 { 4175 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 4176 * read via a bit in OSLSR_EL1. 4177 */ 4178 int oslock; 4179 4180 if (ri->state == ARM_CP_STATE_AA32) { 4181 oslock = (value == 0xC5ACCE55); 4182 } else { 4183 oslock = value & 1; 4184 } 4185 4186 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 4187 } 4188 4189 static const ARMCPRegInfo debug_cp_reginfo[] = { 4190 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 4191 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 4192 * unlike DBGDRAR it is never accessible from EL0. 4193 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 4194 * accessor. 4195 */ 4196 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 4197 .access = PL0_R, .accessfn = access_tdra, 4198 .type = ARM_CP_CONST, .resetvalue = 0 }, 4199 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 4200 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 4201 .access = PL1_R, .accessfn = access_tdra, 4202 .type = ARM_CP_CONST, .resetvalue = 0 }, 4203 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 4204 .access = PL0_R, .accessfn = access_tdra, 4205 .type = ARM_CP_CONST, .resetvalue = 0 }, 4206 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 4207 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 4208 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4209 .access = PL1_RW, .accessfn = access_tda, 4210 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 4211 .resetvalue = 0 }, 4212 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 4213 * We don't implement the configurable EL0 access. 4214 */ 4215 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 4216 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4217 .type = ARM_CP_ALIAS, 4218 .access = PL1_R, .accessfn = access_tda, 4219 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 4220 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 4221 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 4222 .access = PL1_W, .type = ARM_CP_NO_RAW, 4223 .accessfn = access_tdosa, 4224 .writefn = oslar_write }, 4225 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 4226 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 4227 .access = PL1_R, .resetvalue = 10, 4228 .accessfn = access_tdosa, 4229 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 4230 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 4231 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 4232 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 4233 .access = PL1_RW, .accessfn = access_tdosa, 4234 .type = ARM_CP_NOP }, 4235 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 4236 * implement vector catch debug events yet. 
4237 */ 4238 { .name = "DBGVCR", 4239 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 4240 .access = PL1_RW, .accessfn = access_tda, 4241 .type = ARM_CP_NOP }, 4242 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 4243 * to save and restore a 32-bit guest's DBGVCR) 4244 */ 4245 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 4246 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 4247 .access = PL2_RW, .accessfn = access_tda, 4248 .type = ARM_CP_NOP }, 4249 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 4250 * Channel but Linux may try to access this register. The 32-bit 4251 * alias is DBGDCCINT. 4252 */ 4253 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 4254 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4255 .access = PL1_RW, .accessfn = access_tda, 4256 .type = ARM_CP_NOP }, 4257 REGINFO_SENTINEL 4258 }; 4259 4260 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 4261 /* 64 bit access versions of the (dummy) debug registers */ 4262 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 4263 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4264 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 4265 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4266 REGINFO_SENTINEL 4267 }; 4268 4269 /* Return the exception level to which SVE-disabled exceptions should 4270 * be taken, or 0 if SVE is enabled. 4271 */ 4272 static int sve_exception_el(CPUARMState *env) 4273 { 4274 #ifndef CONFIG_USER_ONLY 4275 unsigned current_el = arm_current_el(env); 4276 4277 /* The CPACR.ZEN controls traps to EL1: 4278 * 0, 2 : trap EL0 and EL1 accesses 4279 * 1 : trap only EL0 accesses 4280 * 3 : trap no accesses 4281 */ 4282 switch (extract32(env->cp15.cpacr_el1, 16, 2)) { 4283 default: 4284 if (current_el <= 1) { 4285 /* Trap to PL1, which might be EL1 or EL3 */ 4286 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4287 return 3; 4288 } 4289 return 1; 4290 } 4291 break; 4292 case 1: 4293 if (current_el == 0) { 4294 return 1; 4295 } 4296 break; 4297 case 3: 4298 break; 4299 } 4300 4301 /* Similarly for CPACR.FPEN, after having checked ZEN. */ 4302 switch (extract32(env->cp15.cpacr_el1, 20, 2)) { 4303 default: 4304 if (current_el <= 1) { 4305 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4306 return 3; 4307 } 4308 return 1; 4309 } 4310 break; 4311 case 1: 4312 if (current_el == 0) { 4313 return 1; 4314 } 4315 break; 4316 case 3: 4317 break; 4318 } 4319 4320 /* CPTR_EL2. Check both TZ and TFP. */ 4321 if (current_el <= 2 4322 && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ)) 4323 && !arm_is_secure_below_el3(env)) { 4324 return 2; 4325 } 4326 4327 /* CPTR_EL3. Check both EZ and TFP. */ 4328 if (!(env->cp15.cptr_el[3] & CPTR_EZ) 4329 || (env->cp15.cptr_el[3] & CPTR_TFP)) { 4330 return 3; 4331 } 4332 #endif 4333 return 0; 4334 } 4335 4336 static CPAccessResult zcr_access(CPUARMState *env, const ARMCPRegInfo *ri, 4337 bool isread) 4338 { 4339 switch (sve_exception_el(env)) { 4340 case 3: 4341 return CP_ACCESS_TRAP_EL3; 4342 case 2: 4343 return CP_ACCESS_TRAP_EL2; 4344 case 1: 4345 return CP_ACCESS_TRAP; 4346 } 4347 return CP_ACCESS_OK; 4348 } 4349 4350 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4351 uint64_t value) 4352 { 4353 /* Bits other than [3:0] are RAZ/WI. 
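     * The [3:0] field is LEN: software requests an SVE vector length of
     * (LEN + 1) * 128 bits, so only that field is kept by the write below.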
*/ 4354 raw_write(env, ri, value & 0xf); 4355 } 4356 4357 static const ARMCPRegInfo zcr_el1_reginfo = { 4358 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 4359 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 4360 .access = PL1_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT, 4361 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 4362 .writefn = zcr_write, .raw_writefn = raw_write 4363 }; 4364 4365 static const ARMCPRegInfo zcr_el2_reginfo = { 4366 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4367 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4368 .access = PL2_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT, 4369 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 4370 .writefn = zcr_write, .raw_writefn = raw_write 4371 }; 4372 4373 static const ARMCPRegInfo zcr_no_el2_reginfo = { 4374 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4375 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4376 .access = PL2_RW, .type = ARM_CP_64BIT, 4377 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 4378 }; 4379 4380 static const ARMCPRegInfo zcr_el3_reginfo = { 4381 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 4382 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 4383 .access = PL3_RW, .accessfn = zcr_access, .type = ARM_CP_64BIT, 4384 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 4385 .writefn = zcr_write, .raw_writefn = raw_write 4386 }; 4387 4388 void hw_watchpoint_update(ARMCPU *cpu, int n) 4389 { 4390 CPUARMState *env = &cpu->env; 4391 vaddr len = 0; 4392 vaddr wvr = env->cp15.dbgwvr[n]; 4393 uint64_t wcr = env->cp15.dbgwcr[n]; 4394 int mask; 4395 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 4396 4397 if (env->cpu_watchpoint[n]) { 4398 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 4399 env->cpu_watchpoint[n] = NULL; 4400 } 4401 4402 if (!extract64(wcr, 0, 1)) { 4403 /* E bit clear : watchpoint disabled */ 4404 return; 4405 } 4406 4407 switch (extract64(wcr, 3, 2)) { 4408 case 0: 4409 /* LSC 00 is reserved and must behave as if the wp is disabled */ 4410 return; 4411 case 1: 4412 flags |= BP_MEM_READ; 4413 break; 4414 case 2: 4415 flags |= BP_MEM_WRITE; 4416 break; 4417 case 3: 4418 flags |= BP_MEM_ACCESS; 4419 break; 4420 } 4421 4422 /* Attempts to use both MASK and BAS fields simultaneously are 4423 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 4424 * thus generating a watchpoint for every byte in the masked region. 4425 */ 4426 mask = extract64(wcr, 24, 4); 4427 if (mask == 1 || mask == 2) { 4428 /* Reserved values of MASK; we must act as if the mask value was 4429 * some non-reserved value, or as if the watchpoint were disabled. 4430 * We choose the latter. 4431 */ 4432 return; 4433 } else if (mask) { 4434 /* Watchpoint covers an aligned area up to 2GB in size */ 4435 len = 1ULL << mask; 4436 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 4437 * whether the watchpoint fires when the unmasked bits match; we opt 4438 * to generate the exceptions. 4439 */ 4440 wvr &= ~(len - 1); 4441 } else { 4442 /* Watchpoint covers bytes defined by the byte address select bits */ 4443 int bas = extract64(wcr, 5, 8); 4444 int basstart; 4445 4446 if (bas == 0) { 4447 /* This must act as if the watchpoint is disabled */ 4448 return; 4449 } 4450 4451 if (extract64(wvr, 2, 1)) { 4452 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 4453 * ignored, and BAS[3:0] define which bytes to watch. 
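             * For example, BAS = 0b00001100 here watches just the two bytes
             * at WVR + 2 and WVR + 3 (the code below computes basstart = 2,
             * len = 2 and adds basstart to the address).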
             */
            bas &= 0xf;
        }
        /* The BAS bits are supposed to be programmed to indicate a contiguous
         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
         * we fire for each byte in the word/doubleword addressed by the WVR.
         * We choose to ignore any non-zero bits after the first range of 1s.
         */
        basstart = ctz32(bas);
        len = cto32(bas >> basstart);
        wvr += basstart;
    }

    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
                          &env->cpu_watchpoint[n]);
}

void hw_watchpoint_update_all(ARMCPU *cpu)
{
    int i;
    CPUARMState *env = &cpu->env;

    /* Completely clear out existing QEMU watchpoints and our array, to
     * avoid possible stale entries following migration load.
     */
    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));

    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
        hw_watchpoint_update(cpu, i);
    }
}

static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
     * register reads and behaves as if values written are sign extended.
     * Bits [1:0] are RES0.
     */
    value = sextract64(value, 0, 49) & ~3ULL;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int i = ri->crm;

    raw_write(env, ri, value);
    hw_watchpoint_update(cpu, i);
}

void hw_breakpoint_update(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t bvr = env->cp15.dbgbvr[n];
    uint64_t bcr = env->cp15.dbgbcr[n];
    vaddr addr;
    int bt;
    int flags = BP_CPU;

    if (env->cpu_breakpoint[n]) {
        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
        env->cpu_breakpoint[n] = NULL;
    }

    if (!extract64(bcr, 0, 1)) {
        /* E bit clear : breakpoint disabled */
        return;
    }

    bt = extract64(bcr, 20, 4);

    switch (bt) {
    case 4: /* unlinked address mismatch (reserved if AArch64) */
    case 5: /* linked address mismatch (reserved if AArch64) */
        qemu_log_mask(LOG_UNIMP,
                      "arm: address mismatch breakpoint types not implemented");
        return;
    case 0: /* unlinked address match */
    case 1: /* linked address match */
    {
        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
         * we behave as if the register was sign extended. Bits [1:0] are
         * RES0. The BAS field is used to allow setting breakpoints on 16
         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
         * a bp will fire if the addresses covered by the bp and the addresses
         * covered by the insn overlap but the insn doesn't start at the
         * start of the bp address range. We choose to require the insn and
         * the bp to have the same address. The constraints on writing to
         * BAS enforced in dbgbcr_write mean we have only four cases:
         *  0b0000  => no breakpoint
         *  0b0011  => breakpoint on addr
         *  0b1100  => breakpoint on addr + 2
         *  0b1111  => breakpoint on addr
         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4557 */ 4558 int bas = extract64(bcr, 5, 4); 4559 addr = sextract64(bvr, 0, 49) & ~3ULL; 4560 if (bas == 0) { 4561 return; 4562 } 4563 if (bas == 0xc) { 4564 addr += 2; 4565 } 4566 break; 4567 } 4568 case 2: /* unlinked context ID match */ 4569 case 8: /* unlinked VMID match (reserved if no EL2) */ 4570 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 4571 qemu_log_mask(LOG_UNIMP, 4572 "arm: unlinked context breakpoint types not implemented"); 4573 return; 4574 case 9: /* linked VMID match (reserved if no EL2) */ 4575 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 4576 case 3: /* linked context ID match */ 4577 default: 4578 /* We must generate no events for Linked context matches (unless 4579 * they are linked to by some other bp/wp, which is handled in 4580 * updates for the linking bp/wp). We choose to also generate no events 4581 * for reserved values. 4582 */ 4583 return; 4584 } 4585 4586 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 4587 } 4588 4589 void hw_breakpoint_update_all(ARMCPU *cpu) 4590 { 4591 int i; 4592 CPUARMState *env = &cpu->env; 4593 4594 /* Completely clear out existing QEMU breakpoints and our array, to 4595 * avoid possible stale entries following migration load. 4596 */ 4597 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 4598 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 4599 4600 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 4601 hw_breakpoint_update(cpu, i); 4602 } 4603 } 4604 4605 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4606 uint64_t value) 4607 { 4608 ARMCPU *cpu = arm_env_get_cpu(env); 4609 int i = ri->crm; 4610 4611 raw_write(env, ri, value); 4612 hw_breakpoint_update(cpu, i); 4613 } 4614 4615 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4616 uint64_t value) 4617 { 4618 ARMCPU *cpu = arm_env_get_cpu(env); 4619 int i = ri->crm; 4620 4621 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 4622 * copy of BAS[0]. 4623 */ 4624 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 4625 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 4626 4627 raw_write(env, ri, value); 4628 hw_breakpoint_update(cpu, i); 4629 } 4630 4631 static void define_debug_regs(ARMCPU *cpu) 4632 { 4633 /* Define v7 and v8 architectural debug registers. 4634 * These are just dummy implementations for now. 4635 */ 4636 int i; 4637 int wrps, brps, ctx_cmps; 4638 ARMCPRegInfo dbgdidr = { 4639 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 4640 .access = PL0_R, .accessfn = access_tda, 4641 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 4642 }; 4643 4644 /* Note that all these register fields hold "number of Xs minus 1". */ 4645 brps = extract32(cpu->dbgdidr, 24, 4); 4646 wrps = extract32(cpu->dbgdidr, 28, 4); 4647 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 4648 4649 assert(ctx_cmps <= brps); 4650 4651 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 4652 * of the debug registers such as number of breakpoints; 4653 * check that if they both exist then they agree. 
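     * For example, DBGDIDR.BRPs lives in bits [27:24] and must match
     * ID_AA64DFR0_EL1.BRPs in bits [15:12]; the asserts below check this
     * for breakpoints, watchpoints and context-matching comparators.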
4654 */ 4655 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 4656 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 4657 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 4658 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 4659 } 4660 4661 define_one_arm_cp_reg(cpu, &dbgdidr); 4662 define_arm_cp_regs(cpu, debug_cp_reginfo); 4663 4664 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 4665 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 4666 } 4667 4668 for (i = 0; i < brps + 1; i++) { 4669 ARMCPRegInfo dbgregs[] = { 4670 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 4671 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 4672 .access = PL1_RW, .accessfn = access_tda, 4673 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 4674 .writefn = dbgbvr_write, .raw_writefn = raw_write 4675 }, 4676 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 4677 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 4678 .access = PL1_RW, .accessfn = access_tda, 4679 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 4680 .writefn = dbgbcr_write, .raw_writefn = raw_write 4681 }, 4682 REGINFO_SENTINEL 4683 }; 4684 define_arm_cp_regs(cpu, dbgregs); 4685 } 4686 4687 for (i = 0; i < wrps + 1; i++) { 4688 ARMCPRegInfo dbgregs[] = { 4689 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 4690 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 4691 .access = PL1_RW, .accessfn = access_tda, 4692 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 4693 .writefn = dbgwvr_write, .raw_writefn = raw_write 4694 }, 4695 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 4696 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 4697 .access = PL1_RW, .accessfn = access_tda, 4698 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 4699 .writefn = dbgwcr_write, .raw_writefn = raw_write 4700 }, 4701 REGINFO_SENTINEL 4702 }; 4703 define_arm_cp_regs(cpu, dbgregs); 4704 } 4705 } 4706 4707 /* We don't know until after realize whether there's a GICv3 4708 * attached, and that is what registers the gicv3 sysregs. 4709 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 4710 * at runtime. 4711 */ 4712 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 4713 { 4714 ARMCPU *cpu = arm_env_get_cpu(env); 4715 uint64_t pfr1 = cpu->id_pfr1; 4716 4717 if (env->gicv3state) { 4718 pfr1 |= 1 << 28; 4719 } 4720 return pfr1; 4721 } 4722 4723 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 4724 { 4725 ARMCPU *cpu = arm_env_get_cpu(env); 4726 uint64_t pfr0 = cpu->id_aa64pfr0; 4727 4728 if (env->gicv3state) { 4729 pfr0 |= 1 << 24; 4730 } 4731 return pfr0; 4732 } 4733 4734 void register_cp_regs_for_features(ARMCPU *cpu) 4735 { 4736 /* Register all the coprocessor registers based on feature bits */ 4737 CPUARMState *env = &cpu->env; 4738 if (arm_feature(env, ARM_FEATURE_M)) { 4739 /* M profile has no coprocessor registers */ 4740 return; 4741 } 4742 4743 define_arm_cp_regs(cpu, cp_reginfo); 4744 if (!arm_feature(env, ARM_FEATURE_V8)) { 4745 /* Must go early as it is full of wildcards that may be 4746 * overridden by later definitions. 
4747 */ 4748 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 4749 } 4750 4751 if (arm_feature(env, ARM_FEATURE_V6)) { 4752 /* The ID registers all have impdef reset values */ 4753 ARMCPRegInfo v6_idregs[] = { 4754 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 4755 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4756 .access = PL1_R, .type = ARM_CP_CONST, 4757 .resetvalue = cpu->id_pfr0 }, 4758 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 4759 * the value of the GIC field until after we define these regs. 4760 */ 4761 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 4762 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 4763 .access = PL1_R, .type = ARM_CP_NO_RAW, 4764 .readfn = id_pfr1_read, 4765 .writefn = arm_cp_write_ignore }, 4766 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 4767 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 4768 .access = PL1_R, .type = ARM_CP_CONST, 4769 .resetvalue = cpu->id_dfr0 }, 4770 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 4771 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 4772 .access = PL1_R, .type = ARM_CP_CONST, 4773 .resetvalue = cpu->id_afr0 }, 4774 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 4775 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 4776 .access = PL1_R, .type = ARM_CP_CONST, 4777 .resetvalue = cpu->id_mmfr0 }, 4778 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 4779 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 4780 .access = PL1_R, .type = ARM_CP_CONST, 4781 .resetvalue = cpu->id_mmfr1 }, 4782 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 4783 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 4784 .access = PL1_R, .type = ARM_CP_CONST, 4785 .resetvalue = cpu->id_mmfr2 }, 4786 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 4787 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 4788 .access = PL1_R, .type = ARM_CP_CONST, 4789 .resetvalue = cpu->id_mmfr3 }, 4790 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 4791 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4792 .access = PL1_R, .type = ARM_CP_CONST, 4793 .resetvalue = cpu->id_isar0 }, 4794 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 4795 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 4796 .access = PL1_R, .type = ARM_CP_CONST, 4797 .resetvalue = cpu->id_isar1 }, 4798 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 4799 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4800 .access = PL1_R, .type = ARM_CP_CONST, 4801 .resetvalue = cpu->id_isar2 }, 4802 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 4803 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 4804 .access = PL1_R, .type = ARM_CP_CONST, 4805 .resetvalue = cpu->id_isar3 }, 4806 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 4807 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 4808 .access = PL1_R, .type = ARM_CP_CONST, 4809 .resetvalue = cpu->id_isar4 }, 4810 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 4811 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 4812 .access = PL1_R, .type = ARM_CP_CONST, 4813 .resetvalue = cpu->id_isar5 }, 4814 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 4815 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 4816 .access = PL1_R, .type = ARM_CP_CONST, 4817 .resetvalue = cpu->id_mmfr4 }, 4818 /* 7 is as yet unallocated and must RAZ */ 4819 { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH, 4820 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 4821 .access = PL1_R, .type = ARM_CP_CONST, 4822 .resetvalue = 0 }, 4823 REGINFO_SENTINEL 4824 }; 4825 
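/* Each entry above is ARM_CP_STATE_BOTH, so a single definition provides both the AArch32 cp15 encoding and, where the CPU has one, the matching AArch64 ID_*_EL1 view of the same register. */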
define_arm_cp_regs(cpu, v6_idregs); 4826 define_arm_cp_regs(cpu, v6_cp_reginfo); 4827 } else { 4828 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 4829 } 4830 if (arm_feature(env, ARM_FEATURE_V6K)) { 4831 define_arm_cp_regs(cpu, v6k_cp_reginfo); 4832 } 4833 if (arm_feature(env, ARM_FEATURE_V7MP) && 4834 !arm_feature(env, ARM_FEATURE_PMSA)) { 4835 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 4836 } 4837 if (arm_feature(env, ARM_FEATURE_V7)) { 4838 /* v7 performance monitor control register: same implementor 4839 * field as main ID register, and we implement only the cycle 4840 * count register. 4841 */ 4842 #ifndef CONFIG_USER_ONLY 4843 ARMCPRegInfo pmcr = { 4844 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 4845 .access = PL0_RW, 4846 .type = ARM_CP_IO | ARM_CP_ALIAS, 4847 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 4848 .accessfn = pmreg_access, .writefn = pmcr_write, 4849 .raw_writefn = raw_write, 4850 }; 4851 ARMCPRegInfo pmcr64 = { 4852 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 4853 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 4854 .access = PL0_RW, .accessfn = pmreg_access, 4855 .type = ARM_CP_IO, 4856 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 4857 .resetvalue = cpu->midr & 0xff000000, 4858 .writefn = pmcr_write, .raw_writefn = raw_write, 4859 }; 4860 define_one_arm_cp_reg(cpu, &pmcr); 4861 define_one_arm_cp_reg(cpu, &pmcr64); 4862 #endif 4863 ARMCPRegInfo clidr = { 4864 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 4865 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 4866 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr 4867 }; 4868 define_one_arm_cp_reg(cpu, &clidr); 4869 define_arm_cp_regs(cpu, v7_cp_reginfo); 4870 define_debug_regs(cpu); 4871 } else { 4872 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 4873 } 4874 if (arm_feature(env, ARM_FEATURE_V8)) { 4875 /* AArch64 ID registers, which all have impdef reset values. 4876 * Note that within the ID register ranges the unused slots 4877 * must all RAZ, not UNDEF; future architecture versions may 4878 * define new registers here. 4879 */ 4880 ARMCPRegInfo v8_idregs[] = { 4881 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 4882 * know the right value for the GIC field until after we 4883 * define these regs. 
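* id_aa64pfr0_read() ORs in bit 24 (the GIC sysreg interface field) once env->gicv3state is set, mirroring what id_pfr1_read() does for the AArch32 ID_PFR1 above.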
4884 */ 4885 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 4886 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 4887 .access = PL1_R, .type = ARM_CP_NO_RAW, 4888 .readfn = id_aa64pfr0_read, 4889 .writefn = arm_cp_write_ignore }, 4890 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 4891 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 4892 .access = PL1_R, .type = ARM_CP_CONST, 4893 .resetvalue = cpu->id_aa64pfr1}, 4894 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4895 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 4896 .access = PL1_R, .type = ARM_CP_CONST, 4897 .resetvalue = 0 }, 4898 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4899 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 4900 .access = PL1_R, .type = ARM_CP_CONST, 4901 .resetvalue = 0 }, 4902 { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4903 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 4904 .access = PL1_R, .type = ARM_CP_CONST, 4905 .resetvalue = 0 }, 4906 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4907 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 4908 .access = PL1_R, .type = ARM_CP_CONST, 4909 .resetvalue = 0 }, 4910 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4911 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 4912 .access = PL1_R, .type = ARM_CP_CONST, 4913 .resetvalue = 0 }, 4914 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4915 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 4916 .access = PL1_R, .type = ARM_CP_CONST, 4917 .resetvalue = 0 }, 4918 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 4919 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 4920 .access = PL1_R, .type = ARM_CP_CONST, 4921 .resetvalue = cpu->id_aa64dfr0 }, 4922 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 4923 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 4924 .access = PL1_R, .type = ARM_CP_CONST, 4925 .resetvalue = cpu->id_aa64dfr1 }, 4926 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4927 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 4928 .access = PL1_R, .type = ARM_CP_CONST, 4929 .resetvalue = 0 }, 4930 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4931 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 4932 .access = PL1_R, .type = ARM_CP_CONST, 4933 .resetvalue = 0 }, 4934 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 4935 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 4936 .access = PL1_R, .type = ARM_CP_CONST, 4937 .resetvalue = cpu->id_aa64afr0 }, 4938 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 4939 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 4940 .access = PL1_R, .type = ARM_CP_CONST, 4941 .resetvalue = cpu->id_aa64afr1 }, 4942 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4943 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 4944 .access = PL1_R, .type = ARM_CP_CONST, 4945 .resetvalue = 0 }, 4946 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4947 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 4948 .access = PL1_R, .type = ARM_CP_CONST, 4949 .resetvalue = 0 }, 4950 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 4951 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 4952 .access = PL1_R, .type = ARM_CP_CONST, 4953 .resetvalue = cpu->id_aa64isar0 }, 4954 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 4955 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 
4956 .access = PL1_R, .type = ARM_CP_CONST, 4957 .resetvalue = cpu->id_aa64isar1 }, 4958 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4959 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 4960 .access = PL1_R, .type = ARM_CP_CONST, 4961 .resetvalue = 0 }, 4962 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4963 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 4964 .access = PL1_R, .type = ARM_CP_CONST, 4965 .resetvalue = 0 }, 4966 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4967 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 4968 .access = PL1_R, .type = ARM_CP_CONST, 4969 .resetvalue = 0 }, 4970 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4971 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 4972 .access = PL1_R, .type = ARM_CP_CONST, 4973 .resetvalue = 0 }, 4974 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4975 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 4976 .access = PL1_R, .type = ARM_CP_CONST, 4977 .resetvalue = 0 }, 4978 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4979 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 4980 .access = PL1_R, .type = ARM_CP_CONST, 4981 .resetvalue = 0 }, 4982 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 4983 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 4984 .access = PL1_R, .type = ARM_CP_CONST, 4985 .resetvalue = cpu->id_aa64mmfr0 }, 4986 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 4987 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 4988 .access = PL1_R, .type = ARM_CP_CONST, 4989 .resetvalue = cpu->id_aa64mmfr1 }, 4990 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4991 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 4992 .access = PL1_R, .type = ARM_CP_CONST, 4993 .resetvalue = 0 }, 4994 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4995 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 4996 .access = PL1_R, .type = ARM_CP_CONST, 4997 .resetvalue = 0 }, 4998 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4999 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 5000 .access = PL1_R, .type = ARM_CP_CONST, 5001 .resetvalue = 0 }, 5002 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5003 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 5004 .access = PL1_R, .type = ARM_CP_CONST, 5005 .resetvalue = 0 }, 5006 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5007 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 5008 .access = PL1_R, .type = ARM_CP_CONST, 5009 .resetvalue = 0 }, 5010 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5011 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 5012 .access = PL1_R, .type = ARM_CP_CONST, 5013 .resetvalue = 0 }, 5014 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 5015 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 5016 .access = PL1_R, .type = ARM_CP_CONST, 5017 .resetvalue = cpu->mvfr0 }, 5018 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 5019 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 5020 .access = PL1_R, .type = ARM_CP_CONST, 5021 .resetvalue = cpu->mvfr1 }, 5022 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 5023 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 5024 .access = PL1_R, .type = ARM_CP_CONST, 5025 .resetvalue = cpu->mvfr2 }, 5026 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5027 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, 
.opc2 = 3, 5028 .access = PL1_R, .type = ARM_CP_CONST, 5029 .resetvalue = 0 }, 5030 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5031 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 5032 .access = PL1_R, .type = ARM_CP_CONST, 5033 .resetvalue = 0 }, 5034 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5035 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 5036 .access = PL1_R, .type = ARM_CP_CONST, 5037 .resetvalue = 0 }, 5038 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5039 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 5040 .access = PL1_R, .type = ARM_CP_CONST, 5041 .resetvalue = 0 }, 5042 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5043 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 5044 .access = PL1_R, .type = ARM_CP_CONST, 5045 .resetvalue = 0 }, 5046 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 5047 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 5048 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5049 .resetvalue = cpu->pmceid0 }, 5050 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 5051 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 5052 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5053 .resetvalue = cpu->pmceid0 }, 5054 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 5055 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 5056 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5057 .resetvalue = cpu->pmceid1 }, 5058 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 5059 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 5060 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5061 .resetvalue = cpu->pmceid1 }, 5062 REGINFO_SENTINEL 5063 }; 5064 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 5065 if (!arm_feature(env, ARM_FEATURE_EL3) && 5066 !arm_feature(env, ARM_FEATURE_EL2)) { 5067 ARMCPRegInfo rvbar = { 5068 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 5069 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5070 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 5071 }; 5072 define_one_arm_cp_reg(cpu, &rvbar); 5073 } 5074 define_arm_cp_regs(cpu, v8_idregs); 5075 define_arm_cp_regs(cpu, v8_cp_reginfo); 5076 } 5077 if (arm_feature(env, ARM_FEATURE_EL2)) { 5078 uint64_t vmpidr_def = mpidr_read_val(env); 5079 ARMCPRegInfo vpidr_regs[] = { 5080 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 5081 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5082 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5083 .resetvalue = cpu->midr, 5084 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5085 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 5086 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5087 .access = PL2_RW, .resetvalue = cpu->midr, 5088 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5089 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 5090 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5091 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5092 .resetvalue = vmpidr_def, 5093 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 5094 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 5095 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5096 .access = PL2_RW, 5097 .resetvalue = vmpidr_def, 5098 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 5099 REGINFO_SENTINEL 5100 }; 5101 define_arm_cp_regs(cpu, vpidr_regs); 5102 define_arm_cp_regs(cpu, el2_cp_reginfo); 5103 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 5104 if 
(!arm_feature(env, ARM_FEATURE_EL3)) { 5105 ARMCPRegInfo rvbar = { 5106 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 5107 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 5108 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 5109 }; 5110 define_one_arm_cp_reg(cpu, &rvbar); 5111 } 5112 } else { 5113 /* If EL2 is missing but higher ELs are enabled, we need to 5114 * register the no_el2 reginfos. 5115 */ 5116 if (arm_feature(env, ARM_FEATURE_EL3)) { 5117 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 5118 * of MIDR_EL1 and MPIDR_EL1. 5119 */ 5120 ARMCPRegInfo vpidr_regs[] = { 5121 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5122 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5123 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5124 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 5125 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5126 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5127 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5128 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5129 .type = ARM_CP_NO_RAW, 5130 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 5131 REGINFO_SENTINEL 5132 }; 5133 define_arm_cp_regs(cpu, vpidr_regs); 5134 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 5135 } 5136 } 5137 if (arm_feature(env, ARM_FEATURE_EL3)) { 5138 define_arm_cp_regs(cpu, el3_cp_reginfo); 5139 ARMCPRegInfo el3_regs[] = { 5140 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 5141 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 5142 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 5143 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 5144 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 5145 .access = PL3_RW, 5146 .raw_writefn = raw_write, .writefn = sctlr_write, 5147 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 5148 .resetvalue = cpu->reset_sctlr }, 5149 REGINFO_SENTINEL 5150 }; 5151 5152 define_arm_cp_regs(cpu, el3_regs); 5153 } 5154 /* The behaviour of NSACR is sufficiently various that we don't 5155 * try to describe it in a single reginfo: 5156 * if EL3 is 64 bit, then trap to EL3 from S EL1, 5157 * reads as constant 0xc00 from NS EL1 and NS EL2 5158 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 5159 * if v7 without EL3, register doesn't exist 5160 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 5161 */ 5162 if (arm_feature(env, ARM_FEATURE_EL3)) { 5163 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5164 ARMCPRegInfo nsacr = { 5165 .name = "NSACR", .type = ARM_CP_CONST, 5166 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5167 .access = PL1_RW, .accessfn = nsacr_access, 5168 .resetvalue = 0xc00 5169 }; 5170 define_one_arm_cp_reg(cpu, &nsacr); 5171 } else { 5172 ARMCPRegInfo nsacr = { 5173 .name = "NSACR", 5174 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5175 .access = PL3_RW | PL1_R, 5176 .resetvalue = 0, 5177 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 5178 }; 5179 define_one_arm_cp_reg(cpu, &nsacr); 5180 } 5181 } else { 5182 if (arm_feature(env, ARM_FEATURE_V8)) { 5183 ARMCPRegInfo nsacr = { 5184 .name = "NSACR", .type = ARM_CP_CONST, 5185 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5186 .access = PL1_R, 5187 .resetvalue = 0xc00 5188 }; 5189 define_one_arm_cp_reg(cpu, &nsacr); 5190 } 5191 } 5192 5193 if (arm_feature(env, ARM_FEATURE_PMSA)) { 5194 if (arm_feature(env, ARM_FEATURE_V6)) { 5195 /* PMSAv6 not implemented */ 5196 assert(arm_feature(env, ARM_FEATURE_V7)); 5197 
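/* PMSAv7 shares a number of cp15 registers with the VMSA (hence vmsa_pmsa_cp_reginfo below); the MPU region registers themselves live in pmsav7_cp_reginfo. */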
define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5198 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 5199 } else { 5200 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 5201 } 5202 } else { 5203 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5204 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 5205 } 5206 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 5207 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 5208 } 5209 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 5210 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 5211 } 5212 if (arm_feature(env, ARM_FEATURE_VAPA)) { 5213 define_arm_cp_regs(cpu, vapa_cp_reginfo); 5214 } 5215 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 5216 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 5217 } 5218 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 5219 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 5220 } 5221 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 5222 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 5223 } 5224 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 5225 define_arm_cp_regs(cpu, omap_cp_reginfo); 5226 } 5227 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 5228 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 5229 } 5230 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5231 define_arm_cp_regs(cpu, xscale_cp_reginfo); 5232 } 5233 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 5234 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 5235 } 5236 if (arm_feature(env, ARM_FEATURE_LPAE)) { 5237 define_arm_cp_regs(cpu, lpae_cp_reginfo); 5238 } 5239 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 5240 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 5241 * be read-only (ie write causes UNDEF exception). 5242 */ 5243 { 5244 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 5245 /* Pre-v8 MIDR space. 5246 * Note that the MIDR isn't a simple constant register because 5247 * of the TI925 behaviour where writes to another register can 5248 * cause the MIDR value to change. 5249 * 5250 * Unimplemented registers in the c15 0 0 0 space default to 5251 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 5252 * and friends override accordingly. 5253 */ 5254 { .name = "MIDR", 5255 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 5256 .access = PL1_R, .resetvalue = cpu->midr, 5257 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 5258 .readfn = midr_read, 5259 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5260 .type = ARM_CP_OVERRIDE }, 5261 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ 5262 { .name = "DUMMY", 5263 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 5264 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5265 { .name = "DUMMY", 5266 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 5267 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5268 { .name = "DUMMY", 5269 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 5270 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5271 { .name = "DUMMY", 5272 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 5273 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5274 { .name = "DUMMY", 5275 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 5276 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5277 REGINFO_SENTINEL 5278 }; 5279 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 5280 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 5281 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 5282 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 5283 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5284 .readfn = midr_read }, 5285 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 5286 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5287 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5288 .access = PL1_R, .resetvalue = cpu->midr }, 5289 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5290 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 5291 .access = PL1_R, .resetvalue = cpu->midr }, 5292 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 5293 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 5294 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 5295 REGINFO_SENTINEL 5296 }; 5297 ARMCPRegInfo id_cp_reginfo[] = { 5298 /* These are common to v8 and pre-v8 */ 5299 { .name = "CTR", 5300 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 5301 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5302 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 5303 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 5304 .access = PL0_R, .accessfn = ctr_el0_access, 5305 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5306 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 5307 { .name = "TCMTR", 5308 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 5309 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5310 REGINFO_SENTINEL 5311 }; 5312 /* TLBTR is specific to VMSA */ 5313 ARMCPRegInfo id_tlbtr_reginfo = { 5314 .name = "TLBTR", 5315 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 5316 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 5317 }; 5318 /* MPUIR is specific to PMSA V6+ */ 5319 ARMCPRegInfo id_mpuir_reginfo = { 5320 .name = "MPUIR", 5321 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5322 .access = PL1_R, .type = ARM_CP_CONST, 5323 .resetvalue = cpu->pmsav7_dregion << 8 5324 }; 5325 ARMCPRegInfo crn0_wi_reginfo = { 5326 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 5327 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 5328 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 5329 }; 5330 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 5331 arm_feature(env, ARM_FEATURE_STRONGARM)) { 5332 ARMCPRegInfo *r; 5333 /* Register the blanket "writes ignored" value first to cover the 5334 * whole space. Then update the specific ID registers to allow write 5335 * access, so that they ignore writes rather than causing them to 5336 * UNDEF. 
5337 */ 5338 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 5339 for (r = id_pre_v8_midr_cp_reginfo; 5340 r->type != ARM_CP_SENTINEL; r++) { 5341 r->access = PL1_RW; 5342 } 5343 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 5344 r->access = PL1_RW; 5345 } 5346 id_tlbtr_reginfo.access = PL1_RW; 5347 id_mpuir_reginfo.access = PL1_RW; 5348 } 5349 if (arm_feature(env, ARM_FEATURE_V8)) { 5350 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 5351 } else { 5352 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 5353 } 5354 define_arm_cp_regs(cpu, id_cp_reginfo); 5355 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 5356 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 5357 } else if (arm_feature(env, ARM_FEATURE_V7)) { 5358 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 5359 } 5360 } 5361 5362 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 5363 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 5364 } 5365 5366 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 5367 ARMCPRegInfo auxcr_reginfo[] = { 5368 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 5369 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 5370 .access = PL1_RW, .type = ARM_CP_CONST, 5371 .resetvalue = cpu->reset_auxcr }, 5372 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 5373 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 5374 .access = PL2_RW, .type = ARM_CP_CONST, 5375 .resetvalue = 0 }, 5376 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 5377 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 5378 .access = PL3_RW, .type = ARM_CP_CONST, 5379 .resetvalue = 0 }, 5380 REGINFO_SENTINEL 5381 }; 5382 define_arm_cp_regs(cpu, auxcr_reginfo); 5383 } 5384 5385 if (arm_feature(env, ARM_FEATURE_CBAR)) { 5386 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5387 /* 32 bit view is [31:18] 0...0 [43:32].
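* For example (an illustrative value, not from any particular board): a 44-bit PERIPHBASE of 0x2c0000000 reads back through this 32-bit view as 0xc0000002, i.e. address bits [31:18] in the top of the word and bits [43:32] in bits [11:0].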
*/ 5388 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 5389 | extract64(cpu->reset_cbar, 32, 12); 5390 ARMCPRegInfo cbar_reginfo[] = { 5391 { .name = "CBAR", 5392 .type = ARM_CP_CONST, 5393 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5394 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 5395 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 5396 .type = ARM_CP_CONST, 5397 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 5398 .access = PL1_R, .resetvalue = cbar32 }, 5399 REGINFO_SENTINEL 5400 }; 5401 /* We don't implement a r/w 64 bit CBAR currently */ 5402 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 5403 define_arm_cp_regs(cpu, cbar_reginfo); 5404 } else { 5405 ARMCPRegInfo cbar = { 5406 .name = "CBAR", 5407 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5408 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 5409 .fieldoffset = offsetof(CPUARMState, 5410 cp15.c15_config_base_address) 5411 }; 5412 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 5413 cbar.access = PL1_R; 5414 cbar.fieldoffset = 0; 5415 cbar.type = ARM_CP_CONST; 5416 } 5417 define_one_arm_cp_reg(cpu, &cbar); 5418 } 5419 } 5420 5421 if (arm_feature(env, ARM_FEATURE_VBAR)) { 5422 ARMCPRegInfo vbar_cp_reginfo[] = { 5423 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 5424 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 5425 .access = PL1_RW, .writefn = vbar_write, 5426 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 5427 offsetof(CPUARMState, cp15.vbar_ns) }, 5428 .resetvalue = 0 }, 5429 REGINFO_SENTINEL 5430 }; 5431 define_arm_cp_regs(cpu, vbar_cp_reginfo); 5432 } 5433 5434 /* Generic registers whose values depend on the implementation */ 5435 { 5436 ARMCPRegInfo sctlr = { 5437 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 5438 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5439 .access = PL1_RW, 5440 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 5441 offsetof(CPUARMState, cp15.sctlr_ns) }, 5442 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 5443 .raw_writefn = raw_write, 5444 }; 5445 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5446 /* Normally we would always end the TB on an SCTLR write, but Linux 5447 * arch/arm/mach-pxa/sleep.S expects two instructions following 5448 * an MMU enable to execute from cache. Imitate this behaviour. 
5449 */ 5450 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 5451 } 5452 define_one_arm_cp_reg(cpu, &sctlr); 5453 } 5454 5455 if (arm_feature(env, ARM_FEATURE_SVE)) { 5456 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 5457 if (arm_feature(env, ARM_FEATURE_EL2)) { 5458 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 5459 } else { 5460 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 5461 } 5462 if (arm_feature(env, ARM_FEATURE_EL3)) { 5463 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 5464 } 5465 } 5466 } 5467 5468 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 5469 { 5470 CPUState *cs = CPU(cpu); 5471 CPUARMState *env = &cpu->env; 5472 5473 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5474 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 5475 aarch64_fpu_gdb_set_reg, 5476 34, "aarch64-fpu.xml", 0); 5477 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 5478 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5479 51, "arm-neon.xml", 0); 5480 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 5481 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5482 35, "arm-vfp3.xml", 0); 5483 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 5484 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5485 19, "arm-vfp.xml", 0); 5486 } 5487 } 5488 5489 /* Sort alphabetically by type name, except for "any". */ 5490 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 5491 { 5492 ObjectClass *class_a = (ObjectClass *)a; 5493 ObjectClass *class_b = (ObjectClass *)b; 5494 const char *name_a, *name_b; 5495 5496 name_a = object_class_get_name(class_a); 5497 name_b = object_class_get_name(class_b); 5498 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 5499 return 1; 5500 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 5501 return -1; 5502 } else { 5503 return strcmp(name_a, name_b); 5504 } 5505 } 5506 5507 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 5508 { 5509 ObjectClass *oc = data; 5510 CPUListState *s = user_data; 5511 const char *typename; 5512 char *name; 5513 5514 typename = object_class_get_name(oc); 5515 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5516 (*s->cpu_fprintf)(s->file, " %s\n", 5517 name); 5518 g_free(name); 5519 } 5520 5521 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) 5522 { 5523 CPUListState s = { 5524 .file = f, 5525 .cpu_fprintf = cpu_fprintf, 5526 }; 5527 GSList *list; 5528 5529 list = object_class_get_list(TYPE_ARM_CPU, false); 5530 list = g_slist_sort(list, arm_cpu_list_compare); 5531 (*cpu_fprintf)(f, "Available CPUs:\n"); 5532 g_slist_foreach(list, arm_cpu_list_entry, &s); 5533 g_slist_free(list); 5534 #ifdef CONFIG_KVM 5535 /* The 'host' CPU type is dynamically registered only if KVM is 5536 * enabled, so we have to special-case it here: 5537 */ 5538 (*cpu_fprintf)(f, " host (only available in KVM mode)\n"); 5539 #endif 5540 } 5541 5542 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 5543 { 5544 ObjectClass *oc = data; 5545 CpuDefinitionInfoList **cpu_list = user_data; 5546 CpuDefinitionInfoList *entry; 5547 CpuDefinitionInfo *info; 5548 const char *typename; 5549 5550 typename = object_class_get_name(oc); 5551 info = g_malloc0(sizeof(*info)); 5552 info->name = g_strndup(typename, 5553 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5554 info->q_typename = g_strdup(typename); 5555 5556 entry = g_malloc0(sizeof(*entry)); 5557 entry->value = info; 5558 entry->next = *cpu_list; 5559 *cpu_list = entry; 5560 } 5561 5562 CpuDefinitionInfoList 
*arch_query_cpu_definitions(Error **errp) 5563 { 5564 CpuDefinitionInfoList *cpu_list = NULL; 5565 GSList *list; 5566 5567 list = object_class_get_list(TYPE_ARM_CPU, false); 5568 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 5569 g_slist_free(list); 5570 5571 return cpu_list; 5572 } 5573 5574 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 5575 void *opaque, int state, int secstate, 5576 int crm, int opc1, int opc2) 5577 { 5578 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 5579 * add a single reginfo struct to the hash table. 5580 */ 5581 uint32_t *key = g_new(uint32_t, 1); 5582 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 5583 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 5584 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 5585 5586 /* Reset the secure state to the specific incoming state. This is 5587 * necessary as the register may have been defined with both states. 5588 */ 5589 r2->secure = secstate; 5590 5591 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5592 /* Register is banked (using both entries in array). 5593 * Overwriting fieldoffset as the array is only used to define 5594 * banked registers but later only fieldoffset is used. 5595 */ 5596 r2->fieldoffset = r->bank_fieldoffsets[ns]; 5597 } 5598 5599 if (state == ARM_CP_STATE_AA32) { 5600 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5601 /* If the register is banked then we don't need to migrate or 5602 * reset the 32-bit instance in certain cases: 5603 * 5604 * 1) If the register has both 32-bit and 64-bit instances then we 5605 * can count on the 64-bit instance taking care of the 5606 * non-secure bank. 5607 * 2) If ARMv8 is enabled then we can count on a 64-bit version 5608 * taking care of the secure bank. This requires that separate 5609 * 32 and 64-bit definitions are provided. 5610 */ 5611 if ((r->state == ARM_CP_STATE_BOTH && ns) || 5612 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 5613 r2->type |= ARM_CP_ALIAS; 5614 } 5615 } else if ((secstate != r->secure) && !ns) { 5616 /* The register is not banked so we only want to allow migration of 5617 * the non-secure instance. 5618 */ 5619 r2->type |= ARM_CP_ALIAS; 5620 } 5621 5622 if (r->state == ARM_CP_STATE_BOTH) { 5623 /* We assume it is a cp15 register if the .cp field is left unset. 5624 */ 5625 if (r2->cp == 0) { 5626 r2->cp = 15; 5627 } 5628 5629 #ifdef HOST_WORDS_BIGENDIAN 5630 if (r2->fieldoffset) { 5631 r2->fieldoffset += sizeof(uint32_t); 5632 } 5633 #endif 5634 } 5635 } 5636 if (state == ARM_CP_STATE_AA64) { 5637 /* To allow abbreviation of ARMCPRegInfo 5638 * definitions, we treat cp == 0 as equivalent to 5639 * the value for "standard guest-visible sysreg". 5640 * STATE_BOTH definitions are also always "standard 5641 * sysreg" in their AArch64 view (the .cp value may 5642 * be non-zero for the benefit of the AArch32 view). 
5643 */ 5644 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 5645 r2->cp = CP_REG_ARM64_SYSREG_CP; 5646 } 5647 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 5648 r2->opc0, opc1, opc2); 5649 } else { 5650 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 5651 } 5652 if (opaque) { 5653 r2->opaque = opaque; 5654 } 5655 /* reginfo passed to helpers is correct for the actual access, 5656 * and is never ARM_CP_STATE_BOTH: 5657 */ 5658 r2->state = state; 5659 /* Make sure reginfo passed to helpers for wildcarded regs 5660 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 5661 */ 5662 r2->crm = crm; 5663 r2->opc1 = opc1; 5664 r2->opc2 = opc2; 5665 /* By convention, for wildcarded registers only the first 5666 * entry is used for migration; the others are marked as 5667 * ALIAS so we don't try to transfer the register 5668 * multiple times. Special registers (ie NOP/WFI) are 5669 * never migratable and not even raw-accessible. 5670 */ 5671 if ((r->type & ARM_CP_SPECIAL)) { 5672 r2->type |= ARM_CP_NO_RAW; 5673 } 5674 if (((r->crm == CP_ANY) && crm != 0) || 5675 ((r->opc1 == CP_ANY) && opc1 != 0) || 5676 ((r->opc2 == CP_ANY) && opc2 != 0)) { 5677 r2->type |= ARM_CP_ALIAS; 5678 } 5679 5680 /* Check that raw accesses are either forbidden or handled. Note that 5681 * we can't assert this earlier because the setup of fieldoffset for 5682 * banked registers has to be done first. 5683 */ 5684 if (!(r2->type & ARM_CP_NO_RAW)) { 5685 assert(!raw_accessors_invalid(r2)); 5686 } 5687 5688 /* Overriding of an existing definition must be explicitly 5689 * requested. 5690 */ 5691 if (!(r->type & ARM_CP_OVERRIDE)) { 5692 ARMCPRegInfo *oldreg; 5693 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 5694 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 5695 fprintf(stderr, "Register redefined: cp=%d %d bit " 5696 "crn=%d crm=%d opc1=%d opc2=%d, " 5697 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 5698 r2->crn, r2->crm, r2->opc1, r2->opc2, 5699 oldreg->name, r2->name); 5700 g_assert_not_reached(); 5701 } 5702 } 5703 g_hash_table_insert(cpu->cp_regs, key, r2); 5704 } 5705 5706 5707 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 5708 const ARMCPRegInfo *r, void *opaque) 5709 { 5710 /* Define implementations of coprocessor registers. 5711 * We store these in a hashtable because typically 5712 * there are less than 150 registers in a space which 5713 * is 16*16*16*8*8 = 262144 in size. 5714 * Wildcarding is supported for the crm, opc1 and opc2 fields. 5715 * If a register is defined twice then the second definition is 5716 * used, so this can be used to define some generic registers and 5717 * then override them with implementation specific variations. 5718 * At least one of the original and the second definition should 5719 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 5720 * against accidental use. 5721 * 5722 * The state field defines whether the register is to be 5723 * visible in the AArch32 or AArch64 execution state. If the 5724 * state is set to ARM_CP_STATE_BOTH then we synthesise a 5725 * reginfo structure for the AArch32 view, which sees the lower 5726 * 32 bits of the 64 bit register. 5727 * 5728 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 5729 * be wildcarded. AArch64 registers are always considered to be 64 5730 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 5731 * the register, if any. 5732 */ 5733 int crm, opc1, opc2, state; 5734 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 5735 int crmmax = (r->crm == CP_ANY) ? 
15 : r->crm; 5736 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 5737 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 5738 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 5739 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 5740 /* 64 bit registers have only CRm and Opc1 fields */ 5741 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 5742 /* op0 only exists in the AArch64 encodings */ 5743 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 5744 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 5745 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 5746 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 5747 * encodes a minimum access level for the register. We roll this 5748 * runtime check into our general permission check code, so check 5749 * here that the reginfo's specified permissions are strict enough 5750 * to encompass the generic architectural permission check. 5751 */ 5752 if (r->state != ARM_CP_STATE_AA32) { 5753 int mask = 0; 5754 switch (r->opc1) { 5755 case 0: case 1: case 2: 5756 /* min_EL EL1 */ 5757 mask = PL1_RW; 5758 break; 5759 case 3: 5760 /* min_EL EL0 */ 5761 mask = PL0_RW; 5762 break; 5763 case 4: 5764 /* min_EL EL2 */ 5765 mask = PL2_RW; 5766 break; 5767 case 5: 5768 /* unallocated encoding, so not possible */ 5769 assert(false); 5770 break; 5771 case 6: 5772 /* min_EL EL3 */ 5773 mask = PL3_RW; 5774 break; 5775 case 7: 5776 /* min_EL EL1, secure mode only (we don't check the latter) */ 5777 mask = PL1_RW; 5778 break; 5779 default: 5780 /* broken reginfo with out-of-range opc1 */ 5781 assert(false); 5782 break; 5783 } 5784 /* assert our permissions are not too lax (stricter is fine) */ 5785 assert((r->access & ~mask) == 0); 5786 } 5787 5788 /* Check that the register definition has enough info to handle 5789 * reads and writes if they are permitted. 5790 */ 5791 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 5792 if (r->access & PL3_R) { 5793 assert((r->fieldoffset || 5794 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5795 r->readfn); 5796 } 5797 if (r->access & PL3_W) { 5798 assert((r->fieldoffset || 5799 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5800 r->writefn); 5801 } 5802 } 5803 /* Bad type field probably means missing sentinel at end of reg list */ 5804 assert(cptype_valid(r->type)); 5805 for (crm = crmmin; crm <= crmmax; crm++) { 5806 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 5807 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 5808 for (state = ARM_CP_STATE_AA32; 5809 state <= ARM_CP_STATE_AA64; state++) { 5810 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 5811 continue; 5812 } 5813 if (state == ARM_CP_STATE_AA32) { 5814 /* Under AArch32 CP registers can be common 5815 * (same for secure and non-secure world) or banked. 
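* A reginfo which doesn't pin itself to a single security state is entered into the hashtable twice below, once per security state, so that each bank gets its own entry.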
5816 */ 5817 switch (r->secure) { 5818 case ARM_CP_SECSTATE_S: 5819 case ARM_CP_SECSTATE_NS: 5820 add_cpreg_to_hashtable(cpu, r, opaque, state, 5821 r->secure, crm, opc1, opc2); 5822 break; 5823 default: 5824 add_cpreg_to_hashtable(cpu, r, opaque, state, 5825 ARM_CP_SECSTATE_S, 5826 crm, opc1, opc2); 5827 add_cpreg_to_hashtable(cpu, r, opaque, state, 5828 ARM_CP_SECSTATE_NS, 5829 crm, opc1, opc2); 5830 break; 5831 } 5832 } else { 5833 /* AArch64 registers get mapped to non-secure instance 5834 * of AArch32 */ 5835 add_cpreg_to_hashtable(cpu, r, opaque, state, 5836 ARM_CP_SECSTATE_NS, 5837 crm, opc1, opc2); 5838 } 5839 } 5840 } 5841 } 5842 } 5843 } 5844 5845 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 5846 const ARMCPRegInfo *regs, void *opaque) 5847 { 5848 /* Define a whole list of registers */ 5849 const ARMCPRegInfo *r; 5850 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 5851 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 5852 } 5853 } 5854 5855 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 5856 { 5857 return g_hash_table_lookup(cpregs, &encoded_cp); 5858 } 5859 5860 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 5861 uint64_t value) 5862 { 5863 /* Helper coprocessor write function for write-ignore registers */ 5864 } 5865 5866 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 5867 { 5868 /* Helper coprocessor read function for read-as-zero registers */ 5869 return 0; 5870 } 5871 5872 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 5873 { 5874 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 5875 } 5876 5877 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 5878 { 5879 /* Return true if it is not valid for us to switch to 5880 * this CPU mode (ie all the UNPREDICTABLE cases in 5881 * the ARM ARM CPSRWriteByInstr pseudocode). 5882 */ 5883 5884 /* Changes to or from Hyp via MSR and CPS are illegal. */ 5885 if (write_type == CPSRWriteByInstr && 5886 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 5887 mode == ARM_CPU_MODE_HYP)) { 5888 return 1; 5889 } 5890 5891 switch (mode) { 5892 case ARM_CPU_MODE_USR: 5893 return 0; 5894 case ARM_CPU_MODE_SYS: 5895 case ARM_CPU_MODE_SVC: 5896 case ARM_CPU_MODE_ABT: 5897 case ARM_CPU_MODE_UND: 5898 case ARM_CPU_MODE_IRQ: 5899 case ARM_CPU_MODE_FIQ: 5900 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 5901 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 5902 */ 5903 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 5904 * and CPS are treated as illegal mode changes.
5905 */ 5906 if (write_type == CPSRWriteByInstr && 5907 (env->cp15.hcr_el2 & HCR_TGE) && 5908 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 5909 !arm_is_secure_below_el3(env)) { 5910 return 1; 5911 } 5912 return 0; 5913 case ARM_CPU_MODE_HYP: 5914 return !arm_feature(env, ARM_FEATURE_EL2) 5915 || arm_current_el(env) < 2 || arm_is_secure(env); 5916 case ARM_CPU_MODE_MON: 5917 return arm_current_el(env) < 3; 5918 default: 5919 return 1; 5920 } 5921 } 5922 5923 uint32_t cpsr_read(CPUARMState *env) 5924 { 5925 int ZF; 5926 ZF = (env->ZF == 0); 5927 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 5928 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 5929 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 5930 | ((env->condexec_bits & 0xfc) << 8) 5931 | (env->GE << 16) | (env->daif & CPSR_AIF); 5932 } 5933 5934 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 5935 CPSRWriteType write_type) 5936 { 5937 uint32_t changed_daif; 5938 5939 if (mask & CPSR_NZCV) { 5940 env->ZF = (~val) & CPSR_Z; 5941 env->NF = val; 5942 env->CF = (val >> 29) & 1; 5943 env->VF = (val << 3) & 0x80000000; 5944 } 5945 if (mask & CPSR_Q) 5946 env->QF = ((val & CPSR_Q) != 0); 5947 if (mask & CPSR_T) 5948 env->thumb = ((val & CPSR_T) != 0); 5949 if (mask & CPSR_IT_0_1) { 5950 env->condexec_bits &= ~3; 5951 env->condexec_bits |= (val >> 25) & 3; 5952 } 5953 if (mask & CPSR_IT_2_7) { 5954 env->condexec_bits &= 3; 5955 env->condexec_bits |= (val >> 8) & 0xfc; 5956 } 5957 if (mask & CPSR_GE) { 5958 env->GE = (val >> 16) & 0xf; 5959 } 5960 5961 /* In a V7 implementation that includes the security extensions but does 5962 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 5963 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 5964 * bits respectively. 5965 * 5966 * In a V8 implementation, it is permitted for privileged software to 5967 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 5968 */ 5969 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 5970 arm_feature(env, ARM_FEATURE_EL3) && 5971 !arm_feature(env, ARM_FEATURE_EL2) && 5972 !arm_is_secure(env)) { 5973 5974 changed_daif = (env->daif ^ val) & mask; 5975 5976 if (changed_daif & CPSR_A) { 5977 /* Check to see if we are allowed to change the masking of async 5978 * abort exceptions from a non-secure state. 5979 */ 5980 if (!(env->cp15.scr_el3 & SCR_AW)) { 5981 qemu_log_mask(LOG_GUEST_ERROR, 5982 "Ignoring attempt to switch CPSR_A flag from " 5983 "non-secure world with SCR.AW bit clear\n"); 5984 mask &= ~CPSR_A; 5985 } 5986 } 5987 5988 if (changed_daif & CPSR_F) { 5989 /* Check to see if we are allowed to change the masking of FIQ 5990 * exceptions from a non-secure state. 5991 */ 5992 if (!(env->cp15.scr_el3 & SCR_FW)) { 5993 qemu_log_mask(LOG_GUEST_ERROR, 5994 "Ignoring attempt to switch CPSR_F flag from " 5995 "non-secure world with SCR.FW bit clear\n"); 5996 mask &= ~CPSR_F; 5997 } 5998 5999 /* Check whether non-maskable FIQ (NMFI) support is enabled. 6000 * If this bit is set software is not allowed to mask 6001 * FIQs, but is allowed to set CPSR_F to 0. 
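* Hence we only intercept attempts to set CPSR_F to 1 here; writes which clear it are allowed through.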
6002 */ 6003 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 6004 (val & CPSR_F)) { 6005 qemu_log_mask(LOG_GUEST_ERROR, 6006 "Ignoring attempt to enable CPSR_F flag " 6007 "(non-maskable FIQ [NMFI] support enabled)\n"); 6008 mask &= ~CPSR_F; 6009 } 6010 } 6011 } 6012 6013 env->daif &= ~(CPSR_AIF & mask); 6014 env->daif |= val & CPSR_AIF & mask; 6015 6016 if (write_type != CPSRWriteRaw && 6017 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 6018 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 6019 /* Note that we can only get here in USR mode if this is a 6020 * gdb stub write; for this case we follow the architectural 6021 * behaviour for guest writes in USR mode of ignoring an attempt 6022 * to switch mode. (Those are caught by translate.c for writes 6023 * triggered by guest instructions.) 6024 */ 6025 mask &= ~CPSR_M; 6026 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 6027 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 6028 * v7, and has defined behaviour in v8: 6029 * + leave CPSR.M untouched 6030 * + allow changes to the other CPSR fields 6031 * + set PSTATE.IL 6032 * For user changes via the GDB stub, we don't set PSTATE.IL, 6033 * as this would be unnecessarily harsh for a user error. 6034 */ 6035 mask &= ~CPSR_M; 6036 if (write_type != CPSRWriteByGDBStub && 6037 arm_feature(env, ARM_FEATURE_V8)) { 6038 mask |= CPSR_IL; 6039 val |= CPSR_IL; 6040 } 6041 } else { 6042 switch_mode(env, val & CPSR_M); 6043 } 6044 } 6045 mask &= ~CACHED_CPSR_BITS; 6046 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 6047 } 6048 6049 /* Sign/zero extend */ 6050 uint32_t HELPER(sxtb16)(uint32_t x) 6051 { 6052 uint32_t res; 6053 res = (uint16_t)(int8_t)x; 6054 res |= (uint32_t)(int8_t)(x >> 16) << 16; 6055 return res; 6056 } 6057 6058 uint32_t HELPER(uxtb16)(uint32_t x) 6059 { 6060 uint32_t res; 6061 res = (uint16_t)(uint8_t)x; 6062 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 6063 return res; 6064 } 6065 6066 int32_t HELPER(sdiv)(int32_t num, int32_t den) 6067 { 6068 if (den == 0) 6069 return 0; 6070 if (num == INT_MIN && den == -1) 6071 return INT_MIN; 6072 return num / den; 6073 } 6074 6075 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 6076 { 6077 if (den == 0) 6078 return 0; 6079 return num / den; 6080 } 6081 6082 uint32_t HELPER(rbit)(uint32_t x) 6083 { 6084 return revbit32(x); 6085 } 6086 6087 #if defined(CONFIG_USER_ONLY) 6088 6089 /* These should probably raise undefined insn exceptions. */ 6090 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 6091 { 6092 ARMCPU *cpu = arm_env_get_cpu(env); 6093 6094 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 6095 } 6096 6097 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 6098 { 6099 ARMCPU *cpu = arm_env_get_cpu(env); 6100 6101 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 6102 return 0; 6103 } 6104 6105 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6106 { 6107 /* translate.c should never generate calls here in user-only mode */ 6108 g_assert_not_reached(); 6109 } 6110 6111 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6112 { 6113 /* translate.c should never generate calls here in user-only mode */ 6114 g_assert_not_reached(); 6115 } 6116 6117 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 6118 { 6119 /* The TT instructions can be used by unprivileged code, but in 6120 * user-only emulation we don't have the MPU. 
6121 * Luckily since we know we are NonSecure unprivileged (and that in 6122 * turn means that the A flag wasn't specified), all the bits in the 6123 * register must be zero: 6124 * IREGION: 0 because IRVALID is 0 6125 * IRVALID: 0 because NS 6126 * S: 0 because NS 6127 * NSRW: 0 because NS 6128 * NSR: 0 because NS 6129 * RW: 0 because unpriv and A flag not set 6130 * R: 0 because unpriv and A flag not set 6131 * SRVALID: 0 because NS 6132 * MRVALID: 0 because unpriv and A flag not set 6133 * SREGION: 0 because SRVALID is 0 6134 * MREGION: 0 because MRVALID is 0 6135 */ 6136 return 0; 6137 } 6138 6139 void switch_mode(CPUARMState *env, int mode) 6140 { 6141 ARMCPU *cpu = arm_env_get_cpu(env); 6142 6143 if (mode != ARM_CPU_MODE_USR) { 6144 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 6145 } 6146 } 6147 6148 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 6149 uint32_t cur_el, bool secure) 6150 { 6151 return 1; 6152 } 6153 6154 void aarch64_sync_64_to_32(CPUARMState *env) 6155 { 6156 g_assert_not_reached(); 6157 } 6158 6159 #else 6160 6161 void switch_mode(CPUARMState *env, int mode) 6162 { 6163 int old_mode; 6164 int i; 6165 6166 old_mode = env->uncached_cpsr & CPSR_M; 6167 if (mode == old_mode) 6168 return; 6169 6170 if (old_mode == ARM_CPU_MODE_FIQ) { 6171 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 6172 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 6173 } else if (mode == ARM_CPU_MODE_FIQ) { 6174 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 6175 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 6176 } 6177 6178 i = bank_number(old_mode); 6179 env->banked_r13[i] = env->regs[13]; 6180 env->banked_r14[i] = env->regs[14]; 6181 env->banked_spsr[i] = env->spsr; 6182 6183 i = bank_number(mode); 6184 env->regs[13] = env->banked_r13[i]; 6185 env->regs[14] = env->banked_r14[i]; 6186 env->spsr = env->banked_spsr[i]; 6187 } 6188 6189 /* Physical Interrupt Target EL Lookup Table 6190 * 6191 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 6192 * 6193 * The below multi-dimensional table is used for looking up the target 6194 * exception level given numerous condition criteria. Specifically, the 6195 * target EL is based on SCR and HCR routing controls as well as the 6196 * currently executing EL and secure state. 6197 * 6198 * Dimensions: 6199 * target_el_table[2][2][2][2][2][4] 6200 * | | | | | +--- Current EL 6201 * | | | | +------ Non-secure(0)/Secure(1) 6202 * | | | +--------- HCR mask override 6203 * | | +------------ SCR exec state control 6204 * | +--------------- SCR mask override 6205 * +------------------ 32-bit(0)/64-bit(1) EL3 6206 * 6207 * The table values are as such: 6208 * 0-3 = EL0-EL3 6209 * -1 = Cannot occur 6210 * 6211 * The ARM ARM target EL table includes entries indicating that an "exception 6212 * is not taken". The two cases where this is applicable are: 6213 * 1) An exception is taken from EL3 but the SCR does not have the exception 6214 * routed to EL3. 6215 * 2) An exception is taken from EL2 but the HCR does not have the exception 6216 * routed to EL2. 6217 * In these two cases, the below table contains a target of EL1. This value is 6218 * returned as it is expected that the consumer of the table data will check 6219 * for "target EL >= current EL" to ensure the exception is not taken.
6220 * 6221 * SCR HCR 6222 * 64 EA AMO From 6223 * BIT IRQ IMO Non-secure Secure 6224 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 6225 */ 6226 static const int8_t target_el_table[2][2][2][2][2][4] = { 6227 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6228 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 6229 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6230 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 6231 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6232 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 6233 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6234 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 6235 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 6236 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 6237 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 6238 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 6239 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6240 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 6241 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6242 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 6243 }; 6244 6245 /* 6246 * Determine the target EL for physical exceptions 6247 */ 6248 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 6249 uint32_t cur_el, bool secure) 6250 { 6251 CPUARMState *env = cs->env_ptr; 6252 int rw; 6253 int scr; 6254 int hcr; 6255 int target_el; 6256 /* Is the highest EL AArch64? */ 6257 int is64 = arm_feature(env, ARM_FEATURE_AARCH64); 6258 6259 if (arm_feature(env, ARM_FEATURE_EL3)) { 6260 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 6261 } else { 6262 /* Either EL2 is the highest EL (and so the EL2 register width 6263 * is given by is64); or there is no EL2 or EL3, in which case 6264 * the value of 'rw' does not affect the table lookup anyway. 
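* When EL3 does exist, rw reflects SCR_EL3.RW and selects the "SCR exec state control" dimension of target_el_table above.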
6265 */ 6266 rw = is64; 6267 } 6268 6269 switch (excp_idx) { 6270 case EXCP_IRQ: 6271 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 6272 hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO); 6273 break; 6274 case EXCP_FIQ: 6275 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 6276 hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO); 6277 break; 6278 default: 6279 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 6280 hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO); 6281 break; 6282 }; 6283 6284 /* If HCR.TGE is set then HCR is treated as being 1 */ 6285 hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE); 6286 6287 /* Perform a table-lookup for the target EL given the current state */ 6288 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 6289 6290 assert(target_el > 0); 6291 6292 return target_el; 6293 } 6294 6295 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, 6296 ARMMMUIdx mmu_idx, bool ignfault) 6297 { 6298 CPUState *cs = CPU(cpu); 6299 CPUARMState *env = &cpu->env; 6300 MemTxAttrs attrs = {}; 6301 MemTxResult txres; 6302 target_ulong page_size; 6303 hwaddr physaddr; 6304 int prot; 6305 ARMMMUFaultInfo fi; 6306 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6307 int exc; 6308 bool exc_secure; 6309 6310 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, 6311 &attrs, &prot, &page_size, &fi, NULL)) { 6312 /* MPU/SAU lookup failed */ 6313 if (fi.type == ARMFault_QEMU_SFault) { 6314 qemu_log_mask(CPU_LOG_INT, 6315 "...SecureFault with SFSR.AUVIOL during stacking\n"); 6316 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6317 env->v7m.sfar = addr; 6318 exc = ARMV7M_EXCP_SECURE; 6319 exc_secure = false; 6320 } else { 6321 qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n"); 6322 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; 6323 exc = ARMV7M_EXCP_MEM; 6324 exc_secure = secure; 6325 } 6326 goto pend_fault; 6327 } 6328 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value, 6329 attrs, &txres); 6330 if (txres != MEMTX_OK) { 6331 /* BusFault trying to write the data */ 6332 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); 6333 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; 6334 exc = ARMV7M_EXCP_BUS; 6335 exc_secure = false; 6336 goto pend_fault; 6337 } 6338 return true; 6339 6340 pend_fault: 6341 /* By pending the exception at this point we are making 6342 * the IMPDEF choice "overridden exceptions pended" (see the 6343 * MergeExcInfo() pseudocode). The other choice would be to not 6344 * pend them now and then make a choice about which to throw away 6345 * later if we have two derived exceptions. 6346 * The only case when we must not pend the exception but instead 6347 * throw it away is if we are doing the push of the callee registers 6348 * and we've already generated a derived exception. Even in this 6349 * case we will still update the fault status registers. 
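 * Callers chain several of these writes together with &&, as in v7m_push_stack() and v7m_push_callee_stack() below, so the first failing write stops the rest of the frame from being written and at most one derived exception is pended per frame.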
6350 */ 6351 if (!ignfault) { 6352 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); 6353 } 6354 return false; 6355 } 6356 6357 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, 6358 ARMMMUIdx mmu_idx) 6359 { 6360 CPUState *cs = CPU(cpu); 6361 CPUARMState *env = &cpu->env; 6362 MemTxAttrs attrs = {}; 6363 MemTxResult txres; 6364 target_ulong page_size; 6365 hwaddr physaddr; 6366 int prot; 6367 ARMMMUFaultInfo fi; 6368 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6369 int exc; 6370 bool exc_secure; 6371 uint32_t value; 6372 6373 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 6374 &attrs, &prot, &page_size, &fi, NULL)) { 6375 /* MPU/SAU lookup failed */ 6376 if (fi.type == ARMFault_QEMU_SFault) { 6377 qemu_log_mask(CPU_LOG_INT, 6378 "...SecureFault with SFSR.AUVIOL during unstack\n"); 6379 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6380 env->v7m.sfar = addr; 6381 exc = ARMV7M_EXCP_SECURE; 6382 exc_secure = false; 6383 } else { 6384 qemu_log_mask(CPU_LOG_INT, 6385 "...MemManageFault with CFSR.MUNSTKERR\n"); 6386 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; 6387 exc = ARMV7M_EXCP_MEM; 6388 exc_secure = secure; 6389 } 6390 goto pend_fault; 6391 } 6392 6393 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, 6394 attrs, &txres); 6395 if (txres != MEMTX_OK) { 6396 /* BusFault trying to read the data */ 6397 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); 6398 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; 6399 exc = ARMV7M_EXCP_BUS; 6400 exc_secure = false; 6401 goto pend_fault; 6402 } 6403 6404 *dest = value; 6405 return true; 6406 6407 pend_fault: 6408 /* By pending the exception at this point we are making 6409 * the IMPDEF choice "overridden exceptions pended" (see the 6410 * MergeExcInfo() pseudocode). The other choice would be to not 6411 * pend them now and then make a choice about which to throw away 6412 * later if we have two derived exceptions. 6413 */ 6414 armv7m_nvic_set_pending(env->nvic, exc, exc_secure); 6415 return false; 6416 } 6417 6418 /* Return true if we're using the process stack pointer (not the MSP) */ 6419 static bool v7m_using_psp(CPUARMState *env) 6420 { 6421 /* Handler mode always uses the main stack; for thread mode 6422 * the CONTROL.SPSEL bit determines the answer. 6423 * Note that in v7M it is not possible to be in Handler mode with 6424 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. 6425 */ 6426 return !arm_v7m_is_handler_mode(env) && 6427 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; 6428 } 6429 6430 /* Write to v7M CONTROL.SPSEL bit for the specified security bank. 6431 * This may change the current stack pointer between Main and Process 6432 * stack pointers if it is done for the CONTROL register for the current 6433 * security state. 6434 */ 6435 static void write_v7m_control_spsel_for_secstate(CPUARMState *env, 6436 bool new_spsel, 6437 bool secstate) 6438 { 6439 bool old_is_psp = v7m_using_psp(env); 6440 6441 env->v7m.control[secstate] = 6442 deposit32(env->v7m.control[secstate], 6443 R_V7M_CONTROL_SPSEL_SHIFT, 6444 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 6445 6446 if (secstate == env->v7m.secure) { 6447 bool new_is_psp = v7m_using_psp(env); 6448 uint32_t tmp; 6449 6450 if (old_is_psp != new_is_psp) { 6451 tmp = env->v7m.other_sp; 6452 env->v7m.other_sp = env->regs[13]; 6453 env->regs[13] = tmp; 6454 } 6455 } 6456 } 6457 6458 /* Write to v7M CONTROL.SPSEL bit. 
This may change the current 6459 * stack pointer between Main and Process stack pointers. 6460 */ 6461 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) 6462 { 6463 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); 6464 } 6465 6466 void write_v7m_exception(CPUARMState *env, uint32_t new_exc) 6467 { 6468 /* Write a new value to v7m.exception, thus transitioning into or out 6469 * of Handler mode; this may result in a change of active stack pointer. 6470 */ 6471 bool new_is_psp, old_is_psp = v7m_using_psp(env); 6472 uint32_t tmp; 6473 6474 env->v7m.exception = new_exc; 6475 6476 new_is_psp = v7m_using_psp(env); 6477 6478 if (old_is_psp != new_is_psp) { 6479 tmp = env->v7m.other_sp; 6480 env->v7m.other_sp = env->regs[13]; 6481 env->regs[13] = tmp; 6482 } 6483 } 6484 6485 /* Switch M profile security state between NS and S */ 6486 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) 6487 { 6488 uint32_t new_ss_msp, new_ss_psp; 6489 6490 if (env->v7m.secure == new_secstate) { 6491 return; 6492 } 6493 6494 /* All the banked state is accessed by looking at env->v7m.secure 6495 * except for the stack pointer; rearrange the SP appropriately. 6496 */ 6497 new_ss_msp = env->v7m.other_ss_msp; 6498 new_ss_psp = env->v7m.other_ss_psp; 6499 6500 if (v7m_using_psp(env)) { 6501 env->v7m.other_ss_psp = env->regs[13]; 6502 env->v7m.other_ss_msp = env->v7m.other_sp; 6503 } else { 6504 env->v7m.other_ss_msp = env->regs[13]; 6505 env->v7m.other_ss_psp = env->v7m.other_sp; 6506 } 6507 6508 env->v7m.secure = new_secstate; 6509 6510 if (v7m_using_psp(env)) { 6511 env->regs[13] = new_ss_psp; 6512 env->v7m.other_sp = new_ss_msp; 6513 } else { 6514 env->regs[13] = new_ss_msp; 6515 env->v7m.other_sp = new_ss_psp; 6516 } 6517 } 6518 6519 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6520 { 6521 /* Handle v7M BXNS: 6522 * - if the return value is a magic value, do exception return (like BX) 6523 * - otherwise bit 0 of the return value is the target security state 6524 */ 6525 uint32_t min_magic; 6526 6527 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6528 /* Covers FNC_RETURN and EXC_RETURN magic */ 6529 min_magic = FNC_RETURN_MIN_MAGIC; 6530 } else { 6531 /* EXC_RETURN magic only */ 6532 min_magic = EXC_RETURN_MIN_MAGIC; 6533 } 6534 6535 if (dest >= min_magic) { 6536 /* This is an exception return magic value; put it where 6537 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 6538 * Note that if we ever add gen_ss_advance() singlestep support to 6539 * M profile this should count as an "instruction execution complete" 6540 * event (compare gen_bx_excret_final_code()). 
6541 */ 6542 env->regs[15] = dest & ~1; 6543 env->thumb = dest & 1; 6544 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); 6545 /* notreached */ 6546 } 6547 6548 /* translate.c should have made BXNS UNDEF unless we're secure */ 6549 assert(env->v7m.secure); 6550 6551 switch_v7m_security_state(env, dest & 1); 6552 env->thumb = 1; 6553 env->regs[15] = dest & ~1; 6554 } 6555 6556 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6557 { 6558 /* Handle v7M BLXNS: 6559 * - bit 0 of the destination address is the target security state 6560 */ 6561 6562 /* At this point regs[15] is the address just after the BLXNS */ 6563 uint32_t nextinst = env->regs[15] | 1; 6564 uint32_t sp = env->regs[13] - 8; 6565 uint32_t saved_psr; 6566 6567 /* translate.c will have made BLXNS UNDEF unless we're secure */ 6568 assert(env->v7m.secure); 6569 6570 if (dest & 1) { 6571 /* target is Secure, so this is just a normal BLX, 6572 * except that the low bit doesn't indicate Thumb/not. 6573 */ 6574 env->regs[14] = nextinst; 6575 env->thumb = 1; 6576 env->regs[15] = dest & ~1; 6577 return; 6578 } 6579 6580 /* Target is non-secure: first push a stack frame */ 6581 if (!QEMU_IS_ALIGNED(sp, 8)) { 6582 qemu_log_mask(LOG_GUEST_ERROR, 6583 "BLXNS with misaligned SP is UNPREDICTABLE\n"); 6584 } 6585 6586 saved_psr = env->v7m.exception; 6587 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 6588 saved_psr |= XPSR_SFPA; 6589 } 6590 6591 /* Note that these stores can throw exceptions on MPU faults */ 6592 cpu_stl_data(env, sp, nextinst); 6593 cpu_stl_data(env, sp + 4, saved_psr); 6594 6595 env->regs[13] = sp; 6596 env->regs[14] = 0xfeffffff; 6597 if (arm_v7m_is_handler_mode(env)) { 6598 /* Write a dummy value to IPSR, to avoid leaking the current secure 6599 * exception number to non-secure code. This is guaranteed not 6600 * to cause write_v7m_exception() to actually change stacks. 6601 */ 6602 write_v7m_exception(env, 1); 6603 } 6604 switch_v7m_security_state(env, 0); 6605 env->thumb = 1; 6606 env->regs[15] = dest; 6607 } 6608 6609 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 6610 bool spsel) 6611 { 6612 /* Return a pointer to the location where we currently store the 6613 * stack pointer for the requested security state and thread mode. 6614 * This pointer will become invalid if the CPU state is updated 6615 * such that the stack pointers are switched around (eg changing 6616 * the SPSEL control bit). 6617 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 6618 * Unlike that pseudocode, we require the caller to pass us in the 6619 * SPSEL control bit value; this is because we also use this 6620 * function in handling of pushing of the callee-saves registers 6621 * part of the v8M stack frame (pseudocode PushCalleeStack()), 6622 * and in the tailchain codepath the SPSEL bit comes from the exception 6623 * return magic LR value from the previous exception. The pseudocode 6624 * opencodes the stack-selection in PushCalleeStack(), but we prefer 6625 * to make this utility function generic enough to do the job. 
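 * The selection below boils down to: if the requested security state matches the current one, return either &env->regs[13] or &env->v7m.other_sp depending on whether the requested SP is the one currently in use; otherwise return &env->v7m.other_ss_psp or &env->v7m.other_ss_msp as appropriate.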
6626 */ 6627 bool want_psp = threadmode && spsel; 6628 6629 if (secure == env->v7m.secure) { 6630 if (want_psp == v7m_using_psp(env)) { 6631 return &env->regs[13]; 6632 } else { 6633 return &env->v7m.other_sp; 6634 } 6635 } else { 6636 if (want_psp) { 6637 return &env->v7m.other_ss_psp; 6638 } else { 6639 return &env->v7m.other_ss_msp; 6640 } 6641 } 6642 } 6643 6644 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, 6645 uint32_t *pvec) 6646 { 6647 CPUState *cs = CPU(cpu); 6648 CPUARMState *env = &cpu->env; 6649 MemTxResult result; 6650 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; 6651 uint32_t vector_entry; 6652 MemTxAttrs attrs = {}; 6653 ARMMMUIdx mmu_idx; 6654 bool exc_secure; 6655 6656 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); 6657 6658 /* We don't do a get_phys_addr() here because the rules for vector 6659 * loads are special: they always use the default memory map, and 6660 * the default memory map permits reads from all addresses. 6661 * Since there's no easy way to pass through to pmsav8_mpu_lookup() 6662 * that we want this special case which would always say "yes", 6663 * we just do the SAU lookup here followed by a direct physical load. 6664 */ 6665 attrs.secure = targets_secure; 6666 attrs.user = false; 6667 6668 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6669 V8M_SAttributes sattrs = {}; 6670 6671 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 6672 if (sattrs.ns) { 6673 attrs.secure = false; 6674 } else if (!targets_secure) { 6675 /* NS access to S memory */ 6676 goto load_fail; 6677 } 6678 } 6679 6680 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr, 6681 attrs, &result); 6682 if (result != MEMTX_OK) { 6683 goto load_fail; 6684 } 6685 *pvec = vector_entry; 6686 return true; 6687 6688 load_fail: 6689 /* All vector table fetch fails are reported as HardFault, with 6690 * HFSR.VECTTBL and .FORCED set. (FORCED is set because 6691 * technically the underlying exception is a MemManage or BusFault 6692 * that is escalated to HardFault.) This is a terminal exception, 6693 * so we will either take the HardFault immediately or else enter 6694 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). 6695 */ 6696 exc_secure = targets_secure || 6697 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 6698 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; 6699 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); 6700 return false; 6701 } 6702 6703 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6704 bool ignore_faults) 6705 { 6706 /* For v8M, push the callee-saves register part of the stack frame. 6707 * Compare the v8M pseudocode PushCalleeStack(). 6708 * In the tailchaining case this may not be the current stack. 
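 * The callee-saves frame written below is 0x28 bytes, laid out (as offsets from the new frame pointer) as: 0x00 integrity signature 0xfefa125b; 0x08 r4, 0x0c r5, 0x10 r6, 0x14 r7, 0x18 r8, 0x1c r9, 0x20 r10, 0x24 r11.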
6709 */ 6710 CPUARMState *env = &cpu->env; 6711 uint32_t *frame_sp_p; 6712 uint32_t frameptr; 6713 ARMMMUIdx mmu_idx; 6714 bool stacked_ok; 6715 6716 if (dotailchain) { 6717 bool mode = lr & R_V7M_EXCRET_MODE_MASK; 6718 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || 6719 !mode; 6720 6721 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); 6722 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, 6723 lr & R_V7M_EXCRET_SPSEL_MASK); 6724 } else { 6725 mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6726 frame_sp_p = &env->regs[13]; 6727 } 6728 6729 frameptr = *frame_sp_p - 0x28; 6730 6731 /* Write as much of the stack frame as we can. A write failure may 6732 * cause us to pend a derived exception. 6733 */ 6734 stacked_ok = 6735 v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) && 6736 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, 6737 ignore_faults) && 6738 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, 6739 ignore_faults) && 6740 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, 6741 ignore_faults) && 6742 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, 6743 ignore_faults) && 6744 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, 6745 ignore_faults) && 6746 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, 6747 ignore_faults) && 6748 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, 6749 ignore_faults) && 6750 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, 6751 ignore_faults); 6752 6753 /* Update SP regardless of whether any of the stack accesses failed. 6754 * When we implement v8M stack limit checking then this attempt to 6755 * update SP might also fail and result in a derived exception. 6756 */ 6757 *frame_sp_p = frameptr; 6758 6759 return !stacked_ok; 6760 } 6761 6762 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6763 bool ignore_stackfaults) 6764 { 6765 /* Do the "take the exception" parts of exception entry, 6766 * but not the pushing of state to the stack. This is 6767 * similar to the pseudocode ExceptionTaken() function. 6768 */ 6769 CPUARMState *env = &cpu->env; 6770 uint32_t addr; 6771 bool targets_secure; 6772 int exc; 6773 bool push_failed = false; 6774 6775 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); 6776 6777 if (arm_feature(env, ARM_FEATURE_V8)) { 6778 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 6779 (lr & R_V7M_EXCRET_S_MASK)) { 6780 /* The background code (the owner of the registers in the 6781 * exception frame) is Secure. This means it may either already 6782 * have or now needs to push callee-saves registers. 6783 */ 6784 if (targets_secure) { 6785 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { 6786 /* We took an exception from Secure to NonSecure 6787 * (which means the callee-saved registers got stacked) 6788 * and are now tailchaining to a Secure exception. 6789 * Clear DCRS so eventual return from this Secure 6790 * exception unstacks the callee-saved registers. 6791 */ 6792 lr &= ~R_V7M_EXCRET_DCRS_MASK; 6793 } 6794 } else { 6795 /* We're going to a non-secure exception; push the 6796 * callee-saves registers to the stack now, if they're 6797 * not already saved. 
6798 */ 6799 if (lr & R_V7M_EXCRET_DCRS_MASK && 6800 !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { 6801 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, 6802 ignore_stackfaults); 6803 } 6804 lr |= R_V7M_EXCRET_DCRS_MASK; 6805 } 6806 } 6807 6808 lr &= ~R_V7M_EXCRET_ES_MASK; 6809 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6810 lr |= R_V7M_EXCRET_ES_MASK; 6811 } 6812 lr &= ~R_V7M_EXCRET_SPSEL_MASK; 6813 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { 6814 lr |= R_V7M_EXCRET_SPSEL_MASK; 6815 } 6816 6817 /* Clear registers if necessary to prevent non-secure exception 6818 * code being able to see register values from secure code. 6819 * Where register values become architecturally UNKNOWN we leave 6820 * them with their previous values. 6821 */ 6822 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6823 if (!targets_secure) { 6824 /* Always clear the caller-saved registers (they have been 6825 * pushed to the stack earlier in v7m_push_stack()). 6826 * Clear callee-saved registers if the background code is 6827 * Secure (in which case these regs were saved in 6828 * v7m_push_callee_stack()). 6829 */ 6830 int i; 6831 6832 for (i = 0; i < 13; i++) { 6833 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ 6834 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { 6835 env->regs[i] = 0; 6836 } 6837 } 6838 /* Clear EAPSR */ 6839 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); 6840 } 6841 } 6842 } 6843 6844 if (push_failed && !ignore_stackfaults) { 6845 /* Derived exception on callee-saves register stacking: 6846 * we might now want to take a different exception which 6847 * targets a different security state, so try again from the top. 6848 */ 6849 v7m_exception_taken(cpu, lr, true, true); 6850 return; 6851 } 6852 6853 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { 6854 /* Vector load failed: derived exception */ 6855 v7m_exception_taken(cpu, lr, true, true); 6856 return; 6857 } 6858 6859 /* Now we've done everything that might cause a derived exception 6860 * we can go ahead and activate whichever exception we're going to 6861 * take (which might now be the derived exception). 6862 */ 6863 armv7m_nvic_acknowledge_irq(env->nvic); 6864 6865 /* Switch to target security state -- must do this before writing SPSEL */ 6866 switch_v7m_security_state(env, targets_secure); 6867 write_v7m_control_spsel(env, 0); 6868 arm_clear_exclusive(env); 6869 /* Clear IT bits */ 6870 env->condexec_bits = 0; 6871 env->regs[14] = lr; 6872 env->regs[15] = addr & 0xfffffffe; 6873 env->thumb = addr & 1; 6874 } 6875 6876 static bool v7m_push_stack(ARMCPU *cpu) 6877 { 6878 /* Do the "set up stack frame" part of exception entry, 6879 * similar to pseudocode PushStack(). 6880 * Return true if we generate a derived exception (and so 6881 * should ignore further stack faults trying to process 6882 * that derived exception.) 6883 */ 6884 bool stacked_ok; 6885 CPUARMState *env = &cpu->env; 6886 uint32_t xpsr = xpsr_read(env); 6887 uint32_t frameptr = env->regs[13]; 6888 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6889 6890 /* Align stack pointer if the guest wants that */ 6891 if ((frameptr & 4) && 6892 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { 6893 frameptr -= 4; 6894 xpsr |= XPSR_SPREALIGN; 6895 } 6896 6897 frameptr -= 0x20; 6898 6899 /* Write as much of the stack frame as we can. 
If we fail a stack 6900 * write this will result in a derived exception being pended 6901 * (which may be taken in preference to the one we started with 6902 * if it has higher priority). 6903 */ 6904 stacked_ok = 6905 v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) && 6906 v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) && 6907 v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) && 6908 v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) && 6909 v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) && 6910 v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) && 6911 v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) && 6912 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false); 6913 6914 /* Update SP regardless of whether any of the stack accesses failed. 6915 * When we implement v8M stack limit checking then this attempt to 6916 * update SP might also fail and result in a derived exception. 6917 */ 6918 env->regs[13] = frameptr; 6919 6920 return !stacked_ok; 6921 } 6922 6923 static void do_v7m_exception_exit(ARMCPU *cpu) 6924 { 6925 CPUARMState *env = &cpu->env; 6926 CPUState *cs = CPU(cpu); 6927 uint32_t excret; 6928 uint32_t xpsr; 6929 bool ufault = false; 6930 bool sfault = false; 6931 bool return_to_sp_process; 6932 bool return_to_handler; 6933 bool rettobase = false; 6934 bool exc_secure = false; 6935 bool return_to_secure; 6936 6937 /* If we're not in Handler mode then jumps to magic exception-exit 6938 * addresses don't have magic behaviour. However for the v8M 6939 * security extensions the magic secure-function-return has to 6940 * work in thread mode too, so to avoid doing an extra check in 6941 * the generated code we allow exception-exit magic to also cause the 6942 * internal exception and bring us here in thread mode. Correct code 6943 * will never try to do this (the following insn fetch will always 6944 * fault) so the overhead of having taken an unnecessary exception 6945 * doesn't matter. 6946 */ 6947 if (!arm_v7m_is_handler_mode(env)) { 6948 return; 6949 } 6950 6951 /* In the spec pseudocode ExceptionReturn() is called directly 6952 * from BXWritePC() and gets the full target PC value including 6953 * bit zero. In QEMU's implementation we treat it as a normal 6954 * jump-to-register (which is then caught later on), and so split 6955 * the target value up between env->regs[15] and env->thumb in 6956 * gen_bx(). Reconstitute it. 6957 */ 6958 excret = env->regs[15]; 6959 if (env->thumb) { 6960 excret |= 1; 6961 } 6962 6963 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32 6964 " previous exception %d\n", 6965 excret, env->v7m.exception); 6966 6967 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) { 6968 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception " 6969 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", 6970 excret); 6971 } 6972 6973 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6974 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before 6975 * we pick which FAULTMASK to clear. 6976 */ 6977 if (!env->v7m.secure && 6978 ((excret & R_V7M_EXCRET_ES_MASK) || 6979 !(excret & R_V7M_EXCRET_DCRS_MASK))) { 6980 sfault = 1; 6981 /* For all other purposes, treat ES as 0 (R_HXSR) */ 6982 excret &= ~R_V7M_EXCRET_ES_MASK; 6983 } 6984 } 6985 6986 if (env->v7m.exception != ARMV7M_EXCP_NMI) { 6987 /* Auto-clear FAULTMASK on return from other than NMI.
6988 * If the security extension is implemented then this only 6989 * happens if the raw execution priority is >= 0; the 6990 * value of the ES bit in the exception return value indicates 6991 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.) 6992 */ 6993 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6994 exc_secure = excret & R_V7M_EXCRET_ES_MASK; 6995 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { 6996 env->v7m.faultmask[exc_secure] = 0; 6997 } 6998 } else { 6999 env->v7m.faultmask[M_REG_NS] = 0; 7000 } 7001 } 7002 7003 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, 7004 exc_secure)) { 7005 case -1: 7006 /* attempt to exit an exception that isn't active */ 7007 ufault = true; 7008 break; 7009 case 0: 7010 /* still an irq active now */ 7011 break; 7012 case 1: 7013 /* we returned to base exception level, no nesting. 7014 * (In the pseudocode this is written using "NestedActivation != 1" 7015 * where we have 'rettobase == false'.) 7016 */ 7017 rettobase = true; 7018 break; 7019 default: 7020 g_assert_not_reached(); 7021 } 7022 7023 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); 7024 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; 7025 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && 7026 (excret & R_V7M_EXCRET_S_MASK); 7027 7028 if (arm_feature(env, ARM_FEATURE_V8)) { 7029 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7030 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); 7031 * we choose to take the UsageFault. 7032 */ 7033 if ((excret & R_V7M_EXCRET_S_MASK) || 7034 (excret & R_V7M_EXCRET_ES_MASK) || 7035 !(excret & R_V7M_EXCRET_DCRS_MASK)) { 7036 ufault = true; 7037 } 7038 } 7039 if (excret & R_V7M_EXCRET_RES0_MASK) { 7040 ufault = true; 7041 } 7042 } else { 7043 /* For v7M we only recognize certain combinations of the low bits */ 7044 switch (excret & 0xf) { 7045 case 1: /* Return to Handler */ 7046 break; 7047 case 13: /* Return to Thread using Process stack */ 7048 case 9: /* Return to Thread using Main stack */ 7049 /* We only need to check NONBASETHRDENA for v7M, because in 7050 * v8M this bit does not exist (it is RES1). 7051 */ 7052 if (!rettobase && 7053 !(env->v7m.ccr[env->v7m.secure] & 7054 R_V7M_CCR_NONBASETHRDENA_MASK)) { 7055 ufault = true; 7056 } 7057 break; 7058 default: 7059 ufault = true; 7060 } 7061 } 7062 7063 if (sfault) { 7064 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; 7065 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7066 v7m_exception_taken(cpu, excret, true, false); 7067 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7068 "stackframe: failed EXC_RETURN.ES validity check\n"); 7069 return; 7070 } 7071 7072 if (ufault) { 7073 /* Bad exception return: instead of popping the exception 7074 * stack, directly take a usage fault on the current stack. 7075 */ 7076 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7077 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7078 v7m_exception_taken(cpu, excret, true, false); 7079 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7080 "stackframe: failed exception return integrity check\n"); 7081 return; 7082 } 7083 7084 /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in 7085 * Handler mode (and will be until we write the new XPSR.Interrupt 7086 * field) this does not switch around the current stack pointer. 
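 * (The stack pointer switch, if any, happens later, when the restored xPSR is written back: see the xpsr_write() call near the end of this function, which may switch stacks and so invalidates frame_sp_p.)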
7087 */ 7088 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); 7089 7090 switch_v7m_security_state(env, return_to_secure); 7091 7092 { 7093 /* The stack pointer we should be reading the exception frame from 7094 * depends on bits in the magic exception return type value (and 7095 * for v8M isn't necessarily the stack pointer we will eventually 7096 * end up resuming execution with). Get a pointer to the location 7097 * in the CPU state struct where the SP we need is currently being 7098 * stored; we will use and modify it in place. 7099 * We use this limited C variable scope so we don't accidentally 7100 * use 'frame_sp_p' after we do something that makes it invalid. 7101 */ 7102 uint32_t *frame_sp_p = get_v7m_sp_ptr(env, 7103 return_to_secure, 7104 !return_to_handler, 7105 return_to_sp_process); 7106 uint32_t frameptr = *frame_sp_p; 7107 bool pop_ok = true; 7108 ARMMMUIdx mmu_idx; 7109 7110 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure, 7111 !return_to_handler); 7112 7113 if (!QEMU_IS_ALIGNED(frameptr, 8) && 7114 arm_feature(env, ARM_FEATURE_V8)) { 7115 qemu_log_mask(LOG_GUEST_ERROR, 7116 "M profile exception return with non-8-aligned SP " 7117 "for destination state is UNPREDICTABLE\n"); 7118 } 7119 7120 /* Do we need to pop callee-saved registers? */ 7121 if (return_to_secure && 7122 ((excret & R_V7M_EXCRET_ES_MASK) == 0 || 7123 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { 7124 uint32_t expected_sig = 0xfefa125b; 7125 uint32_t actual_sig = ldl_phys(cs->as, frameptr); 7126 7127 if (expected_sig != actual_sig) { 7128 /* Take a SecureFault on the current stack */ 7129 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; 7130 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7131 v7m_exception_taken(cpu, excret, true, false); 7132 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7133 "stackframe: failed exception return integrity " 7134 "signature check\n"); 7135 return; 7136 } 7137 7138 pop_ok = 7139 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) && 7141 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) && 7142 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) && 7143 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) && 7144 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) && 7145 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) && 7146 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) && 7147 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx); 7148 7149 frameptr += 0x28; 7150 } 7151 7152 /* Pop registers */ 7153 pop_ok = pop_ok && 7154 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) && 7155 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) && 7156 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) && 7157 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) && 7158 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) && 7159 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) && 7160 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) && 7161 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx); 7162 7163 if (!pop_ok) { 7164 /* v7m_stack_read() pended a fault, so take it (as a tail 7165 * chained exception on the same stack frame) 7166 */ 7167 v7m_exception_taken(cpu, excret, true, false); 7168 return; 7169 } 7170 7171 /* Returning from an exception with a PC with bit 0 set is defined 7172 * behaviour on v8M
(bit 0 is ignored), but for v7M it was specified 7173 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore 7174 * the lsbit, and there are several RTOSes out there which incorrectly 7175 * assume the r15 in the stack frame should be a Thumb-style "lsbit 7176 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but 7177 * complain about the badly behaved guest. 7178 */ 7179 if (env->regs[15] & 1) { 7180 env->regs[15] &= ~1U; 7181 if (!arm_feature(env, ARM_FEATURE_V8)) { 7182 qemu_log_mask(LOG_GUEST_ERROR, 7183 "M profile return from interrupt with misaligned " 7184 "PC is UNPREDICTABLE on v7M\n"); 7185 } 7186 } 7187 7188 if (arm_feature(env, ARM_FEATURE_V8)) { 7189 /* For v8M we have to check whether the xPSR exception field 7190 * matches the EXCRET value for return to handler/thread 7191 * before we commit to changing the SP and xPSR. 7192 */ 7193 bool will_be_handler = (xpsr & XPSR_EXCP) != 0; 7194 if (return_to_handler != will_be_handler) { 7195 /* Take an INVPC UsageFault on the current stack. 7196 * By this point we will have switched to the security state 7197 * for the background state, so this UsageFault will target 7198 * that state. 7199 */ 7200 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7201 env->v7m.secure); 7202 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7203 v7m_exception_taken(cpu, excret, true, false); 7204 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7205 "stackframe: failed exception return integrity " 7206 "check\n"); 7207 return; 7208 } 7209 } 7210 7211 /* Commit to consuming the stack frame */ 7212 frameptr += 0x20; 7213 /* Undo stack alignment (the SPREALIGN bit indicates that the original 7214 * pre-exception SP was not 8-aligned and we added a padding word to 7215 * align it, so we undo this by ORing in the bit that increases it 7216 * from the current 8-aligned value to the 8-unaligned value. (Adding 4 7217 * would work too but a logical OR is how the pseudocode specifies it.) 7218 */ 7219 if (xpsr & XPSR_SPREALIGN) { 7220 frameptr |= 4; 7221 } 7222 *frame_sp_p = frameptr; 7223 } 7224 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ 7225 xpsr_write(env, xpsr, ~XPSR_SPREALIGN); 7226 7227 /* The restored xPSR exception field will be zero if we're 7228 * resuming in Thread mode. If that doesn't match what the 7229 * exception return excret specified then this is a UsageFault. 7230 * v7M requires we make this check here; v8M did it earlier. 7231 */ 7232 if (return_to_handler != arm_v7m_is_handler_mode(env)) { 7233 /* Take an INVPC UsageFault by pushing the stack again; 7234 * we know we're v7M so this is never a Secure UsageFault. 7235 */ 7236 bool ignore_stackfaults; 7237 7238 assert(!arm_feature(env, ARM_FEATURE_V8)); 7239 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); 7240 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7241 ignore_stackfaults = v7m_push_stack(cpu); 7242 v7m_exception_taken(cpu, excret, false, ignore_stackfaults); 7243 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " 7244 "failed exception return integrity check\n"); 7245 return; 7246 } 7247 7248 /* Otherwise, we have a successful exception exit. */ 7249 arm_clear_exclusive(env); 7250 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 7251 } 7252 7253 static bool do_v7m_function_return(ARMCPU *cpu) 7254 { 7255 /* v8M security extensions magic function return. 
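 * This undoes the two-word frame pushed by HELPER(v7m_blxns) above: the return address is at the Secure stack pointer and the saved partial PSR (IPSR plus the SFPA flag) at SP + 4, and the frame is popped by adding 8 to that stack pointer.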
7256 * We may either: 7257 * (1) throw an exception (longjump) 7258 * (2) return true if we successfully handled the function return 7259 * (3) return false if we failed a consistency check and have 7260 * pended a UsageFault that needs to be taken now 7261 * 7262 * At this point the magic return value is split between env->regs[15] 7263 * and env->thumb. We don't bother to reconstitute it because we don't 7264 * need it (all values are handled the same way). 7265 */ 7266 CPUARMState *env = &cpu->env; 7267 uint32_t newpc, newpsr, newpsr_exc; 7268 7269 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 7270 7271 { 7272 bool threadmode, spsel; 7273 TCGMemOpIdx oi; 7274 ARMMMUIdx mmu_idx; 7275 uint32_t *frame_sp_p; 7276 uint32_t frameptr; 7277 7278 /* Pull the return address and IPSR from the Secure stack */ 7279 threadmode = !arm_v7m_is_handler_mode(env); 7280 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 7281 7282 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 7283 frameptr = *frame_sp_p; 7284 7285 /* These loads may throw an exception (for MPU faults). We want to 7286 * do them as secure, so work out what MMU index that is. 7287 */ 7288 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7289 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 7290 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 7291 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 7292 7293 /* Consistency checks on new IPSR */ 7294 newpsr_exc = newpsr & XPSR_EXCP; 7295 if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 7296 (env->v7m.exception == 1 && newpsr_exc != 0))) { 7297 /* Pend the fault and tell our caller to take it */ 7298 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7299 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7300 env->v7m.secure); 7301 qemu_log_mask(CPU_LOG_INT, 7302 "...taking INVPC UsageFault: " 7303 "IPSR consistency check failed\n"); 7304 return false; 7305 } 7306 7307 *frame_sp_p = frameptr + 8; 7308 } 7309 7310 /* This invalidates frame_sp_p */ 7311 switch_v7m_security_state(env, true); 7312 env->v7m.exception = newpsr_exc; 7313 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 7314 if (newpsr & XPSR_SFPA) { 7315 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 7316 } 7317 xpsr_write(env, 0, XPSR_IT); 7318 env->thumb = newpc & 1; 7319 env->regs[15] = newpc & ~1; 7320 7321 qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 7322 return true; 7323 } 7324 7325 static void arm_log_exception(int idx) 7326 { 7327 if (qemu_loglevel_mask(CPU_LOG_INT)) { 7328 const char *exc = NULL; 7329 static const char * const excnames[] = { 7330 [EXCP_UDEF] = "Undefined Instruction", 7331 [EXCP_SWI] = "SVC", 7332 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 7333 [EXCP_DATA_ABORT] = "Data Abort", 7334 [EXCP_IRQ] = "IRQ", 7335 [EXCP_FIQ] = "FIQ", 7336 [EXCP_BKPT] = "Breakpoint", 7337 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 7338 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 7339 [EXCP_HVC] = "Hypervisor Call", 7340 [EXCP_HYP_TRAP] = "Hypervisor Trap", 7341 [EXCP_SMC] = "Secure Monitor Call", 7342 [EXCP_VIRQ] = "Virtual IRQ", 7343 [EXCP_VFIQ] = "Virtual FIQ", 7344 [EXCP_SEMIHOST] = "Semihosting call", 7345 [EXCP_NOCP] = "v7M NOCP UsageFault", 7346 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 7347 }; 7348 7349 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 7350 exc = excnames[idx]; 7351 } 7352 if (!exc) { 7353 exc = "unknown"; 7354 } 7355 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, 
exc); 7356 } 7357 } 7358 7359 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 7360 uint32_t addr, uint16_t *insn) 7361 { 7362 /* Load a 16-bit portion of a v7M instruction, returning true on success, 7363 * or false on failure (in which case we will have pended the appropriate 7364 * exception). 7365 * We need to do the instruction fetch's MPU and SAU checks 7366 * like this because there is no MMU index that would allow 7367 * doing the load with a single function call. Instead we must 7368 * first check that the security attributes permit the load 7369 * and that they don't mismatch on the two halves of the instruction, 7370 * and then we do the load as a secure load (ie using the security 7371 * attributes of the address, not the CPU, as architecturally required). 7372 */ 7373 CPUState *cs = CPU(cpu); 7374 CPUARMState *env = &cpu->env; 7375 V8M_SAttributes sattrs = {}; 7376 MemTxAttrs attrs = {}; 7377 ARMMMUFaultInfo fi = {}; 7378 MemTxResult txres; 7379 target_ulong page_size; 7380 hwaddr physaddr; 7381 int prot; 7382 7383 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 7384 if (!sattrs.nsc || sattrs.ns) { 7385 /* This must be the second half of the insn, and it straddles a 7386 * region boundary with the second half not being S&NSC. 7387 */ 7388 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7389 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7390 qemu_log_mask(CPU_LOG_INT, 7391 "...really SecureFault with SFSR.INVEP\n"); 7392 return false; 7393 } 7394 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 7395 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 7396 /* the MPU lookup failed */ 7397 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7398 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 7399 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 7400 return false; 7401 } 7402 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 7403 attrs, &txres); 7404 if (txres != MEMTX_OK) { 7405 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7406 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7407 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 7408 return false; 7409 } 7410 return true; 7411 } 7412 7413 static bool v7m_handle_execute_nsc(ARMCPU *cpu) 7414 { 7415 /* Check whether this attempt to execute code in a Secure & NS-Callable 7416 * memory region is for an SG instruction; if so, then emulate the 7417 * effect of the SG instruction and return true. Otherwise pend 7418 * the correct kind of exception and return false. 7419 */ 7420 CPUARMState *env = &cpu->env; 7421 ARMMMUIdx mmu_idx; 7422 uint16_t insn; 7423 7424 /* We should never get here unless get_phys_addr_pmsav8() caused 7425 * an exception for NS executing in S&NSC memory. 7426 */ 7427 assert(!env->v7m.secure); 7428 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7429 7430 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 7431 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7432 7433 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 7434 return false; 7435 } 7436 7437 if (!env->thumb) { 7438 goto gen_invep; 7439 } 7440 7441 if (insn != 0xe97f) { 7442 /* Not an SG instruction first half (we choose the IMPDEF 7443 * early-SG-check option). 
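 * (SG encodes as 0xe97f 0xe97f, i.e. both halfwords are identical, which is why the same value is checked again for the second halfword below.)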
7444 */ 7445 goto gen_invep; 7446 } 7447 7448 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 7449 return false; 7450 } 7451 7452 if (insn != 0xe97f) { 7453 /* Not an SG instruction second half (yes, both halves of the SG 7454 * insn have the same hex value) 7455 */ 7456 goto gen_invep; 7457 } 7458 7459 /* OK, we have confirmed that we really have an SG instruction. 7460 * We know we're NS in S memory so don't need to repeat those checks. 7461 */ 7462 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 7463 ", executing it\n", env->regs[15]); 7464 env->regs[14] &= ~1; 7465 switch_v7m_security_state(env, true); 7466 xpsr_write(env, 0, XPSR_IT); 7467 env->regs[15] += 4; 7468 return true; 7469 7470 gen_invep: 7471 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7472 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7473 qemu_log_mask(CPU_LOG_INT, 7474 "...really SecureFault with SFSR.INVEP\n"); 7475 return false; 7476 } 7477 7478 void arm_v7m_cpu_do_interrupt(CPUState *cs) 7479 { 7480 ARMCPU *cpu = ARM_CPU(cs); 7481 CPUARMState *env = &cpu->env; 7482 uint32_t lr; 7483 bool ignore_stackfaults; 7484 7485 arm_log_exception(cs->exception_index); 7486 7487 /* For exceptions we just mark as pending on the NVIC, and let that 7488 handle it. */ 7489 switch (cs->exception_index) { 7490 case EXCP_UDEF: 7491 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7492 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; 7493 break; 7494 case EXCP_NOCP: 7495 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7496 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; 7497 break; 7498 case EXCP_INVSTATE: 7499 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7500 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; 7501 break; 7502 case EXCP_SWI: 7503 /* The PC already points to the next instruction. */ 7504 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); 7505 break; 7506 case EXCP_PREFETCH_ABORT: 7507 case EXCP_DATA_ABORT: 7508 /* Note that for M profile we don't have a guest facing FSR, but 7509 * the env->exception.fsr will be populated by the code that 7510 * raises the fault, in the A profile short-descriptor format. 7511 */ 7512 switch (env->exception.fsr & 0xf) { 7513 case M_FAKE_FSR_NSC_EXEC: 7514 /* Exception generated when we try to execute code at an address 7515 * which is marked as Secure & Non-Secure Callable and the CPU 7516 * is in the Non-Secure state. The only instruction which can 7517 * be executed like this is SG (and that only if both halves of 7518 * the SG instruction have the same security attributes.) 7519 * Everything else must generate an INVEP SecureFault, so we 7520 * emulate the SG instruction here. 7521 */ 7522 if (v7m_handle_execute_nsc(cpu)) { 7523 return; 7524 } 7525 break; 7526 case M_FAKE_FSR_SFAULT: 7527 /* Various flavours of SecureFault for attempts to execute or 7528 * access data in the wrong security state. 
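 * The switch below distinguishes them: a prefetch abort taken while Secure sets SFSR.INVTRAN, one taken while Non-secure sets SFSR.INVEP, and a data abort (an NS access to S memory) sets SFSR.AUVIOL.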
7529 */ 7530 switch (cs->exception_index) { 7531 case EXCP_PREFETCH_ABORT: 7532 if (env->v7m.secure) { 7533 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; 7534 qemu_log_mask(CPU_LOG_INT, 7535 "...really SecureFault with SFSR.INVTRAN\n"); 7536 } else { 7537 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7538 qemu_log_mask(CPU_LOG_INT, 7539 "...really SecureFault with SFSR.INVEP\n"); 7540 } 7541 break; 7542 case EXCP_DATA_ABORT: 7543 /* This must be an NS access to S memory */ 7544 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; 7545 qemu_log_mask(CPU_LOG_INT, 7546 "...really SecureFault with SFSR.AUVIOL\n"); 7547 break; 7548 } 7549 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7550 break; 7551 case 0x8: /* External Abort */ 7552 switch (cs->exception_index) { 7553 case EXCP_PREFETCH_ABORT: 7554 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7555 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); 7556 break; 7557 case EXCP_DATA_ABORT: 7558 env->v7m.cfsr[M_REG_NS] |= 7559 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); 7560 env->v7m.bfar = env->exception.vaddress; 7561 qemu_log_mask(CPU_LOG_INT, 7562 "...with CFSR.PRECISERR and BFAR 0x%x\n", 7563 env->v7m.bfar); 7564 break; 7565 } 7566 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7567 break; 7568 default: 7569 /* All other FSR values are either MPU faults or "can't happen 7570 * for M profile" cases. 7571 */ 7572 switch (cs->exception_index) { 7573 case EXCP_PREFETCH_ABORT: 7574 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7575 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); 7576 break; 7577 case EXCP_DATA_ABORT: 7578 env->v7m.cfsr[env->v7m.secure] |= 7579 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); 7580 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; 7581 qemu_log_mask(CPU_LOG_INT, 7582 "...with CFSR.DACCVIOL and MMFAR 0x%x\n", 7583 env->v7m.mmfar[env->v7m.secure]); 7584 break; 7585 } 7586 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, 7587 env->v7m.secure); 7588 break; 7589 } 7590 break; 7591 case EXCP_BKPT: 7592 if (semihosting_enabled()) { 7593 int nr; 7594 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff; 7595 if (nr == 0xab) { 7596 env->regs[15] += 2; 7597 qemu_log_mask(CPU_LOG_INT, 7598 "...handling as semihosting call 0x%x\n", 7599 env->regs[0]); 7600 env->regs[0] = do_arm_semihosting(env); 7601 return; 7602 } 7603 } 7604 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); 7605 break; 7606 case EXCP_IRQ: 7607 break; 7608 case EXCP_EXCEPTION_EXIT: 7609 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 7610 /* Must be v8M security extension function return */ 7611 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 7612 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7613 if (do_v7m_function_return(cpu)) { 7614 return; 7615 } 7616 } else { 7617 do_v7m_exception_exit(cpu); 7618 return; 7619 } 7620 break; 7621 default: 7622 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7623 return; /* Never happens. Keep compiler happy. */ 7624 } 7625 7626 if (arm_feature(env, ARM_FEATURE_V8)) { 7627 lr = R_V7M_EXCRET_RES1_MASK | 7628 R_V7M_EXCRET_DCRS_MASK | 7629 R_V7M_EXCRET_FTYPE_MASK; 7630 /* The S bit indicates whether we should return to Secure 7631 * or NonSecure (ie our current state). 7632 * The ES bit indicates whether we're taking this exception 7633 * to Secure or NonSecure (ie our target state). We set it 7634 * later, in v7m_exception_taken(). 7635 * The SPSEL bit is also set in v7m_exception_taken() for v8M. 
7636 * This corresponds to the ARM ARM pseudocode for v8M setting 7637 * some LR bits in PushStack() and some in ExceptionTaken(); 7638 * the distinction matters for the tailchain cases where we 7639 * can take an exception without pushing the stack. 7640 */ 7641 if (env->v7m.secure) { 7642 lr |= R_V7M_EXCRET_S_MASK; 7643 } 7644 } else { 7645 lr = R_V7M_EXCRET_RES1_MASK | 7646 R_V7M_EXCRET_S_MASK | 7647 R_V7M_EXCRET_DCRS_MASK | 7648 R_V7M_EXCRET_FTYPE_MASK | 7649 R_V7M_EXCRET_ES_MASK; 7650 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { 7651 lr |= R_V7M_EXCRET_SPSEL_MASK; 7652 } 7653 } 7654 if (!arm_v7m_is_handler_mode(env)) { 7655 lr |= R_V7M_EXCRET_MODE_MASK; 7656 } 7657 7658 ignore_stackfaults = v7m_push_stack(cpu); 7659 v7m_exception_taken(cpu, lr, false, ignore_stackfaults); 7660 qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception); 7661 } 7662 7663 /* Function used to synchronize QEMU's AArch64 register set with AArch32 7664 * register set. This is necessary when switching between AArch32 and AArch64 7665 * execution state. 7666 */ 7667 void aarch64_sync_32_to_64(CPUARMState *env) 7668 { 7669 int i; 7670 uint32_t mode = env->uncached_cpsr & CPSR_M; 7671 7672 /* We can blanket copy R[0:7] to X[0:7] */ 7673 for (i = 0; i < 8; i++) { 7674 env->xregs[i] = env->regs[i]; 7675 } 7676 7677 /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 7678 * Otherwise, they come from the banked user regs. 7679 */ 7680 if (mode == ARM_CPU_MODE_FIQ) { 7681 for (i = 8; i < 13; i++) { 7682 env->xregs[i] = env->usr_regs[i - 8]; 7683 } 7684 } else { 7685 for (i = 8; i < 13; i++) { 7686 env->xregs[i] = env->regs[i]; 7687 } 7688 } 7689 7690 /* Registers x13-x23 are the various mode SP and LR registers. Registers 7691 * r13 and r14 are only copied if we are in that mode, otherwise we copy 7692 * from the mode banked register.
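 * Concretely, the mapping used below is: x13/x14 <- USR SP/LR, x15 <- HYP SP, x16/x17 <- IRQ LR/SP, x18/x19 <- SVC LR/SP, x20/x21 <- ABT LR/SP, x22/x23 <- UND LR/SP.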
7693 */ 7694 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7695 env->xregs[13] = env->regs[13]; 7696 env->xregs[14] = env->regs[14]; 7697 } else { 7698 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 7699 /* HYP is an exception in that it is copied from r14 */ 7700 if (mode == ARM_CPU_MODE_HYP) { 7701 env->xregs[14] = env->regs[14]; 7702 } else { 7703 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)]; 7704 } 7705 } 7706 7707 if (mode == ARM_CPU_MODE_HYP) { 7708 env->xregs[15] = env->regs[13]; 7709 } else { 7710 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 7711 } 7712 7713 if (mode == ARM_CPU_MODE_IRQ) { 7714 env->xregs[16] = env->regs[14]; 7715 env->xregs[17] = env->regs[13]; 7716 } else { 7717 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)]; 7718 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 7719 } 7720 7721 if (mode == ARM_CPU_MODE_SVC) { 7722 env->xregs[18] = env->regs[14]; 7723 env->xregs[19] = env->regs[13]; 7724 } else { 7725 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)]; 7726 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 7727 } 7728 7729 if (mode == ARM_CPU_MODE_ABT) { 7730 env->xregs[20] = env->regs[14]; 7731 env->xregs[21] = env->regs[13]; 7732 } else { 7733 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)]; 7734 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 7735 } 7736 7737 if (mode == ARM_CPU_MODE_UND) { 7738 env->xregs[22] = env->regs[14]; 7739 env->xregs[23] = env->regs[13]; 7740 } else { 7741 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)]; 7742 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 7743 } 7744 7745 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7746 * mode, then we can copy from r8-r14. Otherwise, we copy from the 7747 * FIQ bank for r8-r14. 7748 */ 7749 if (mode == ARM_CPU_MODE_FIQ) { 7750 for (i = 24; i < 31; i++) { 7751 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 7752 } 7753 } else { 7754 for (i = 24; i < 29; i++) { 7755 env->xregs[i] = env->fiq_regs[i - 24]; 7756 } 7757 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 7758 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)]; 7759 } 7760 7761 env->pc = env->regs[15]; 7762 } 7763 7764 /* Function used to synchronize QEMU's AArch32 register set with AArch64 7765 * register set. This is necessary when switching between AArch32 and AArch64 7766 * execution state. 7767 */ 7768 void aarch64_sync_64_to_32(CPUARMState *env) 7769 { 7770 int i; 7771 uint32_t mode = env->uncached_cpsr & CPSR_M; 7772 7773 /* We can blanket copy X[0:7] to R[0:7] */ 7774 for (i = 0; i < 8; i++) { 7775 env->regs[i] = env->xregs[i]; 7776 } 7777 7778 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 7779 * Otherwise, we copy x8-x12 into the banked user regs. 7780 */ 7781 if (mode == ARM_CPU_MODE_FIQ) { 7782 for (i = 8; i < 13; i++) { 7783 env->usr_regs[i - 8] = env->xregs[i]; 7784 } 7785 } else { 7786 for (i = 8; i < 13; i++) { 7787 env->regs[i] = env->xregs[i]; 7788 } 7789 } 7790 7791 /* Registers r13 & r14 depend on the current mode. 7792 * If we are in a given mode, we copy the corresponding x registers to r13 7793 * and r14. Otherwise, we copy the x register to the banked r13 and r14 7794 * for the mode. 
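 * (This is the inverse of the x13-x23 mapping established in aarch64_sync_32_to_64() above.)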
7795 */ 7796 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7797 env->regs[13] = env->xregs[13]; 7798 env->regs[14] = env->xregs[14]; 7799 } else { 7800 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 7801 7802 /* HYP is an exception in that it does not have its own banked r14 but 7803 * shares the USR r14 7804 */ 7805 if (mode == ARM_CPU_MODE_HYP) { 7806 env->regs[14] = env->xregs[14]; 7807 } else { 7808 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 7809 } 7810 } 7811 7812 if (mode == ARM_CPU_MODE_HYP) { 7813 env->regs[13] = env->xregs[15]; 7814 } else { 7815 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 7816 } 7817 7818 if (mode == ARM_CPU_MODE_IRQ) { 7819 env->regs[14] = env->xregs[16]; 7820 env->regs[13] = env->xregs[17]; 7821 } else { 7822 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 7823 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 7824 } 7825 7826 if (mode == ARM_CPU_MODE_SVC) { 7827 env->regs[14] = env->xregs[18]; 7828 env->regs[13] = env->xregs[19]; 7829 } else { 7830 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 7831 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 7832 } 7833 7834 if (mode == ARM_CPU_MODE_ABT) { 7835 env->regs[14] = env->xregs[20]; 7836 env->regs[13] = env->xregs[21]; 7837 } else { 7838 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 7839 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 7840 } 7841 7842 if (mode == ARM_CPU_MODE_UND) { 7843 env->regs[14] = env->xregs[22]; 7844 env->regs[13] = env->xregs[23]; 7845 } else { 7846 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 7847 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 7848 } 7849 7850 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7851 * mode, then we can copy to r8-r14. Otherwise, we copy to the 7852 * FIQ bank for r8-r14. 7853 */ 7854 if (mode == ARM_CPU_MODE_FIQ) { 7855 for (i = 24; i < 31; i++) { 7856 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 7857 } 7858 } else { 7859 for (i = 24; i < 29; i++) { 7860 env->fiq_regs[i - 24] = env->xregs[i]; 7861 } 7862 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 7863 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 7864 } 7865 7866 env->regs[15] = env->pc; 7867 } 7868 7869 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 7870 { 7871 ARMCPU *cpu = ARM_CPU(cs); 7872 CPUARMState *env = &cpu->env; 7873 uint32_t addr; 7874 uint32_t mask; 7875 int new_mode; 7876 uint32_t offset; 7877 uint32_t moe; 7878 7879 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 7880 switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { 7881 case EC_BREAKPOINT: 7882 case EC_BREAKPOINT_SAME_EL: 7883 moe = 1; 7884 break; 7885 case EC_WATCHPOINT: 7886 case EC_WATCHPOINT_SAME_EL: 7887 moe = 10; 7888 break; 7889 case EC_AA32_BKPT: 7890 moe = 3; 7891 break; 7892 case EC_VECTORCATCH: 7893 moe = 5; 7894 break; 7895 default: 7896 moe = 0; 7897 break; 7898 } 7899 7900 if (moe) { 7901 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 7902 } 7903 7904 /* TODO: Vectored interrupt controller. 
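 * The fixed vector offsets used in the switch below are the architected ones: 0x04 Undefined Instruction, 0x08 SVC (and SMC, which vectors via MVBAR + 0x08 in Monitor mode), 0x0c Prefetch Abort, 0x10 Data Abort, 0x18 IRQ, 0x1c FIQ.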
*/ 7905 switch (cs->exception_index) { 7906 case EXCP_UDEF: 7907 new_mode = ARM_CPU_MODE_UND; 7908 addr = 0x04; 7909 mask = CPSR_I; 7910 if (env->thumb) 7911 offset = 2; 7912 else 7913 offset = 4; 7914 break; 7915 case EXCP_SWI: 7916 new_mode = ARM_CPU_MODE_SVC; 7917 addr = 0x08; 7918 mask = CPSR_I; 7919 /* The PC already points to the next instruction. */ 7920 offset = 0; 7921 break; 7922 case EXCP_BKPT: 7923 env->exception.fsr = 2; 7924 /* Fall through to prefetch abort. */ 7925 case EXCP_PREFETCH_ABORT: 7926 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 7927 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 7928 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 7929 env->exception.fsr, (uint32_t)env->exception.vaddress); 7930 new_mode = ARM_CPU_MODE_ABT; 7931 addr = 0x0c; 7932 mask = CPSR_A | CPSR_I; 7933 offset = 4; 7934 break; 7935 case EXCP_DATA_ABORT: 7936 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 7937 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 7938 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 7939 env->exception.fsr, 7940 (uint32_t)env->exception.vaddress); 7941 new_mode = ARM_CPU_MODE_ABT; 7942 addr = 0x10; 7943 mask = CPSR_A | CPSR_I; 7944 offset = 8; 7945 break; 7946 case EXCP_IRQ: 7947 new_mode = ARM_CPU_MODE_IRQ; 7948 addr = 0x18; 7949 /* Disable IRQ and imprecise data aborts. */ 7950 mask = CPSR_A | CPSR_I; 7951 offset = 4; 7952 if (env->cp15.scr_el3 & SCR_IRQ) { 7953 /* IRQ routed to monitor mode */ 7954 new_mode = ARM_CPU_MODE_MON; 7955 mask |= CPSR_F; 7956 } 7957 break; 7958 case EXCP_FIQ: 7959 new_mode = ARM_CPU_MODE_FIQ; 7960 addr = 0x1c; 7961 /* Disable FIQ, IRQ and imprecise data aborts. */ 7962 mask = CPSR_A | CPSR_I | CPSR_F; 7963 if (env->cp15.scr_el3 & SCR_FIQ) { 7964 /* FIQ routed to monitor mode */ 7965 new_mode = ARM_CPU_MODE_MON; 7966 } 7967 offset = 4; 7968 break; 7969 case EXCP_VIRQ: 7970 new_mode = ARM_CPU_MODE_IRQ; 7971 addr = 0x18; 7972 /* Disable IRQ and imprecise data aborts. */ 7973 mask = CPSR_A | CPSR_I; 7974 offset = 4; 7975 break; 7976 case EXCP_VFIQ: 7977 new_mode = ARM_CPU_MODE_FIQ; 7978 addr = 0x1c; 7979 /* Disable FIQ, IRQ and imprecise data aborts. */ 7980 mask = CPSR_A | CPSR_I | CPSR_F; 7981 offset = 4; 7982 break; 7983 case EXCP_SMC: 7984 new_mode = ARM_CPU_MODE_MON; 7985 addr = 0x08; 7986 mask = CPSR_A | CPSR_I | CPSR_F; 7987 offset = 0; 7988 break; 7989 default: 7990 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7991 return; /* Never happens. Keep compiler happy. */ 7992 } 7993 7994 if (new_mode == ARM_CPU_MODE_MON) { 7995 addr += env->cp15.mvbar; 7996 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 7997 /* High vectors. When enabled, base address cannot be remapped. */ 7998 addr += 0xffff0000; 7999 } else { 8000 /* ARM v7 architectures provide a vector base address register to remap 8001 * the interrupt vector table. 8002 * This register is only followed in non-monitor mode, and is banked. 8003 * Note: only bits 31:5 are valid. 8004 */ 8005 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 8006 } 8007 8008 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 8009 env->cp15.scr_el3 &= ~SCR_NS; 8010 } 8011 8012 switch_mode (env, new_mode); 8013 /* For exceptions taken to AArch32 we must clear the SS bit in both 8014 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8015 */ 8016 env->uncached_cpsr &= ~PSTATE_SS; 8017 env->spsr = cpsr_read(env); 8018 /* Clear IT bits. 
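 * (ITSTATE does not survive exception entry; condexec_bits is QEMU's
 * cached copy of the IT bits, so it is simply zeroed here.)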
*/ 8019 env->condexec_bits = 0; 8020 /* Switch to the new mode, and to the correct instruction set. */ 8021 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 8022 /* Set new mode endianness */ 8023 env->uncached_cpsr &= ~CPSR_E; 8024 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) { 8025 env->uncached_cpsr |= CPSR_E; 8026 } 8027 env->daif |= mask; 8028 /* this is a lie, as there was no c1_sys on V4T/V5, but who cares 8029 * and we should just guard the thumb mode on V4 */ 8030 if (arm_feature(env, ARM_FEATURE_V4T)) { 8031 env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 8032 } 8033 env->regs[14] = env->regs[15] + offset; 8034 env->regs[15] = addr; 8035 } 8036 8037 /* Handle exception entry to a target EL which is using AArch64 */ 8038 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 8039 { 8040 ARMCPU *cpu = ARM_CPU(cs); 8041 CPUARMState *env = &cpu->env; 8042 unsigned int new_el = env->exception.target_el; 8043 target_ulong addr = env->cp15.vbar_el[new_el]; 8044 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 8045 8046 if (arm_current_el(env) < new_el) { 8047 /* Entry vector offset depends on whether the implemented EL 8048 * immediately lower than the target level is using AArch32 or AArch64 8049 */ 8050 bool is_aa64; 8051 8052 switch (new_el) { 8053 case 3: 8054 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 8055 break; 8056 case 2: 8057 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0; 8058 break; 8059 case 1: 8060 is_aa64 = is_a64(env); 8061 break; 8062 default: 8063 g_assert_not_reached(); 8064 } 8065 8066 if (is_aa64) { 8067 addr += 0x400; 8068 } else { 8069 addr += 0x600; 8070 } 8071 } else if (pstate_read(env) & PSTATE_SP) { 8072 addr += 0x200; 8073 } 8074 8075 switch (cs->exception_index) { 8076 case EXCP_PREFETCH_ABORT: 8077 case EXCP_DATA_ABORT: 8078 env->cp15.far_el[new_el] = env->exception.vaddress; 8079 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 8080 env->cp15.far_el[new_el]); 8081 /* fall through */ 8082 case EXCP_BKPT: 8083 case EXCP_UDEF: 8084 case EXCP_SWI: 8085 case EXCP_HVC: 8086 case EXCP_HYP_TRAP: 8087 case EXCP_SMC: 8088 env->cp15.esr_el[new_el] = env->exception.syndrome; 8089 break; 8090 case EXCP_IRQ: 8091 case EXCP_VIRQ: 8092 addr += 0x80; 8093 break; 8094 case EXCP_FIQ: 8095 case EXCP_VFIQ: 8096 addr += 0x100; 8097 break; 8098 case EXCP_SEMIHOST: 8099 qemu_log_mask(CPU_LOG_INT, 8100 "...handling as semihosting call 0x%" PRIx64 "\n", 8101 env->xregs[0]); 8102 env->xregs[0] = do_arm_semihosting(env); 8103 return; 8104 default: 8105 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8106 } 8107 8108 if (is_a64(env)) { 8109 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); 8110 aarch64_save_sp(env, arm_current_el(env)); 8111 env->elr_el[new_el] = env->pc; 8112 } else { 8113 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env); 8114 env->elr_el[new_el] = env->regs[15]; 8115 8116 aarch64_sync_32_to_64(env); 8117 8118 env->condexec_bits = 0; 8119 } 8120 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 8121 env->elr_el[new_el]); 8122 8123 pstate_write(env, PSTATE_DAIF | new_mode); 8124 env->aarch64 = 1; 8125 aarch64_restore_sp(env, new_el); 8126 8127 env->pc = addr; 8128 8129 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 8130 new_el, env->pc, pstate_read(env)); 8131 } 8132 8133 static inline bool check_for_semihosting(CPUState *cs) 8134 { 8135 /* Check whether this exception is a semihosting call; if so 8136 * then
handle it and return true; otherwise return false. 8137 */ 8138 ARMCPU *cpu = ARM_CPU(cs); 8139 CPUARMState *env = &cpu->env; 8140 8141 if (is_a64(env)) { 8142 if (cs->exception_index == EXCP_SEMIHOST) { 8143 /* This is always the 64-bit semihosting exception. 8144 * The "is this usermode" and "is semihosting enabled" 8145 * checks have been done at translate time. 8146 */ 8147 qemu_log_mask(CPU_LOG_INT, 8148 "...handling as semihosting call 0x%" PRIx64 "\n", 8149 env->xregs[0]); 8150 env->xregs[0] = do_arm_semihosting(env); 8151 return true; 8152 } 8153 return false; 8154 } else { 8155 uint32_t imm; 8156 8157 /* Only intercept calls from privileged modes, to provide some 8158 * semblance of security. 8159 */ 8160 if (cs->exception_index != EXCP_SEMIHOST && 8161 (!semihosting_enabled() || 8162 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) { 8163 return false; 8164 } 8165 8166 switch (cs->exception_index) { 8167 case EXCP_SEMIHOST: 8168 /* This is always a semihosting call; the "is this usermode" 8169 * and "is semihosting enabled" checks have been done at 8170 * translate time. 8171 */ 8172 break; 8173 case EXCP_SWI: 8174 /* Check for semihosting interrupt. */ 8175 if (env->thumb) { 8176 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env)) 8177 & 0xff; 8178 if (imm == 0xab) { 8179 break; 8180 } 8181 } else { 8182 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env)) 8183 & 0xffffff; 8184 if (imm == 0x123456) { 8185 break; 8186 } 8187 } 8188 return false; 8189 case EXCP_BKPT: 8190 /* See if this is a semihosting syscall. */ 8191 if (env->thumb) { 8192 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) 8193 & 0xff; 8194 if (imm == 0xab) { 8195 env->regs[15] += 2; 8196 break; 8197 } 8198 } 8199 return false; 8200 default: 8201 return false; 8202 } 8203 8204 qemu_log_mask(CPU_LOG_INT, 8205 "...handling as semihosting call 0x%x\n", 8206 env->regs[0]); 8207 env->regs[0] = do_arm_semihosting(env); 8208 return true; 8209 } 8210 } 8211 8212 /* Handle a CPU exception for A and R profile CPUs. 8213 * Do any appropriate logging, handle PSCI calls, and then hand off 8214 * to the AArch64-entry or AArch32-entry function depending on the 8215 * target exception level's register width. 8216 */ 8217 void arm_cpu_do_interrupt(CPUState *cs) 8218 { 8219 ARMCPU *cpu = ARM_CPU(cs); 8220 CPUARMState *env = &cpu->env; 8221 unsigned int new_el = env->exception.target_el; 8222 8223 assert(!arm_feature(env, ARM_FEATURE_M)); 8224 8225 arm_log_exception(cs->exception_index); 8226 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 8227 new_el); 8228 if (qemu_loglevel_mask(CPU_LOG_INT) 8229 && !excp_is_internal(cs->exception_index)) { 8230 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 8231 env->exception.syndrome >> ARM_EL_EC_SHIFT, 8232 env->exception.syndrome); 8233 } 8234 8235 if (arm_is_psci_call(cpu, cs->exception_index)) { 8236 arm_handle_psci_call(cpu); 8237 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 8238 return; 8239 } 8240 8241 /* Semihosting semantics depend on the register width of the 8242 * code that caused the exception, not the target exception level, 8243 * so must be handled here. 
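 * For example, an AArch32 "SVC 0x123456" (or Thumb "SVC 0xAB") is
 * recognised by looking at the instruction that trapped, so the check is
 * keyed on the current register width and is made before we choose the
 * AArch32 or AArch64 entry path below.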
8244 */ 8245 if (check_for_semihosting(cs)) { 8246 return; 8247 } 8248 8249 assert(!excp_is_internal(cs->exception_index)); 8250 if (arm_el_is_aa64(env, new_el)) { 8251 arm_cpu_do_interrupt_aarch64(cs); 8252 } else { 8253 arm_cpu_do_interrupt_aarch32(cs); 8254 } 8255 8256 /* Hooks may change global state so BQL should be held, also the 8257 * BQL needs to be held for any modification of 8258 * cs->interrupt_request. 8259 */ 8260 g_assert(qemu_mutex_iothread_locked()); 8261 8262 arm_call_el_change_hook(cpu); 8263 8264 if (!kvm_enabled()) { 8265 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 8266 } 8267 } 8268 8269 /* Return the exception level which controls this address translation regime */ 8270 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 8271 { 8272 switch (mmu_idx) { 8273 case ARMMMUIdx_S2NS: 8274 case ARMMMUIdx_S1E2: 8275 return 2; 8276 case ARMMMUIdx_S1E3: 8277 return 3; 8278 case ARMMMUIdx_S1SE0: 8279 return arm_el_is_aa64(env, 3) ? 1 : 3; 8280 case ARMMMUIdx_S1SE1: 8281 case ARMMMUIdx_S1NSE0: 8282 case ARMMMUIdx_S1NSE1: 8283 case ARMMMUIdx_MPrivNegPri: 8284 case ARMMMUIdx_MUserNegPri: 8285 case ARMMMUIdx_MPriv: 8286 case ARMMMUIdx_MUser: 8287 case ARMMMUIdx_MSPrivNegPri: 8288 case ARMMMUIdx_MSUserNegPri: 8289 case ARMMMUIdx_MSPriv: 8290 case ARMMMUIdx_MSUser: 8291 return 1; 8292 default: 8293 g_assert_not_reached(); 8294 } 8295 } 8296 8297 /* Return the SCTLR value which controls this address translation regime */ 8298 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 8299 { 8300 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 8301 } 8302 8303 /* Return true if the specified stage of address translation is disabled */ 8304 static inline bool regime_translation_disabled(CPUARMState *env, 8305 ARMMMUIdx mmu_idx) 8306 { 8307 if (arm_feature(env, ARM_FEATURE_M)) { 8308 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 8309 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 8310 case R_V7M_MPU_CTRL_ENABLE_MASK: 8311 /* Enabled, but not for HardFault and NMI */ 8312 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 8313 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 8314 /* Enabled for all cases */ 8315 return false; 8316 case 0: 8317 default: 8318 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 8319 * we warned about that in armv7m_nvic.c when the guest set it. 
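 * We choose to treat that UNPREDICTABLE combination as if the MPU were
 * disabled, which is why it shares the return path of the ENABLE == 0
 * case.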
8320 */ 8321 return true; 8322 } 8323 } 8324 8325 if (mmu_idx == ARMMMUIdx_S2NS) { 8326 return (env->cp15.hcr_el2 & HCR_VM) == 0; 8327 } 8328 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 8329 } 8330 8331 static inline bool regime_translation_big_endian(CPUARMState *env, 8332 ARMMMUIdx mmu_idx) 8333 { 8334 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 8335 } 8336 8337 /* Return the TCR controlling this translation regime */ 8338 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 8339 { 8340 if (mmu_idx == ARMMMUIdx_S2NS) { 8341 return &env->cp15.vtcr_el2; 8342 } 8343 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 8344 } 8345 8346 /* Convert a possible stage1+2 MMU index into the appropriate 8347 * stage 1 MMU index 8348 */ 8349 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 8350 { 8351 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 8352 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0); 8353 } 8354 return mmu_idx; 8355 } 8356 8357 /* Returns TBI0 value for current regime el */ 8358 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx) 8359 { 8360 TCR *tcr; 8361 uint32_t el; 8362 8363 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8364 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8365 */ 8366 mmu_idx = stage_1_mmu_idx(mmu_idx); 8367 8368 tcr = regime_tcr(env, mmu_idx); 8369 el = regime_el(env, mmu_idx); 8370 8371 if (el > 1) { 8372 return extract64(tcr->raw_tcr, 20, 1); 8373 } else { 8374 return extract64(tcr->raw_tcr, 37, 1); 8375 } 8376 } 8377 8378 /* Returns TBI1 value for current regime el */ 8379 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx) 8380 { 8381 TCR *tcr; 8382 uint32_t el; 8383 8384 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8385 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8386 */ 8387 mmu_idx = stage_1_mmu_idx(mmu_idx); 8388 8389 tcr = regime_tcr(env, mmu_idx); 8390 el = regime_el(env, mmu_idx); 8391 8392 if (el > 1) { 8393 return 0; 8394 } else { 8395 return extract64(tcr->raw_tcr, 38, 1); 8396 } 8397 } 8398 8399 /* Return the TTBR associated with this translation regime */ 8400 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 8401 int ttbrn) 8402 { 8403 if (mmu_idx == ARMMMUIdx_S2NS) { 8404 return env->cp15.vttbr_el2; 8405 } 8406 if (ttbrn == 0) { 8407 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 8408 } else { 8409 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 8410 } 8411 } 8412 8413 /* Return true if the translation regime is using LPAE format page tables */ 8414 static inline bool regime_using_lpae_format(CPUARMState *env, 8415 ARMMMUIdx mmu_idx) 8416 { 8417 int el = regime_el(env, mmu_idx); 8418 if (el == 2 || arm_el_is_aa64(env, el)) { 8419 return true; 8420 } 8421 if (arm_feature(env, ARM_FEATURE_LPAE) 8422 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 8423 return true; 8424 } 8425 return false; 8426 } 8427 8428 /* Returns true if the stage 1 translation regime is using LPAE format page 8429 * tables. Used when raising alignment exceptions, whose FSR changes depending 8430 * on whether the long or short descriptor format is in use. 
*/ 8431 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 8432 { 8433 mmu_idx = stage_1_mmu_idx(mmu_idx); 8434 8435 return regime_using_lpae_format(env, mmu_idx); 8436 } 8437 8438 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 8439 { 8440 switch (mmu_idx) { 8441 case ARMMMUIdx_S1SE0: 8442 case ARMMMUIdx_S1NSE0: 8443 case ARMMMUIdx_MUser: 8444 case ARMMMUIdx_MSUser: 8445 case ARMMMUIdx_MUserNegPri: 8446 case ARMMMUIdx_MSUserNegPri: 8447 return true; 8448 default: 8449 return false; 8450 case ARMMMUIdx_S12NSE0: 8451 case ARMMMUIdx_S12NSE1: 8452 g_assert_not_reached(); 8453 } 8454 } 8455 8456 /* Translate section/page access permissions to page 8457 * R/W protection flags 8458 * 8459 * @env: CPUARMState 8460 * @mmu_idx: MMU index indicating required translation regime 8461 * @ap: The 3-bit access permissions (AP[2:0]) 8462 * @domain_prot: The 2-bit domain access permissions 8463 */ 8464 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 8465 int ap, int domain_prot) 8466 { 8467 bool is_user = regime_is_user(env, mmu_idx); 8468 8469 if (domain_prot == 3) { 8470 return PAGE_READ | PAGE_WRITE; 8471 } 8472 8473 switch (ap) { 8474 case 0: 8475 if (arm_feature(env, ARM_FEATURE_V7)) { 8476 return 0; 8477 } 8478 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 8479 case SCTLR_S: 8480 return is_user ? 0 : PAGE_READ; 8481 case SCTLR_R: 8482 return PAGE_READ; 8483 default: 8484 return 0; 8485 } 8486 case 1: 8487 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8488 case 2: 8489 if (is_user) { 8490 return PAGE_READ; 8491 } else { 8492 return PAGE_READ | PAGE_WRITE; 8493 } 8494 case 3: 8495 return PAGE_READ | PAGE_WRITE; 8496 case 4: /* Reserved. */ 8497 return 0; 8498 case 5: 8499 return is_user ? 0 : PAGE_READ; 8500 case 6: 8501 return PAGE_READ; 8502 case 7: 8503 if (!arm_feature(env, ARM_FEATURE_V6K)) { 8504 return 0; 8505 } 8506 return PAGE_READ; 8507 default: 8508 g_assert_not_reached(); 8509 } 8510 } 8511 8512 /* Translate section/page access permissions to page 8513 * R/W protection flags. 8514 * 8515 * @ap: The 2-bit simple AP (AP[2:1]) 8516 * @is_user: TRUE if accessing from PL0 8517 */ 8518 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 8519 { 8520 switch (ap) { 8521 case 0: 8522 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8523 case 1: 8524 return PAGE_READ | PAGE_WRITE; 8525 case 2: 8526 return is_user ? 
0 : PAGE_READ; 8527 case 3: 8528 return PAGE_READ; 8529 default: 8530 g_assert_not_reached(); 8531 } 8532 } 8533 8534 static inline int 8535 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 8536 { 8537 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 8538 } 8539 8540 /* Translate S2 section/page access permissions to protection flags 8541 * 8542 * @env: CPUARMState 8543 * @s2ap: The 2-bit stage2 access permissions (S2AP) 8544 * @xn: XN (execute-never) bit 8545 */ 8546 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 8547 { 8548 int prot = 0; 8549 8550 if (s2ap & 1) { 8551 prot |= PAGE_READ; 8552 } 8553 if (s2ap & 2) { 8554 prot |= PAGE_WRITE; 8555 } 8556 if (!xn) { 8557 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 8558 prot |= PAGE_EXEC; 8559 } 8560 } 8561 return prot; 8562 } 8563 8564 /* Translate section/page access permissions to protection flags 8565 * 8566 * @env: CPUARMState 8567 * @mmu_idx: MMU index indicating required translation regime 8568 * @is_aa64: TRUE if AArch64 8569 * @ap: The 2-bit simple AP (AP[2:1]) 8570 * @ns: NS (non-secure) bit 8571 * @xn: XN (execute-never) bit 8572 * @pxn: PXN (privileged execute-never) bit 8573 */ 8574 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 8575 int ap, int ns, int xn, int pxn) 8576 { 8577 bool is_user = regime_is_user(env, mmu_idx); 8578 int prot_rw, user_rw; 8579 bool have_wxn; 8580 int wxn = 0; 8581 8582 assert(mmu_idx != ARMMMUIdx_S2NS); 8583 8584 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 8585 if (is_user) { 8586 prot_rw = user_rw; 8587 } else { 8588 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 8589 } 8590 8591 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 8592 return prot_rw; 8593 } 8594 8595 /* TODO have_wxn should be replaced with 8596 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 8597 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 8598 * compatible processors have EL2, which is required for [U]WXN. 
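 * (SCTLR.WXN makes any writable region execute-never; SCTLR.UWXN makes
 * any region writable at EL0 execute-never for privileged accesses.)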
8599 */ 8600 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 8601 8602 if (have_wxn) { 8603 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 8604 } 8605 8606 if (is_aa64) { 8607 switch (regime_el(env, mmu_idx)) { 8608 case 1: 8609 if (!is_user) { 8610 xn = pxn || (user_rw & PAGE_WRITE); 8611 } 8612 break; 8613 case 2: 8614 case 3: 8615 break; 8616 } 8617 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8618 switch (regime_el(env, mmu_idx)) { 8619 case 1: 8620 case 3: 8621 if (is_user) { 8622 xn = xn || !(user_rw & PAGE_READ); 8623 } else { 8624 int uwxn = 0; 8625 if (have_wxn) { 8626 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 8627 } 8628 xn = xn || !(prot_rw & PAGE_READ) || pxn || 8629 (uwxn && (user_rw & PAGE_WRITE)); 8630 } 8631 break; 8632 case 2: 8633 break; 8634 } 8635 } else { 8636 xn = wxn = 0; 8637 } 8638 8639 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 8640 return prot_rw; 8641 } 8642 return prot_rw | PAGE_EXEC; 8643 } 8644 8645 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 8646 uint32_t *table, uint32_t address) 8647 { 8648 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 8649 TCR *tcr = regime_tcr(env, mmu_idx); 8650 8651 if (address & tcr->mask) { 8652 if (tcr->raw_tcr & TTBCR_PD1) { 8653 /* Translation table walk disabled for TTBR1 */ 8654 return false; 8655 } 8656 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 8657 } else { 8658 if (tcr->raw_tcr & TTBCR_PD0) { 8659 /* Translation table walk disabled for TTBR0 */ 8660 return false; 8661 } 8662 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 8663 } 8664 *table |= (address >> 18) & 0x3ffc; 8665 return true; 8666 } 8667 8668 /* Translate a S1 pagetable walk through S2 if needed. */ 8669 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 8670 hwaddr addr, MemTxAttrs txattrs, 8671 ARMMMUFaultInfo *fi) 8672 { 8673 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && 8674 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 8675 target_ulong s2size; 8676 hwaddr s2pa; 8677 int s2prot; 8678 int ret; 8679 8680 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, 8681 &txattrs, &s2prot, &s2size, fi, NULL); 8682 if (ret) { 8683 assert(fi->type != ARMFault_None); 8684 fi->s2addr = addr; 8685 fi->stage2 = true; 8686 fi->s1ptw = true; 8687 return ~0; 8688 } 8689 addr = s2pa; 8690 } 8691 return addr; 8692 } 8693 8694 /* All loads done in the course of a page table walk go through here. 8695 * TODO: rather than ignoring errors from physical memory reads (which 8696 * are external aborts in ARM terminology) we should propagate this 8697 * error out so that we can turn it into a Data Abort if this walk 8698 * was being done for a CPU load/store or an address translation instruction 8699 * (but not if it was for a debug access). 
8700 */ 8701 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8702 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8703 { 8704 ARMCPU *cpu = ARM_CPU(cs); 8705 CPUARMState *env = &cpu->env; 8706 MemTxAttrs attrs = {}; 8707 MemTxResult result = MEMTX_OK; 8708 AddressSpace *as; 8709 uint32_t data; 8710 8711 attrs.secure = is_secure; 8712 as = arm_addressspace(cs, attrs); 8713 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8714 if (fi->s1ptw) { 8715 return 0; 8716 } 8717 if (regime_translation_big_endian(env, mmu_idx)) { 8718 data = address_space_ldl_be(as, addr, attrs, &result); 8719 } else { 8720 data = address_space_ldl_le(as, addr, attrs, &result); 8721 } 8722 if (result == MEMTX_OK) { 8723 return data; 8724 } 8725 fi->type = ARMFault_SyncExternalOnWalk; 8726 fi->ea = arm_extabort_type(result); 8727 return 0; 8728 } 8729 8730 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8731 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8732 { 8733 ARMCPU *cpu = ARM_CPU(cs); 8734 CPUARMState *env = &cpu->env; 8735 MemTxAttrs attrs = {}; 8736 MemTxResult result = MEMTX_OK; 8737 AddressSpace *as; 8738 uint64_t data; 8739 8740 attrs.secure = is_secure; 8741 as = arm_addressspace(cs, attrs); 8742 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8743 if (fi->s1ptw) { 8744 return 0; 8745 } 8746 if (regime_translation_big_endian(env, mmu_idx)) { 8747 data = address_space_ldq_be(as, addr, attrs, &result); 8748 } else { 8749 data = address_space_ldq_le(as, addr, attrs, &result); 8750 } 8751 if (result == MEMTX_OK) { 8752 return data; 8753 } 8754 fi->type = ARMFault_SyncExternalOnWalk; 8755 fi->ea = arm_extabort_type(result); 8756 return 0; 8757 } 8758 8759 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 8760 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8761 hwaddr *phys_ptr, int *prot, 8762 target_ulong *page_size, 8763 ARMMMUFaultInfo *fi) 8764 { 8765 CPUState *cs = CPU(arm_env_get_cpu(env)); 8766 int level = 1; 8767 uint32_t table; 8768 uint32_t desc; 8769 int type; 8770 int ap; 8771 int domain = 0; 8772 int domain_prot; 8773 hwaddr phys_addr; 8774 uint32_t dacr; 8775 8776 /* Pagetable walk. */ 8777 /* Lookup l1 descriptor. */ 8778 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8779 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8780 fi->type = ARMFault_Translation; 8781 goto do_fault; 8782 } 8783 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8784 mmu_idx, fi); 8785 if (fi->type != ARMFault_None) { 8786 goto do_fault; 8787 } 8788 type = (desc & 3); 8789 domain = (desc >> 5) & 0x0f; 8790 if (regime_el(env, mmu_idx) == 1) { 8791 dacr = env->cp15.dacr_ns; 8792 } else { 8793 dacr = env->cp15.dacr_s; 8794 } 8795 domain_prot = (dacr >> (domain * 2)) & 3; 8796 if (type == 0) { 8797 /* Section translation fault. */ 8798 fi->type = ARMFault_Translation; 8799 goto do_fault; 8800 } 8801 if (type != 2) { 8802 level = 2; 8803 } 8804 if (domain_prot == 0 || domain_prot == 2) { 8805 fi->type = ARMFault_Domain; 8806 goto do_fault; 8807 } 8808 if (type == 2) { 8809 /* 1Mb section. */ 8810 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 8811 ap = (desc >> 10) & 3; 8812 *page_size = 1024 * 1024; 8813 } else { 8814 /* Lookup l2 entry. */ 8815 if (type == 1) { 8816 /* Coarse pagetable. */ 8817 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 8818 } else { 8819 /* Fine pagetable. 
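 * (ARMv5 "fine" second-level tables have 1024 word-sized entries and
 * support 1KB tiny pages; the index below comes from VA bits [19:10].)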
*/ 8820 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 8821 } 8822 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8823 mmu_idx, fi); 8824 if (fi->type != ARMFault_None) { 8825 goto do_fault; 8826 } 8827 switch (desc & 3) { 8828 case 0: /* Page translation fault. */ 8829 fi->type = ARMFault_Translation; 8830 goto do_fault; 8831 case 1: /* 64k page. */ 8832 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 8833 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 8834 *page_size = 0x10000; 8835 break; 8836 case 2: /* 4k page. */ 8837 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8838 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 8839 *page_size = 0x1000; 8840 break; 8841 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 8842 if (type == 1) { 8843 /* ARMv6/XScale extended small page format */ 8844 if (arm_feature(env, ARM_FEATURE_XSCALE) 8845 || arm_feature(env, ARM_FEATURE_V6)) { 8846 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8847 *page_size = 0x1000; 8848 } else { 8849 /* UNPREDICTABLE in ARMv5; we choose to take a 8850 * page translation fault. 8851 */ 8852 fi->type = ARMFault_Translation; 8853 goto do_fault; 8854 } 8855 } else { 8856 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 8857 *page_size = 0x400; 8858 } 8859 ap = (desc >> 4) & 3; 8860 break; 8861 default: 8862 /* Never happens, but compiler isn't smart enough to tell. */ 8863 abort(); 8864 } 8865 } 8866 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 8867 *prot |= *prot ? PAGE_EXEC : 0; 8868 if (!(*prot & (1 << access_type))) { 8869 /* Access permission fault. */ 8870 fi->type = ARMFault_Permission; 8871 goto do_fault; 8872 } 8873 *phys_ptr = phys_addr; 8874 return false; 8875 do_fault: 8876 fi->domain = domain; 8877 fi->level = level; 8878 return true; 8879 } 8880 8881 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 8882 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8883 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 8884 target_ulong *page_size, ARMMMUFaultInfo *fi) 8885 { 8886 CPUState *cs = CPU(arm_env_get_cpu(env)); 8887 int level = 1; 8888 uint32_t table; 8889 uint32_t desc; 8890 uint32_t xn; 8891 uint32_t pxn = 0; 8892 int type; 8893 int ap; 8894 int domain = 0; 8895 int domain_prot; 8896 hwaddr phys_addr; 8897 uint32_t dacr; 8898 bool ns; 8899 8900 /* Pagetable walk. */ 8901 /* Lookup l1 descriptor. */ 8902 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8903 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8904 fi->type = ARMFault_Translation; 8905 goto do_fault; 8906 } 8907 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8908 mmu_idx, fi); 8909 if (fi->type != ARMFault_None) { 8910 goto do_fault; 8911 } 8912 type = (desc & 3); 8913 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 8914 /* Section translation fault, or attempt to use the encoding 8915 * which is Reserved on implementations without PXN. 8916 */ 8917 fi->type = ARMFault_Translation; 8918 goto do_fault; 8919 } 8920 if ((type == 1) || !(desc & (1 << 18))) { 8921 /* Page or Section. 
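 * (Supersection descriptors reuse bits [8:5] as extended physical
 * address bits, so the domain field is only read for page-table and
 * ordinary section descriptors.)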
*/ 8922 domain = (desc >> 5) & 0x0f; 8923 } 8924 if (regime_el(env, mmu_idx) == 1) { 8925 dacr = env->cp15.dacr_ns; 8926 } else { 8927 dacr = env->cp15.dacr_s; 8928 } 8929 if (type == 1) { 8930 level = 2; 8931 } 8932 domain_prot = (dacr >> (domain * 2)) & 3; 8933 if (domain_prot == 0 || domain_prot == 2) { 8934 /* Section or Page domain fault */ 8935 fi->type = ARMFault_Domain; 8936 goto do_fault; 8937 } 8938 if (type != 1) { 8939 if (desc & (1 << 18)) { 8940 /* Supersection. */ 8941 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 8942 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 8943 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 8944 *page_size = 0x1000000; 8945 } else { 8946 /* Section. */ 8947 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 8948 *page_size = 0x100000; 8949 } 8950 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 8951 xn = desc & (1 << 4); 8952 pxn = desc & 1; 8953 ns = extract32(desc, 19, 1); 8954 } else { 8955 if (arm_feature(env, ARM_FEATURE_PXN)) { 8956 pxn = (desc >> 2) & 1; 8957 } 8958 ns = extract32(desc, 3, 1); 8959 /* Lookup l2 entry. */ 8960 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 8961 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8962 mmu_idx, fi); 8963 if (fi->type != ARMFault_None) { 8964 goto do_fault; 8965 } 8966 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 8967 switch (desc & 3) { 8968 case 0: /* Page translation fault. */ 8969 fi->type = ARMFault_Translation; 8970 goto do_fault; 8971 case 1: /* 64k page. */ 8972 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 8973 xn = desc & (1 << 15); 8974 *page_size = 0x10000; 8975 break; 8976 case 2: case 3: /* 4k page. */ 8977 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8978 xn = desc & 1; 8979 *page_size = 0x1000; 8980 break; 8981 default: 8982 /* Never happens, but compiler isn't smart enough to tell. */ 8983 abort(); 8984 } 8985 } 8986 if (domain_prot == 3) { 8987 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 8988 } else { 8989 if (pxn && !regime_is_user(env, mmu_idx)) { 8990 xn = 1; 8991 } 8992 if (xn && access_type == MMU_INST_FETCH) { 8993 fi->type = ARMFault_Permission; 8994 goto do_fault; 8995 } 8996 8997 if (arm_feature(env, ARM_FEATURE_V6K) && 8998 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 8999 /* The simplified model uses AP[0] as an access control bit. */ 9000 if ((ap & 1) == 0) { 9001 /* Access flag fault. */ 9002 fi->type = ARMFault_AccessFlag; 9003 goto do_fault; 9004 } 9005 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 9006 } else { 9007 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9008 } 9009 if (*prot && !xn) { 9010 *prot |= PAGE_EXEC; 9011 } 9012 if (!(*prot & (1 << access_type))) { 9013 /* Access permission fault. */ 9014 fi->type = ARMFault_Permission; 9015 goto do_fault; 9016 } 9017 } 9018 if (ns) { 9019 /* The NS bit will (as required by the architecture) have no effect if 9020 * the CPU doesn't support TZ or this is a non-secure translation 9021 * regime, because the attribute will already be non-secure. 
9022 */ 9023 attrs->secure = false; 9024 } 9025 *phys_ptr = phys_addr; 9026 return false; 9027 do_fault: 9028 fi->domain = domain; 9029 fi->level = level; 9030 return true; 9031 } 9032 9033 /* 9034 * check_s2_mmu_setup 9035 * @cpu: ARMCPU 9036 * @is_aa64: True if the translation regime is in AArch64 state 9037 * @startlevel: Suggested starting level 9038 * @inputsize: Bitsize of IPAs 9039 * @stride: Page-table stride (See the ARM ARM) 9040 * 9041 * Returns true if the suggested S2 translation parameters are OK and 9042 * false otherwise. 9043 */ 9044 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 9045 int inputsize, int stride) 9046 { 9047 const int grainsize = stride + 3; 9048 int startsizecheck; 9049 9050 /* Negative levels are never allowed. */ 9051 if (level < 0) { 9052 return false; 9053 } 9054 9055 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 9056 if (startsizecheck < 1 || startsizecheck > stride + 4) { 9057 return false; 9058 } 9059 9060 if (is_aa64) { 9061 CPUARMState *env = &cpu->env; 9062 unsigned int pamax = arm_pamax(cpu); 9063 9064 switch (stride) { 9065 case 13: /* 64KB Pages. */ 9066 if (level == 0 || (level == 1 && pamax <= 42)) { 9067 return false; 9068 } 9069 break; 9070 case 11: /* 16KB Pages. */ 9071 if (level == 0 || (level == 1 && pamax <= 40)) { 9072 return false; 9073 } 9074 break; 9075 case 9: /* 4KB Pages. */ 9076 if (level == 0 && pamax <= 42) { 9077 return false; 9078 } 9079 break; 9080 default: 9081 g_assert_not_reached(); 9082 } 9083 9084 /* Inputsize checks. */ 9085 if (inputsize > pamax && 9086 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 9087 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 9088 return false; 9089 } 9090 } else { 9091 /* AArch32 only supports 4KB pages. Assert on that. */ 9092 assert(stride == 9); 9093 9094 if (level == 0) { 9095 return false; 9096 } 9097 } 9098 return true; 9099 } 9100 9101 /* Translate from the 4-bit stage 2 representation of 9102 * memory attributes (without cache-allocation hints) to 9103 * the 8-bit representation of the stage 1 MAIR registers 9104 * (which includes allocation hints). 9105 * 9106 * ref: shared/translation/attrs/S2AttrDecode() 9107 * .../S2ConvertAttrsHints() 9108 */ 9109 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 9110 { 9111 uint8_t hiattr = extract32(s2attrs, 2, 2); 9112 uint8_t loattr = extract32(s2attrs, 0, 2); 9113 uint8_t hihint = 0, lohint = 0; 9114 9115 if (hiattr != 0) { /* normal memory */ 9116 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 9117 hiattr = loattr = 1; /* non-cacheable */ 9118 } else { 9119 if (hiattr != 1) { /* Write-through or write-back */ 9120 hihint = 3; /* RW allocate */ 9121 } 9122 if (loattr != 1) { /* Write-through or write-back */ 9123 lohint = 3; /* RW allocate */ 9124 } 9125 } 9126 } 9127 9128 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 9129 } 9130 9131 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 9132 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9133 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 9134 target_ulong *page_size_ptr, 9135 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 9136 { 9137 ARMCPU *cpu = arm_env_get_cpu(env); 9138 CPUState *cs = CPU(cpu); 9139 /* Read an LPAE long-descriptor translation table. 
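 * In outline (a rough sketch of the walk below): select TTBR0 or TTBR1
 * from the top bits of the address, derive the starting level from the
 * granule size and input address size, then loop reading one descriptor
 * per level, accumulating the NSTable/APTable/XNTable attributes, until
 * a block or page descriptor yields the output address.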
*/ 9140 ARMFaultType fault_type = ARMFault_Translation; 9141 uint32_t level; 9142 uint32_t epd = 0; 9143 int32_t t0sz, t1sz; 9144 uint32_t tg; 9145 uint64_t ttbr; 9146 int ttbr_select; 9147 hwaddr descaddr, indexmask, indexmask_grainsize; 9148 uint32_t tableattrs; 9149 target_ulong page_size; 9150 uint32_t attrs; 9151 int32_t stride = 9; 9152 int32_t addrsize; 9153 int inputsize; 9154 int32_t tbi = 0; 9155 TCR *tcr = regime_tcr(env, mmu_idx); 9156 int ap, ns, xn, pxn; 9157 uint32_t el = regime_el(env, mmu_idx); 9158 bool ttbr1_valid = true; 9159 uint64_t descaddrmask; 9160 bool aarch64 = arm_el_is_aa64(env, el); 9161 9162 /* TODO: 9163 * This code does not handle the different format TCR for VTCR_EL2. 9164 * This code also does not support shareability levels. 9165 * Attribute and permission bit handling should also be checked when adding 9166 * support for those page table walks. 9167 */ 9168 if (aarch64) { 9169 level = 0; 9170 addrsize = 64; 9171 if (el > 1) { 9172 if (mmu_idx != ARMMMUIdx_S2NS) { 9173 tbi = extract64(tcr->raw_tcr, 20, 1); 9174 } 9175 } else { 9176 if (extract64(address, 55, 1)) { 9177 tbi = extract64(tcr->raw_tcr, 38, 1); 9178 } else { 9179 tbi = extract64(tcr->raw_tcr, 37, 1); 9180 } 9181 } 9182 tbi *= 8; 9183 9184 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it 9185 * invalid. 9186 */ 9187 if (el > 1) { 9188 ttbr1_valid = false; 9189 } 9190 } else { 9191 level = 1; 9192 addrsize = 32; 9193 /* There is no TTBR1 for EL2 */ 9194 if (el == 2) { 9195 ttbr1_valid = false; 9196 } 9197 } 9198 9199 /* Determine whether this address is in the region controlled by 9200 * TTBR0 or TTBR1 (or if it is in neither region and should fault). 9201 * This is a Non-secure PL0/1 stage 1 translation, so controlled by 9202 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: 9203 */ 9204 if (aarch64) { 9205 /* AArch64 translation. */ 9206 t0sz = extract32(tcr->raw_tcr, 0, 6); 9207 t0sz = MIN(t0sz, 39); 9208 t0sz = MAX(t0sz, 16); 9209 } else if (mmu_idx != ARMMMUIdx_S2NS) { 9210 /* AArch32 stage 1 translation. */ 9211 t0sz = extract32(tcr->raw_tcr, 0, 3); 9212 } else { 9213 /* AArch32 stage 2 translation. */ 9214 bool sext = extract32(tcr->raw_tcr, 4, 1); 9215 bool sign = extract32(tcr->raw_tcr, 3, 1); 9216 /* Address size is 40-bit for a stage 2 translation, 9217 * and t0sz can be negative (from -8 to 7), 9218 * so we need to adjust it to use the TTBR selecting logic below. 9219 */ 9220 addrsize = 40; 9221 t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8; 9222 9223 /* If the sign-extend bit is not the same as t0sz[3], the result 9224 * is unpredictable. Flag this as a guest error. 
*/ 9225 if (sign != sext) { 9226 qemu_log_mask(LOG_GUEST_ERROR, 9227 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 9228 } 9229 } 9230 t1sz = extract32(tcr->raw_tcr, 16, 6); 9231 if (aarch64) { 9232 t1sz = MIN(t1sz, 39); 9233 t1sz = MAX(t1sz, 16); 9234 } 9235 if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) { 9236 /* there is a ttbr0 region and we are in it (high bits all zero) */ 9237 ttbr_select = 0; 9238 } else if (ttbr1_valid && t1sz && 9239 !extract64(~address, addrsize - t1sz, t1sz - tbi)) { 9240 /* there is a ttbr1 region and we are in it (high bits all one) */ 9241 ttbr_select = 1; 9242 } else if (!t0sz) { 9243 /* ttbr0 region is "everything not in the ttbr1 region" */ 9244 ttbr_select = 0; 9245 } else if (!t1sz && ttbr1_valid) { 9246 /* ttbr1 region is "everything not in the ttbr0 region" */ 9247 ttbr_select = 1; 9248 } else { 9249 /* in the gap between the two regions, this is a Translation fault */ 9250 fault_type = ARMFault_Translation; 9251 goto do_fault; 9252 } 9253 9254 /* Note that QEMU ignores shareability and cacheability attributes, 9255 * so we don't need to do anything with the SH, ORGN, IRGN fields 9256 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 9257 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 9258 * implement any ASID-like capability so we can ignore it (instead 9259 * we will always flush the TLB any time the ASID is changed). 9260 */ 9261 if (ttbr_select == 0) { 9262 ttbr = regime_ttbr(env, mmu_idx, 0); 9263 if (el < 2) { 9264 epd = extract32(tcr->raw_tcr, 7, 1); 9265 } 9266 inputsize = addrsize - t0sz; 9267 9268 tg = extract32(tcr->raw_tcr, 14, 2); 9269 if (tg == 1) { /* 64KB pages */ 9270 stride = 13; 9271 } 9272 if (tg == 2) { /* 16KB pages */ 9273 stride = 11; 9274 } 9275 } else { 9276 /* We should only be here if TTBR1 is valid */ 9277 assert(ttbr1_valid); 9278 9279 ttbr = regime_ttbr(env, mmu_idx, 1); 9280 epd = extract32(tcr->raw_tcr, 23, 1); 9281 inputsize = addrsize - t1sz; 9282 9283 tg = extract32(tcr->raw_tcr, 30, 2); 9284 if (tg == 3) { /* 64KB pages */ 9285 stride = 13; 9286 } 9287 if (tg == 1) { /* 16KB pages */ 9288 stride = 11; 9289 } 9290 } 9291 9292 /* Here we should have set up all the parameters for the translation: 9293 * inputsize, ttbr, epd, stride, tbi 9294 */ 9295 9296 if (epd) { 9297 /* Translation table walk disabled => Translation fault on TLB miss 9298 * Note: This is always 0 on 64-bit EL2 and EL3. 9299 */ 9300 goto do_fault; 9301 } 9302 9303 if (mmu_idx != ARMMMUIdx_S2NS) { 9304 /* The starting level depends on the virtual address size (which can 9305 * be up to 48 bits) and the translation granule size. It indicates 9306 * the number of strides (stride bits at a time) needed to 9307 * consume the bits of the input address. In the pseudocode this is: 9308 * level = 4 - RoundUp((inputsize - grainsize) / stride) 9309 * where their 'inputsize' is our 'inputsize', 'grainsize' is 9310 * our 'stride + 3' and 'stride' is our 'stride'. 
9311 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 9312 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 9313 * = 4 - (inputsize - 4) / stride; 9314 */ 9315 level = 4 - (inputsize - 4) / stride; 9316 } else { 9317 /* For stage 2 translations the starting level is specified by the 9318 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 9319 */ 9320 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 9321 uint32_t startlevel; 9322 bool ok; 9323 9324 if (!aarch64 || stride == 9) { 9325 /* AArch32 or 4KB pages */ 9326 startlevel = 2 - sl0; 9327 } else { 9328 /* 16KB or 64KB pages */ 9329 startlevel = 3 - sl0; 9330 } 9331 9332 /* Check that the starting level is valid. */ 9333 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 9334 inputsize, stride); 9335 if (!ok) { 9336 fault_type = ARMFault_Translation; 9337 goto do_fault; 9338 } 9339 level = startlevel; 9340 } 9341 9342 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 9343 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 9344 9345 /* Now we can extract the actual base address from the TTBR */ 9346 descaddr = extract64(ttbr, 0, 48); 9347 descaddr &= ~indexmask; 9348 9349 /* The address field in the descriptor goes up to bit 39 for ARMv7 9350 * but up to bit 47 for ARMv8, but we use the descaddrmask 9351 * up to bit 39 for AArch32, because we don't need other bits in that case 9352 * to construct next descriptor address (anyway they should be all zeroes). 9353 */ 9354 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 9355 ~indexmask_grainsize; 9356 9357 /* Secure accesses start with the page table in secure memory and 9358 * can be downgraded to non-secure at any step. Non-secure accesses 9359 * remain non-secure. We implement this by just ORing in the NSTable/NS 9360 * bits at each step. 9361 */ 9362 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 9363 for (;;) { 9364 uint64_t descriptor; 9365 bool nstable; 9366 9367 descaddr |= (address >> (stride * (4 - level))) & indexmask; 9368 descaddr &= ~7ULL; 9369 nstable = extract32(tableattrs, 4, 1); 9370 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 9371 if (fi->type != ARMFault_None) { 9372 goto do_fault; 9373 } 9374 9375 if (!(descriptor & 1) || 9376 (!(descriptor & 2) && (level == 3))) { 9377 /* Invalid, or the Reserved level 3 encoding */ 9378 goto do_fault; 9379 } 9380 descaddr = descriptor & descaddrmask; 9381 9382 if ((descriptor & 2) && (level < 3)) { 9383 /* Table entry. The top five bits are attributes which may 9384 * propagate down through lower levels of the table (and 9385 * which are all arranged so that 0 means "no effect", so 9386 * we can gather them up by ORing in the bits at each level). 9387 */ 9388 tableattrs |= extract64(descriptor, 59, 5); 9389 level++; 9390 indexmask = indexmask_grainsize; 9391 continue; 9392 } 9393 /* Block entry at level 1 or 2, or page entry at level 3. 9394 * These are basically the same thing, although the number 9395 * of bits we pull in from the vaddr varies. 
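 * For example, with 4KB granules (stride 9) a level 3 entry maps a 4KB
 * page, a level 2 block maps 2MB and a level 1 block maps 1GB, which is
 * exactly what the page_size calculation below yields.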
9396 */ 9397 page_size = (1ULL << ((stride * (4 - level)) + 3)); 9398 descaddr |= (address & (page_size - 1)); 9399 /* Extract attributes from the descriptor */ 9400 attrs = extract64(descriptor, 2, 10) 9401 | (extract64(descriptor, 52, 12) << 10); 9402 9403 if (mmu_idx == ARMMMUIdx_S2NS) { 9404 /* Stage 2 table descriptors do not include any attribute fields */ 9405 break; 9406 } 9407 /* Merge in attributes from table descriptors */ 9408 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 9409 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ 9410 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 9411 * means "force PL1 access only", which means forcing AP[1] to 0. 9412 */ 9413 if (extract32(tableattrs, 2, 1)) { 9414 attrs &= ~(1 << 4); 9415 } 9416 attrs |= nstable << 3; /* NS */ 9417 break; 9418 } 9419 /* Here descaddr is the final physical address, and attributes 9420 * are all in attrs. 9421 */ 9422 fault_type = ARMFault_AccessFlag; 9423 if ((attrs & (1 << 8)) == 0) { 9424 /* Access flag */ 9425 goto do_fault; 9426 } 9427 9428 ap = extract32(attrs, 4, 2); 9429 xn = extract32(attrs, 12, 1); 9430 9431 if (mmu_idx == ARMMMUIdx_S2NS) { 9432 ns = true; 9433 *prot = get_S2prot(env, ap, xn); 9434 } else { 9435 ns = extract32(attrs, 3, 1); 9436 pxn = extract32(attrs, 11, 1); 9437 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 9438 } 9439 9440 fault_type = ARMFault_Permission; 9441 if (!(*prot & (1 << access_type))) { 9442 goto do_fault; 9443 } 9444 9445 if (ns) { 9446 /* The NS bit will (as required by the architecture) have no effect if 9447 * the CPU doesn't support TZ or this is a non-secure translation 9448 * regime, because the attribute will already be non-secure. 9449 */ 9450 txattrs->secure = false; 9451 } 9452 9453 if (cacheattrs != NULL) { 9454 if (mmu_idx == ARMMMUIdx_S2NS) { 9455 cacheattrs->attrs = convert_stage2_attrs(env, 9456 extract32(attrs, 0, 4)); 9457 } else { 9458 /* Index into MAIR registers for cache attributes */ 9459 uint8_t attrindx = extract32(attrs, 0, 3); 9460 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 9461 assert(attrindx <= 7); 9462 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 9463 } 9464 cacheattrs->shareability = extract32(attrs, 6, 2); 9465 } 9466 9467 *phys_ptr = descaddr; 9468 *page_size_ptr = page_size; 9469 return false; 9470 9471 do_fault: 9472 fi->type = fault_type; 9473 fi->level = level; 9474 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 9475 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); 9476 return true; 9477 } 9478 9479 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 9480 ARMMMUIdx mmu_idx, 9481 int32_t address, int *prot) 9482 { 9483 if (!arm_feature(env, ARM_FEATURE_M)) { 9484 *prot = PAGE_READ | PAGE_WRITE; 9485 switch (address) { 9486 case 0xF0000000 ... 0xFFFFFFFF: 9487 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 9488 /* hivecs execing is ok */ 9489 *prot |= PAGE_EXEC; 9490 } 9491 break; 9492 case 0x00000000 ... 0x7FFFFFFF: 9493 *prot |= PAGE_EXEC; 9494 break; 9495 } 9496 } else { 9497 /* Default system address map for M profile cores. 9498 * The architecture specifies which regions are execute-never; 9499 * at the MPU level no other checks are defined. 9500 */ 9501 switch (address) { 9502 case 0x00000000 ... 0x1fffffff: /* ROM */ 9503 case 0x20000000 ... 0x3fffffff: /* SRAM */ 9504 case 0x60000000 ... 0x7fffffff: /* RAM */ 9505 case 0x80000000 ... 
0x9fffffff: /* RAM */ 9506 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9507 break; 9508 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 9509 case 0xa0000000 ... 0xbfffffff: /* Device */ 9510 case 0xc0000000 ... 0xdfffffff: /* Device */ 9511 case 0xe0000000 ... 0xffffffff: /* System */ 9512 *prot = PAGE_READ | PAGE_WRITE; 9513 break; 9514 default: 9515 g_assert_not_reached(); 9516 } 9517 } 9518 } 9519 9520 static bool pmsav7_use_background_region(ARMCPU *cpu, 9521 ARMMMUIdx mmu_idx, bool is_user) 9522 { 9523 /* Return true if we should use the default memory map as a 9524 * "background" region if there are no hits against any MPU regions. 9525 */ 9526 CPUARMState *env = &cpu->env; 9527 9528 if (is_user) { 9529 return false; 9530 } 9531 9532 if (arm_feature(env, ARM_FEATURE_M)) { 9533 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 9534 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 9535 } else { 9536 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 9537 } 9538 } 9539 9540 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 9541 { 9542 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 9543 return arm_feature(env, ARM_FEATURE_M) && 9544 extract32(address, 20, 12) == 0xe00; 9545 } 9546 9547 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 9548 { 9549 /* True if address is in the M profile system region 9550 * 0xe0000000 - 0xffffffff 9551 */ 9552 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 9553 } 9554 9555 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 9556 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9557 hwaddr *phys_ptr, int *prot, 9558 ARMMMUFaultInfo *fi) 9559 { 9560 ARMCPU *cpu = arm_env_get_cpu(env); 9561 int n; 9562 bool is_user = regime_is_user(env, mmu_idx); 9563 9564 *phys_ptr = address; 9565 *prot = 0; 9566 9567 if (regime_translation_disabled(env, mmu_idx) || 9568 m_is_ppb_region(env, address)) { 9569 /* MPU disabled or M profile PPB access: use default memory map. 9570 * The other case which uses the default memory map in the 9571 * v7M ARM ARM pseudocode is exception vector reads from the vector 9572 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 9573 * which always does a direct read using address_space_ldl(), rather 9574 * than going via this function, so we don't need to check that here. 
9575 */ 9576 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9577 } else { /* MPU enabled */ 9578 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 9579 /* region search */ 9580 uint32_t base = env->pmsav7.drbar[n]; 9581 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 9582 uint32_t rmask; 9583 bool srdis = false; 9584 9585 if (!(env->pmsav7.drsr[n] & 0x1)) { 9586 continue; 9587 } 9588 9589 if (!rsize) { 9590 qemu_log_mask(LOG_GUEST_ERROR, 9591 "DRSR[%d]: Rsize field cannot be 0\n", n); 9592 continue; 9593 } 9594 rsize++; 9595 rmask = (1ull << rsize) - 1; 9596 9597 if (base & rmask) { 9598 qemu_log_mask(LOG_GUEST_ERROR, 9599 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 9600 "to DRSR region size, mask = 0x%" PRIx32 "\n", 9601 n, base, rmask); 9602 continue; 9603 } 9604 9605 if (address < base || address > base + rmask) { 9606 continue; 9607 } 9608 9609 /* Region matched */ 9610 9611 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 9612 int i, snd; 9613 uint32_t srdis_mask; 9614 9615 rsize -= 3; /* sub region size (power of 2) */ 9616 snd = ((address - base) >> rsize) & 0x7; 9617 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 9618 9619 srdis_mask = srdis ? 0x3 : 0x0; 9620 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 9621 /* This will check in groups of 2, 4 and then 8, whether 9622 * the subregion bits are consistent. rsize is incremented 9623 * back up to give the region size, considering consistent 9624 * adjacent subregions as one region. Stop testing if rsize 9625 * is already big enough for an entire QEMU page. 9626 */ 9627 int snd_rounded = snd & ~(i - 1); 9628 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 9629 snd_rounded + 8, i); 9630 if (srdis_mask ^ srdis_multi) { 9631 break; 9632 } 9633 srdis_mask = (srdis_mask << i) | srdis_mask; 9634 rsize++; 9635 } 9636 } 9637 if (rsize < TARGET_PAGE_BITS) { 9638 qemu_log_mask(LOG_UNIMP, 9639 "DRSR[%d]: No support for MPU (sub)region " 9640 "alignment of %" PRIu32 " bits. Minimum is %d\n", 9641 n, rsize, TARGET_PAGE_BITS); 9642 continue; 9643 } 9644 if (srdis) { 9645 continue; 9646 } 9647 break; 9648 } 9649 9650 if (n == -1) { /* no hits */ 9651 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 9652 /* background fault */ 9653 fi->type = ARMFault_Background; 9654 return true; 9655 } 9656 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9657 } else { /* a MPU hit! */ 9658 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 9659 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 9660 9661 if (m_is_system_region(env, address)) { 9662 /* System space is always execute never */ 9663 xn = 1; 9664 } 9665 9666 if (is_user) { /* User mode AP bit decoding */ 9667 switch (ap) { 9668 case 0: 9669 case 1: 9670 case 5: 9671 break; /* no access */ 9672 case 3: 9673 *prot |= PAGE_WRITE; 9674 /* fall through */ 9675 case 2: 9676 case 6: 9677 *prot |= PAGE_READ | PAGE_EXEC; 9678 break; 9679 case 7: 9680 /* for v7M, same as 6; for R profile a reserved value */ 9681 if (arm_feature(env, ARM_FEATURE_M)) { 9682 *prot |= PAGE_READ | PAGE_EXEC; 9683 break; 9684 } 9685 /* fall through */ 9686 default: 9687 qemu_log_mask(LOG_GUEST_ERROR, 9688 "DRACR[%d]: Bad value for AP bits: 0x%" 9689 PRIx32 "\n", n, ap); 9690 } 9691 } else { /* Priv. 
mode AP bits decoding */ 9692 switch (ap) { 9693 case 0: 9694 break; /* no access */ 9695 case 1: 9696 case 2: 9697 case 3: 9698 *prot |= PAGE_WRITE; 9699 /* fall through */ 9700 case 5: 9701 case 6: 9702 *prot |= PAGE_READ | PAGE_EXEC; 9703 break; 9704 case 7: 9705 /* for v7M, same as 6; for R profile a reserved value */ 9706 if (arm_feature(env, ARM_FEATURE_M)) { 9707 *prot |= PAGE_READ | PAGE_EXEC; 9708 break; 9709 } 9710 /* fall through */ 9711 default: 9712 qemu_log_mask(LOG_GUEST_ERROR, 9713 "DRACR[%d]: Bad value for AP bits: 0x%" 9714 PRIx32 "\n", n, ap); 9715 } 9716 } 9717 9718 /* execute never */ 9719 if (xn) { 9720 *prot &= ~PAGE_EXEC; 9721 } 9722 } 9723 } 9724 9725 fi->type = ARMFault_Permission; 9726 fi->level = 1; 9727 return !(*prot & (1 << access_type)); 9728 } 9729 9730 static bool v8m_is_sau_exempt(CPUARMState *env, 9731 uint32_t address, MMUAccessType access_type) 9732 { 9733 /* The architecture specifies that certain address ranges are 9734 * exempt from v8M SAU/IDAU checks. 9735 */ 9736 return 9737 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 9738 (address >= 0xe0000000 && address <= 0xe0002fff) || 9739 (address >= 0xe000e000 && address <= 0xe000efff) || 9740 (address >= 0xe002e000 && address <= 0xe002efff) || 9741 (address >= 0xe0040000 && address <= 0xe0041fff) || 9742 (address >= 0xe00ff000 && address <= 0xe00fffff); 9743 } 9744 9745 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 9746 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9747 V8M_SAttributes *sattrs) 9748 { 9749 /* Look up the security attributes for this address. Compare the 9750 * pseudocode SecurityCheck() function. 9751 * We assume the caller has zero-initialized *sattrs. 9752 */ 9753 ARMCPU *cpu = arm_env_get_cpu(env); 9754 int r; 9755 9756 /* TODO: implement IDAU */ 9757 9758 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 9759 /* 0xf0000000..0xffffffff is always S for insn fetches */ 9760 return; 9761 } 9762 9763 if (v8m_is_sau_exempt(env, address, access_type)) { 9764 sattrs->ns = !regime_is_secure(env, mmu_idx); 9765 return; 9766 } 9767 9768 switch (env->sau.ctrl & 3) { 9769 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 9770 break; 9771 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 9772 sattrs->ns = true; 9773 break; 9774 default: /* SAU.ENABLE == 1 */ 9775 for (r = 0; r < cpu->sau_sregion; r++) { 9776 if (env->sau.rlar[r] & 1) { 9777 uint32_t base = env->sau.rbar[r] & ~0x1f; 9778 uint32_t limit = env->sau.rlar[r] | 0x1f; 9779 9780 if (base <= address && limit >= address) { 9781 if (sattrs->srvalid) { 9782 /* If we hit in more than one region then we must report 9783 * as Secure, not NS-Callable, with no valid region 9784 * number info. 9785 */ 9786 sattrs->ns = false; 9787 sattrs->nsc = false; 9788 sattrs->sregion = 0; 9789 sattrs->srvalid = false; 9790 break; 9791 } else { 9792 if (env->sau.rlar[r] & 2) { 9793 sattrs->nsc = true; 9794 } else { 9795 sattrs->ns = true; 9796 } 9797 sattrs->srvalid = true; 9798 sattrs->sregion = r; 9799 } 9800 } 9801 } 9802 } 9803 9804 /* TODO when we support the IDAU then it may override the result here */ 9805 break; 9806 } 9807 } 9808 9809 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 9810 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9811 hwaddr *phys_ptr, MemTxAttrs *txattrs, 9812 int *prot, ARMMMUFaultInfo *fi, uint32_t *mregion) 9813 { 9814 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 9815 * that a full phys-to-virt translation does). 
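 * The v8M security attribution, where required, is expected to be done
 * by the caller (see get_phys_addr_pmsav8() below).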
9816 * mregion is (if not NULL) set to the region number which matched,
9817 * or -1 if no region number is returned (MPU off, address did not
9818 * hit a region, address hit in multiple regions).
9819 */
9820 ARMCPU *cpu = arm_env_get_cpu(env);
9821 bool is_user = regime_is_user(env, mmu_idx);
9822 uint32_t secure = regime_is_secure(env, mmu_idx);
9823 int n;
9824 int matchregion = -1;
9825 bool hit = false;
9826
9827 *phys_ptr = address;
9828 *prot = 0;
9829 if (mregion) {
9830 *mregion = -1;
9831 }
9832
9833 /* Unlike the ARM ARM pseudocode, we don't need to check whether this
9834 * was an exception vector read from the vector table (which is always
9835 * done using the default system address map), because those accesses
9836 * are done in arm_v7m_load_vector(), which always does a direct
9837 * read using address_space_ldl(), rather than going via this function.
9838 */
9839 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
9840 hit = true;
9841 } else if (m_is_ppb_region(env, address)) {
9842 hit = true;
9843 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
9844 hit = true;
9845 } else {
9846 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
9847 /* region search */
9848 /* Note that the base address is bits [31:5] from the register
9849 * with bits [4:0] all zeroes, but the limit address is bits
9850 * [31:5] from the register with bits [4:0] all ones.
9851 */
9852 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
9853 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
9854
9855 if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
9856 /* Region disabled */
9857 continue;
9858 }
9859
9860 if (address < base || address > limit) {
9861 continue;
9862 }
9863
9864 if (hit) {
9865 /* Multiple regions match -- always a failure (unlike
9866 * PMSAv7 where highest-numbered-region wins)
9867 */
9868 fi->type = ARMFault_Permission;
9869 fi->level = 1;
9870 return true;
9871 }
9872
9873 matchregion = n;
9874 hit = true;
9875
9876 if (base & ~TARGET_PAGE_MASK) {
9877 qemu_log_mask(LOG_UNIMP,
9878 "MPU_RBAR[%d]: No support for MPU region base "
9879 "address of 0x%" PRIx32 ". Minimum alignment is "
9880 "%d\n",
9881 n, base, TARGET_PAGE_BITS);
9882 continue;
9883 }
9884 if ((limit + 1) & ~TARGET_PAGE_MASK) {
9885 qemu_log_mask(LOG_UNIMP,
9886 "MPU_RLAR[%d]: No support for MPU region limit "
9887 "address of 0x%" PRIx32 ". Minimum alignment is "
9888 "%d\n",
9889 n, limit, TARGET_PAGE_BITS);
9890 continue;
9891 }
9892 }
9893 }
9894
9895 if (!hit) {
9896 /* background fault */
9897 fi->type = ARMFault_Background;
9898 return true;
9899 }
9900
9901 if (matchregion == -1) {
9902 /* hit using the background region */
9903 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
9904 } else {
9905 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
9906 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
9907
9908 if (m_is_system_region(env, address)) {
9909 /* System space is always execute never */
9910 xn = 1;
9911 }
9912
9913 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
9914 if (*prot && !xn) {
9915 *prot |= PAGE_EXEC;
9916 }
9917 /* We don't need to look the attribute up in the MAIR0/MAIR1
9918 * registers because that only tells us about cacheability.
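     * Note that the final permission check below relies on MMU_DATA_LOAD,
     * MMU_DATA_STORE and MMU_INST_FETCH being 0, 1 and 2, so that
     * (1 << access_type) lines up with PAGE_READ, PAGE_WRITE and PAGE_EXEC.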
9919 */ 9920 if (mregion) { 9921 *mregion = matchregion; 9922 } 9923 } 9924 9925 fi->type = ARMFault_Permission; 9926 fi->level = 1; 9927 return !(*prot & (1 << access_type)); 9928 } 9929 9930 9931 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 9932 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9933 hwaddr *phys_ptr, MemTxAttrs *txattrs, 9934 int *prot, ARMMMUFaultInfo *fi) 9935 { 9936 uint32_t secure = regime_is_secure(env, mmu_idx); 9937 V8M_SAttributes sattrs = {}; 9938 9939 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 9940 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 9941 if (access_type == MMU_INST_FETCH) { 9942 /* Instruction fetches always use the MMU bank and the 9943 * transaction attribute determined by the fetch address, 9944 * regardless of CPU state. This is painful for QEMU 9945 * to handle, because it would mean we need to encode 9946 * into the mmu_idx not just the (user, negpri) information 9947 * for the current security state but also that for the 9948 * other security state, which would balloon the number 9949 * of mmu_idx values needed alarmingly. 9950 * Fortunately we can avoid this because it's not actually 9951 * possible to arbitrarily execute code from memory with 9952 * the wrong security attribute: it will always generate 9953 * an exception of some kind or another, apart from the 9954 * special case of an NS CPU executing an SG instruction 9955 * in S&NSC memory. So we always just fail the translation 9956 * here and sort things out in the exception handler 9957 * (including possibly emulating an SG instruction). 9958 */ 9959 if (sattrs.ns != !secure) { 9960 if (sattrs.nsc) { 9961 fi->type = ARMFault_QEMU_NSCExec; 9962 } else { 9963 fi->type = ARMFault_QEMU_SFault; 9964 } 9965 *phys_ptr = address; 9966 *prot = 0; 9967 return true; 9968 } 9969 } else { 9970 /* For data accesses we always use the MMU bank indicated 9971 * by the current CPU state, but the security attributes 9972 * might downgrade a secure access to nonsecure. 9973 */ 9974 if (sattrs.ns) { 9975 txattrs->secure = false; 9976 } else if (!secure) { 9977 /* NS access to S memory must fault. 9978 * Architecturally we should first check whether the 9979 * MPU information for this address indicates that we 9980 * are doing an unaligned access to Device memory, which 9981 * should generate a UsageFault instead. QEMU does not 9982 * currently check for that kind of unaligned access though. 9983 * If we added it we would need to do so as a special case 9984 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 9985 */ 9986 fi->type = ARMFault_QEMU_SFault; 9987 *phys_ptr = address; 9988 *prot = 0; 9989 return true; 9990 } 9991 } 9992 } 9993 9994 return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 9995 txattrs, prot, fi, NULL); 9996 } 9997 9998 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 9999 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10000 hwaddr *phys_ptr, int *prot, 10001 ARMMMUFaultInfo *fi) 10002 { 10003 int n; 10004 uint32_t mask; 10005 uint32_t base; 10006 bool is_user = regime_is_user(env, mmu_idx); 10007 10008 if (regime_translation_disabled(env, mmu_idx)) { 10009 /* MPU disabled. 
*/ 10010 *phys_ptr = address; 10011 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10012 return false; 10013 } 10014 10015 *phys_ptr = address; 10016 for (n = 7; n >= 0; n--) { 10017 base = env->cp15.c6_region[n]; 10018 if ((base & 1) == 0) { 10019 continue; 10020 } 10021 mask = 1 << ((base >> 1) & 0x1f); 10022 /* Keep this shift separate from the above to avoid an 10023 (undefined) << 32. */ 10024 mask = (mask << 1) - 1; 10025 if (((base ^ address) & ~mask) == 0) { 10026 break; 10027 } 10028 } 10029 if (n < 0) { 10030 fi->type = ARMFault_Background; 10031 return true; 10032 } 10033 10034 if (access_type == MMU_INST_FETCH) { 10035 mask = env->cp15.pmsav5_insn_ap; 10036 } else { 10037 mask = env->cp15.pmsav5_data_ap; 10038 } 10039 mask = (mask >> (n * 4)) & 0xf; 10040 switch (mask) { 10041 case 0: 10042 fi->type = ARMFault_Permission; 10043 fi->level = 1; 10044 return true; 10045 case 1: 10046 if (is_user) { 10047 fi->type = ARMFault_Permission; 10048 fi->level = 1; 10049 return true; 10050 } 10051 *prot = PAGE_READ | PAGE_WRITE; 10052 break; 10053 case 2: 10054 *prot = PAGE_READ; 10055 if (!is_user) { 10056 *prot |= PAGE_WRITE; 10057 } 10058 break; 10059 case 3: 10060 *prot = PAGE_READ | PAGE_WRITE; 10061 break; 10062 case 5: 10063 if (is_user) { 10064 fi->type = ARMFault_Permission; 10065 fi->level = 1; 10066 return true; 10067 } 10068 *prot = PAGE_READ; 10069 break; 10070 case 6: 10071 *prot = PAGE_READ; 10072 break; 10073 default: 10074 /* Bad permission. */ 10075 fi->type = ARMFault_Permission; 10076 fi->level = 1; 10077 return true; 10078 } 10079 *prot |= PAGE_EXEC; 10080 return false; 10081 } 10082 10083 /* Combine either inner or outer cacheability attributes for normal 10084 * memory, according to table D4-42 and pseudocode procedure 10085 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 10086 * 10087 * NB: only stage 1 includes allocation hints (RW bits), leading to 10088 * some asymmetry. 
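 * For example, a stage 1 attribute of 0xf (Write-Back, R/W-allocate) combined
 * with a stage 2 attribute of 0xa (Write-Through, read-allocate) yields 0xb:
 * the Write-Through type from stage 2 with the allocation hints from stage 1.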
10089 */ 10090 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 10091 { 10092 if (s1 == 4 || s2 == 4) { 10093 /* non-cacheable has precedence */ 10094 return 4; 10095 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 10096 /* stage 1 write-through takes precedence */ 10097 return s1; 10098 } else if (extract32(s2, 2, 2) == 2) { 10099 /* stage 2 write-through takes precedence, but the allocation hint 10100 * is still taken from stage 1 10101 */ 10102 return (2 << 2) | extract32(s1, 0, 2); 10103 } else { /* write-back */ 10104 return s1; 10105 } 10106 } 10107 10108 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 10109 * and CombineS1S2Desc() 10110 * 10111 * @s1: Attributes from stage 1 walk 10112 * @s2: Attributes from stage 2 walk 10113 */ 10114 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 10115 { 10116 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 10117 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 10118 ARMCacheAttrs ret; 10119 10120 /* Combine shareability attributes (table D4-43) */ 10121 if (s1.shareability == 2 || s2.shareability == 2) { 10122 /* if either are outer-shareable, the result is outer-shareable */ 10123 ret.shareability = 2; 10124 } else if (s1.shareability == 3 || s2.shareability == 3) { 10125 /* if either are inner-shareable, the result is inner-shareable */ 10126 ret.shareability = 3; 10127 } else { 10128 /* both non-shareable */ 10129 ret.shareability = 0; 10130 } 10131 10132 /* Combine memory type and cacheability attributes */ 10133 if (s1hi == 0 || s2hi == 0) { 10134 /* Device has precedence over normal */ 10135 if (s1lo == 0 || s2lo == 0) { 10136 /* nGnRnE has precedence over anything */ 10137 ret.attrs = 0; 10138 } else if (s1lo == 4 || s2lo == 4) { 10139 /* non-Reordering has precedence over Reordering */ 10140 ret.attrs = 4; /* nGnRE */ 10141 } else if (s1lo == 8 || s2lo == 8) { 10142 /* non-Gathering has precedence over Gathering */ 10143 ret.attrs = 8; /* nGRE */ 10144 } else { 10145 ret.attrs = 0xc; /* GRE */ 10146 } 10147 10148 /* Any location for which the resultant memory type is any 10149 * type of Device memory is always treated as Outer Shareable. 10150 */ 10151 ret.shareability = 2; 10152 } else { /* Normal memory */ 10153 /* Outer/inner cacheability combine independently */ 10154 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 10155 | combine_cacheattr_nibble(s1lo, s2lo); 10156 10157 if (ret.attrs == 0x44) { 10158 /* Any location for which the resultant memory type is Normal 10159 * Inner Non-cacheable, Outer Non-cacheable is always treated 10160 * as Outer Shareable. 10161 */ 10162 ret.shareability = 2; 10163 } 10164 } 10165 10166 return ret; 10167 } 10168 10169 10170 /* get_phys_addr - get the physical address for this virtual address 10171 * 10172 * Find the physical address corresponding to the given virtual address, 10173 * by doing a translation table walk on MMU based systems or using the 10174 * MPU state on MPU based systems. 10175 * 10176 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 10177 * prot and page_size may not be filled in, and the populated fsr value provides 10178 * information on why the translation aborted, in the format of a 10179 * DFSR/IFSR fault register, with the following caveats: 10180 * * we honour the short vs long DFSR format differences. 10181 * * the WnR bit is never set (the caller must do this). 
10182 * for PMSAv5 based systems we don't bother to return a full FSR format
10183 * value.
10184 *
10185 * @env: CPUARMState
10186 * @address: virtual address to get physical address for
10187 * @access_type: 0 for read, 1 for write, 2 for execute
10188 * @mmu_idx: MMU index indicating required translation regime
10189 * @phys_ptr: set to the physical address corresponding to the virtual address
10190 * @attrs: set to the memory transaction attributes to use
10191 * @prot: set to the permissions for the page containing phys_ptr
10192 * @page_size: set to the size of the page containing phys_ptr
10193 * @fi: set to fault info if the translation fails
10194 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
10195 */
10196 static bool get_phys_addr(CPUARMState *env, target_ulong address,
10197 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10198 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10199 target_ulong *page_size,
10200 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10201 {
10202 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
10203 /* Call ourselves recursively to do the stage 1 and then stage 2
10204 * translations.
10205 */
10206 if (arm_feature(env, ARM_FEATURE_EL2)) {
10207 hwaddr ipa;
10208 int s2_prot;
10209 int ret;
10210 ARMCacheAttrs cacheattrs2 = {};
10211
10212 ret = get_phys_addr(env, address, access_type,
10213 stage_1_mmu_idx(mmu_idx), &ipa, attrs,
10214 prot, page_size, fi, cacheattrs);
10215
10216 /* If S1 fails or S2 is disabled, return early. */
10217 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
10218 *phys_ptr = ipa;
10219 return ret;
10220 }
10221
10222 /* S1 is done. Now do S2 translation. */
10223 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
10224 phys_ptr, attrs, &s2_prot,
10225 page_size, fi,
10226 cacheattrs != NULL ? &cacheattrs2 : NULL);
10227 fi->s2addr = ipa;
10228 /* Combine the S1 and S2 perms. */
10229 *prot &= s2_prot;
10230
10231 /* Combine the S1 and S2 cache attributes, if needed */
10232 if (!ret && cacheattrs != NULL) {
10233 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
10234 }
10235
10236 return ret;
10237 } else {
10238 /*
10239 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
10240 */
10241 mmu_idx = stage_1_mmu_idx(mmu_idx);
10242 }
10243 }
10244
10245 /* The page table entries may downgrade secure to non-secure, but
10246 * cannot upgrade a non-secure translation regime's attributes
10247 * to secure.
10248 */
10249 attrs->secure = regime_is_secure(env, mmu_idx);
10250 attrs->user = regime_is_user(env, mmu_idx);
10251
10252 /* Fast Context Switch Extension. This doesn't exist at all in v8.
10253 * In v7 and earlier it affects all stage 1 translations.
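     * Addresses in the bottom 32MB of the address space (VA[31:25] == 0) have
     * the ProcID from FCSEIDR substituted into their top bits; since FCSEIDR
     * only holds bits [31:25], adding it is equivalent to ORing it in. For
     * example, with FCSEIDR == 0x04000000 a VA of 0x00001000 becomes MVA
     * 0x04001000.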
10254 */ 10255 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS 10256 && !arm_feature(env, ARM_FEATURE_V8)) { 10257 if (regime_el(env, mmu_idx) == 3) { 10258 address += env->cp15.fcseidr_s; 10259 } else { 10260 address += env->cp15.fcseidr_ns; 10261 } 10262 } 10263 10264 if (arm_feature(env, ARM_FEATURE_PMSA)) { 10265 bool ret; 10266 *page_size = TARGET_PAGE_SIZE; 10267 10268 if (arm_feature(env, ARM_FEATURE_V8)) { 10269 /* PMSAv8 */ 10270 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 10271 phys_ptr, attrs, prot, fi); 10272 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10273 /* PMSAv7 */ 10274 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 10275 phys_ptr, prot, fi); 10276 } else { 10277 /* Pre-v7 MPU */ 10278 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 10279 phys_ptr, prot, fi); 10280 } 10281 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 10282 " mmu_idx %u -> %s (prot %c%c%c)\n", 10283 access_type == MMU_DATA_LOAD ? "reading" : 10284 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 10285 (uint32_t)address, mmu_idx, 10286 ret ? "Miss" : "Hit", 10287 *prot & PAGE_READ ? 'r' : '-', 10288 *prot & PAGE_WRITE ? 'w' : '-', 10289 *prot & PAGE_EXEC ? 'x' : '-'); 10290 10291 return ret; 10292 } 10293 10294 /* Definitely a real MMU, not an MPU */ 10295 10296 if (regime_translation_disabled(env, mmu_idx)) { 10297 /* MMU disabled. */ 10298 *phys_ptr = address; 10299 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10300 *page_size = TARGET_PAGE_SIZE; 10301 return 0; 10302 } 10303 10304 if (regime_using_lpae_format(env, mmu_idx)) { 10305 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 10306 phys_ptr, attrs, prot, page_size, 10307 fi, cacheattrs); 10308 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 10309 return get_phys_addr_v6(env, address, access_type, mmu_idx, 10310 phys_ptr, attrs, prot, page_size, fi); 10311 } else { 10312 return get_phys_addr_v5(env, address, access_type, mmu_idx, 10313 phys_ptr, prot, page_size, fi); 10314 } 10315 } 10316 10317 /* Walk the page table and (if the mapping exists) add the page 10318 * to the TLB. Return false on success, or true on failure. Populate 10319 * fsr with ARM DFSR/IFSR fault register format value on failure. 10320 */ 10321 bool arm_tlb_fill(CPUState *cs, vaddr address, 10322 MMUAccessType access_type, int mmu_idx, 10323 ARMMMUFaultInfo *fi) 10324 { 10325 ARMCPU *cpu = ARM_CPU(cs); 10326 CPUARMState *env = &cpu->env; 10327 hwaddr phys_addr; 10328 target_ulong page_size; 10329 int prot; 10330 int ret; 10331 MemTxAttrs attrs = {}; 10332 10333 ret = get_phys_addr(env, address, access_type, 10334 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, 10335 &attrs, &prot, &page_size, fi, NULL); 10336 if (!ret) { 10337 /* Map a single [sub]page. 
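         * Both the virtual and physical addresses are truncated to a
         * TARGET_PAGE_SIZE boundary here; page_size tells the TLB code how
         * large the underlying mapping really is.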
*/ 10338 phys_addr &= TARGET_PAGE_MASK; 10339 address &= TARGET_PAGE_MASK; 10340 tlb_set_page_with_attrs(cs, address, phys_addr, attrs, 10341 prot, mmu_idx, page_size); 10342 return 0; 10343 } 10344 10345 return ret; 10346 } 10347 10348 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 10349 MemTxAttrs *attrs) 10350 { 10351 ARMCPU *cpu = ARM_CPU(cs); 10352 CPUARMState *env = &cpu->env; 10353 hwaddr phys_addr; 10354 target_ulong page_size; 10355 int prot; 10356 bool ret; 10357 ARMMMUFaultInfo fi = {}; 10358 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 10359 10360 *attrs = (MemTxAttrs) {}; 10361 10362 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 10363 attrs, &prot, &page_size, &fi, NULL); 10364 10365 if (ret) { 10366 return -1; 10367 } 10368 return phys_addr; 10369 } 10370 10371 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 10372 { 10373 uint32_t mask; 10374 unsigned el = arm_current_el(env); 10375 10376 /* First handle registers which unprivileged can read */ 10377 10378 switch (reg) { 10379 case 0 ... 7: /* xPSR sub-fields */ 10380 mask = 0; 10381 if ((reg & 1) && el) { 10382 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ 10383 } 10384 if (!(reg & 4)) { 10385 mask |= XPSR_NZCV | XPSR_Q; /* APSR */ 10386 } 10387 /* EPSR reads as zero */ 10388 return xpsr_read(env) & mask; 10389 break; 10390 case 20: /* CONTROL */ 10391 return env->v7m.control[env->v7m.secure]; 10392 case 0x94: /* CONTROL_NS */ 10393 /* We have to handle this here because unprivileged Secure code 10394 * can read the NS CONTROL register. 10395 */ 10396 if (!env->v7m.secure) { 10397 return 0; 10398 } 10399 return env->v7m.control[M_REG_NS]; 10400 } 10401 10402 if (el == 0) { 10403 return 0; /* unprivileged reads others as zero */ 10404 } 10405 10406 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10407 switch (reg) { 10408 case 0x88: /* MSP_NS */ 10409 if (!env->v7m.secure) { 10410 return 0; 10411 } 10412 return env->v7m.other_ss_msp; 10413 case 0x89: /* PSP_NS */ 10414 if (!env->v7m.secure) { 10415 return 0; 10416 } 10417 return env->v7m.other_ss_psp; 10418 case 0x90: /* PRIMASK_NS */ 10419 if (!env->v7m.secure) { 10420 return 0; 10421 } 10422 return env->v7m.primask[M_REG_NS]; 10423 case 0x91: /* BASEPRI_NS */ 10424 if (!env->v7m.secure) { 10425 return 0; 10426 } 10427 return env->v7m.basepri[M_REG_NS]; 10428 case 0x93: /* FAULTMASK_NS */ 10429 if (!env->v7m.secure) { 10430 return 0; 10431 } 10432 return env->v7m.faultmask[M_REG_NS]; 10433 case 0x98: /* SP_NS */ 10434 { 10435 /* This gives the non-secure SP selected based on whether we're 10436 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10437 */ 10438 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10439 10440 if (!env->v7m.secure) { 10441 return 0; 10442 } 10443 if (!arm_v7m_is_handler_mode(env) && spsel) { 10444 return env->v7m.other_ss_psp; 10445 } else { 10446 return env->v7m.other_ss_msp; 10447 } 10448 } 10449 default: 10450 break; 10451 } 10452 } 10453 10454 switch (reg) { 10455 case 8: /* MSP */ 10456 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13]; 10457 case 9: /* PSP */ 10458 return v7m_using_psp(env) ? 
env->regs[13] : env->v7m.other_sp; 10459 case 16: /* PRIMASK */ 10460 return env->v7m.primask[env->v7m.secure]; 10461 case 17: /* BASEPRI */ 10462 case 18: /* BASEPRI_MAX */ 10463 return env->v7m.basepri[env->v7m.secure]; 10464 case 19: /* FAULTMASK */ 10465 return env->v7m.faultmask[env->v7m.secure]; 10466 default: 10467 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 10468 " register %d\n", reg); 10469 return 0; 10470 } 10471 } 10472 10473 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) 10474 { 10475 /* We're passed bits [11..0] of the instruction; extract 10476 * SYSm and the mask bits. 10477 * Invalid combinations of SYSm and mask are UNPREDICTABLE; 10478 * we choose to treat them as if the mask bits were valid. 10479 * NB that the pseudocode 'mask' variable is bits [11..10], 10480 * whereas ours is [11..8]. 10481 */ 10482 uint32_t mask = extract32(maskreg, 8, 4); 10483 uint32_t reg = extract32(maskreg, 0, 8); 10484 10485 if (arm_current_el(env) == 0 && reg > 7) { 10486 /* only xPSR sub-fields may be written by unprivileged */ 10487 return; 10488 } 10489 10490 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10491 switch (reg) { 10492 case 0x88: /* MSP_NS */ 10493 if (!env->v7m.secure) { 10494 return; 10495 } 10496 env->v7m.other_ss_msp = val; 10497 return; 10498 case 0x89: /* PSP_NS */ 10499 if (!env->v7m.secure) { 10500 return; 10501 } 10502 env->v7m.other_ss_psp = val; 10503 return; 10504 case 0x90: /* PRIMASK_NS */ 10505 if (!env->v7m.secure) { 10506 return; 10507 } 10508 env->v7m.primask[M_REG_NS] = val & 1; 10509 return; 10510 case 0x91: /* BASEPRI_NS */ 10511 if (!env->v7m.secure) { 10512 return; 10513 } 10514 env->v7m.basepri[M_REG_NS] = val & 0xff; 10515 return; 10516 case 0x93: /* FAULTMASK_NS */ 10517 if (!env->v7m.secure) { 10518 return; 10519 } 10520 env->v7m.faultmask[M_REG_NS] = val & 1; 10521 return; 10522 case 0x98: /* SP_NS */ 10523 { 10524 /* This gives the non-secure SP selected based on whether we're 10525 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10526 */ 10527 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10528 10529 if (!env->v7m.secure) { 10530 return; 10531 } 10532 if (!arm_v7m_is_handler_mode(env) && spsel) { 10533 env->v7m.other_ss_psp = val; 10534 } else { 10535 env->v7m.other_ss_msp = val; 10536 } 10537 return; 10538 } 10539 default: 10540 break; 10541 } 10542 } 10543 10544 switch (reg) { 10545 case 0 ... 
7: /* xPSR sub-fields */ 10546 /* only APSR is actually writable */ 10547 if (!(reg & 4)) { 10548 uint32_t apsrmask = 0; 10549 10550 if (mask & 8) { 10551 apsrmask |= XPSR_NZCV | XPSR_Q; 10552 } 10553 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { 10554 apsrmask |= XPSR_GE; 10555 } 10556 xpsr_write(env, val, apsrmask); 10557 } 10558 break; 10559 case 8: /* MSP */ 10560 if (v7m_using_psp(env)) { 10561 env->v7m.other_sp = val; 10562 } else { 10563 env->regs[13] = val; 10564 } 10565 break; 10566 case 9: /* PSP */ 10567 if (v7m_using_psp(env)) { 10568 env->regs[13] = val; 10569 } else { 10570 env->v7m.other_sp = val; 10571 } 10572 break; 10573 case 16: /* PRIMASK */ 10574 env->v7m.primask[env->v7m.secure] = val & 1; 10575 break; 10576 case 17: /* BASEPRI */ 10577 env->v7m.basepri[env->v7m.secure] = val & 0xff; 10578 break; 10579 case 18: /* BASEPRI_MAX */ 10580 val &= 0xff; 10581 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] 10582 || env->v7m.basepri[env->v7m.secure] == 0)) { 10583 env->v7m.basepri[env->v7m.secure] = val; 10584 } 10585 break; 10586 case 19: /* FAULTMASK */ 10587 env->v7m.faultmask[env->v7m.secure] = val & 1; 10588 break; 10589 case 20: /* CONTROL */ 10590 /* Writing to the SPSEL bit only has an effect if we are in 10591 * thread mode; other bits can be updated by any privileged code. 10592 * write_v7m_control_spsel() deals with updating the SPSEL bit in 10593 * env->v7m.control, so we only need update the others. 10594 * For v7M, we must just ignore explicit writes to SPSEL in handler 10595 * mode; for v8M the write is permitted but will have no effect. 10596 */ 10597 if (arm_feature(env, ARM_FEATURE_V8) || 10598 !arm_v7m_is_handler_mode(env)) { 10599 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 10600 } 10601 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; 10602 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; 10603 break; 10604 default: 10605 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 10606 " register %d\n", reg); 10607 return; 10608 } 10609 } 10610 10611 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 10612 { 10613 /* Implement the TT instruction. op is bits [7:6] of the insn. */ 10614 bool forceunpriv = op & 1; 10615 bool alt = op & 2; 10616 V8M_SAttributes sattrs = {}; 10617 uint32_t tt_resp; 10618 bool r, rw, nsr, nsrw, mrvalid; 10619 int prot; 10620 ARMMMUFaultInfo fi = {}; 10621 MemTxAttrs attrs = {}; 10622 hwaddr phys_addr; 10623 ARMMMUIdx mmu_idx; 10624 uint32_t mregion; 10625 bool targetpriv; 10626 bool targetsec = env->v7m.secure; 10627 10628 /* Work out what the security state and privilege level we're 10629 * interested in is... 10630 */ 10631 if (alt) { 10632 targetsec = !targetsec; 10633 } 10634 10635 if (forceunpriv) { 10636 targetpriv = false; 10637 } else { 10638 targetpriv = arm_v7m_is_handler_mode(env) || 10639 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); 10640 } 10641 10642 /* ...and then figure out which MMU index this is */ 10643 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); 10644 10645 /* We know that the MPU and SAU don't care about the access type 10646 * for our purposes beyond that we don't want to claim to be 10647 * an insn fetch, so we arbitrarily call this a read. 10648 */ 10649 10650 /* MPU region info only available for privileged or if 10651 * inspecting the other MPU state. 
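     * Unprivileged TT queries of the current state therefore take the else
     * branch below, reporting MRVALID == 0 and MREGION == 0.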
10652 */ 10653 if (arm_current_el(env) != 0 || alt) { 10654 /* We can ignore the return value as prot is always set */ 10655 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, 10656 &phys_addr, &attrs, &prot, &fi, &mregion); 10657 if (mregion == -1) { 10658 mrvalid = false; 10659 mregion = 0; 10660 } else { 10661 mrvalid = true; 10662 } 10663 r = prot & PAGE_READ; 10664 rw = prot & PAGE_WRITE; 10665 } else { 10666 r = false; 10667 rw = false; 10668 mrvalid = false; 10669 mregion = 0; 10670 } 10671 10672 if (env->v7m.secure) { 10673 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 10674 nsr = sattrs.ns && r; 10675 nsrw = sattrs.ns && rw; 10676 } else { 10677 sattrs.ns = true; 10678 nsr = false; 10679 nsrw = false; 10680 } 10681 10682 tt_resp = (sattrs.iregion << 24) | 10683 (sattrs.irvalid << 23) | 10684 ((!sattrs.ns) << 22) | 10685 (nsrw << 21) | 10686 (nsr << 20) | 10687 (rw << 19) | 10688 (r << 18) | 10689 (sattrs.srvalid << 17) | 10690 (mrvalid << 16) | 10691 (sattrs.sregion << 8) | 10692 mregion; 10693 10694 return tt_resp; 10695 } 10696 10697 #endif 10698 10699 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) 10700 { 10701 /* Implement DC ZVA, which zeroes a fixed-length block of memory. 10702 * Note that we do not implement the (architecturally mandated) 10703 * alignment fault for attempts to use this on Device memory 10704 * (which matches the usual QEMU behaviour of not implementing either 10705 * alignment faults or any memory attribute handling). 10706 */ 10707 10708 ARMCPU *cpu = arm_env_get_cpu(env); 10709 uint64_t blocklen = 4 << cpu->dcz_blocksize; 10710 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 10711 10712 #ifndef CONFIG_USER_ONLY 10713 { 10714 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than 10715 * the block size so we might have to do more than one TLB lookup. 10716 * We know that in fact for any v8 CPU the page size is at least 4K 10717 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only 10718 * 1K as an artefact of legacy v5 subpage support being present in the 10719 * same QEMU executable. 10720 */ 10721 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); 10722 void *hostaddr[maxidx]; 10723 int try, i; 10724 unsigned mmu_idx = cpu_mmu_index(env, false); 10725 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 10726 10727 for (try = 0; try < 2; try++) { 10728 10729 for (i = 0; i < maxidx; i++) { 10730 hostaddr[i] = tlb_vaddr_to_host(env, 10731 vaddr + TARGET_PAGE_SIZE * i, 10732 1, mmu_idx); 10733 if (!hostaddr[i]) { 10734 break; 10735 } 10736 } 10737 if (i == maxidx) { 10738 /* If it's all in the TLB it's fair game for just writing to; 10739 * we know we don't need to update dirty status, etc. 10740 */ 10741 for (i = 0; i < maxidx - 1; i++) { 10742 memset(hostaddr[i], 0, TARGET_PAGE_SIZE); 10743 } 10744 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); 10745 return; 10746 } 10747 /* OK, try a store and see if we can populate the tlb. This 10748 * might cause an exception if the memory isn't writable, 10749 * in which case we will longjmp out of here. We must for 10750 * this purpose use the actual register value passed to us 10751 * so that we get the fault address right. 
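             * GETPC() lets the memory helper unwind to the guest DC ZVA
             * instruction if the store faults, so the exception is raised
             * with the right CPU state.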
10752 */ 10753 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); 10754 /* Now we can populate the other TLB entries, if any */ 10755 for (i = 0; i < maxidx; i++) { 10756 uint64_t va = vaddr + TARGET_PAGE_SIZE * i; 10757 if (va != (vaddr_in & TARGET_PAGE_MASK)) { 10758 helper_ret_stb_mmu(env, va, 0, oi, GETPC()); 10759 } 10760 } 10761 } 10762 10763 /* Slow path (probably attempt to do this to an I/O device or 10764 * similar, or clearing of a block of code we have translations 10765 * cached for). Just do a series of byte writes as the architecture 10766 * demands. It's not worth trying to use a cpu_physical_memory_map(), 10767 * memset(), unmap() sequence here because: 10768 * + we'd need to account for the blocksize being larger than a page 10769 * + the direct-RAM access case is almost always going to be dealt 10770 * with in the fastpath code above, so there's no speed benefit 10771 * + we would have to deal with the map returning NULL because the 10772 * bounce buffer was in use 10773 */ 10774 for (i = 0; i < blocklen; i++) { 10775 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); 10776 } 10777 } 10778 #else 10779 memset(g2h(vaddr), 0, blocklen); 10780 #endif 10781 } 10782 10783 /* Note that signed overflow is undefined in C. The following routines are 10784 careful to use unsigned types where modulo arithmetic is required. 10785 Failure to do so _will_ break on newer gcc. */ 10786 10787 /* Signed saturating arithmetic. */ 10788 10789 /* Perform 16-bit signed saturating addition. */ 10790 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 10791 { 10792 uint16_t res; 10793 10794 res = a + b; 10795 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 10796 if (a & 0x8000) 10797 res = 0x8000; 10798 else 10799 res = 0x7fff; 10800 } 10801 return res; 10802 } 10803 10804 /* Perform 8-bit signed saturating addition. */ 10805 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 10806 { 10807 uint8_t res; 10808 10809 res = a + b; 10810 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 10811 if (a & 0x80) 10812 res = 0x80; 10813 else 10814 res = 0x7f; 10815 } 10816 return res; 10817 } 10818 10819 /* Perform 16-bit signed saturating subtraction. */ 10820 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 10821 { 10822 uint16_t res; 10823 10824 res = a - b; 10825 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 10826 if (a & 0x8000) 10827 res = 0x8000; 10828 else 10829 res = 0x7fff; 10830 } 10831 return res; 10832 } 10833 10834 /* Perform 8-bit signed saturating subtraction. */ 10835 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 10836 { 10837 uint8_t res; 10838 10839 res = a - b; 10840 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 10841 if (a & 0x80) 10842 res = 0x80; 10843 else 10844 res = 0x7f; 10845 } 10846 return res; 10847 } 10848 10849 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 10850 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 10851 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 10852 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 10853 #define PFX q 10854 10855 #include "op_addsub.h" 10856 10857 /* Unsigned saturating arithmetic. 
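 * For example, add16_usat(0xfff0, 0x0020) saturates to 0xffff and
 * sub16_usat(0x0010, 0x0020) saturates to 0.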
*/ 10858 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 10859 { 10860 uint16_t res; 10861 res = a + b; 10862 if (res < a) 10863 res = 0xffff; 10864 return res; 10865 } 10866 10867 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 10868 { 10869 if (a > b) 10870 return a - b; 10871 else 10872 return 0; 10873 } 10874 10875 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 10876 { 10877 uint8_t res; 10878 res = a + b; 10879 if (res < a) 10880 res = 0xff; 10881 return res; 10882 } 10883 10884 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 10885 { 10886 if (a > b) 10887 return a - b; 10888 else 10889 return 0; 10890 } 10891 10892 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 10893 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 10894 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 10895 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 10896 #define PFX uq 10897 10898 #include "op_addsub.h" 10899 10900 /* Signed modulo arithmetic. */ 10901 #define SARITH16(a, b, n, op) do { \ 10902 int32_t sum; \ 10903 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 10904 RESULT(sum, n, 16); \ 10905 if (sum >= 0) \ 10906 ge |= 3 << (n * 2); \ 10907 } while(0) 10908 10909 #define SARITH8(a, b, n, op) do { \ 10910 int32_t sum; \ 10911 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 10912 RESULT(sum, n, 8); \ 10913 if (sum >= 0) \ 10914 ge |= 1 << n; \ 10915 } while(0) 10916 10917 10918 #define ADD16(a, b, n) SARITH16(a, b, n, +) 10919 #define SUB16(a, b, n) SARITH16(a, b, n, -) 10920 #define ADD8(a, b, n) SARITH8(a, b, n, +) 10921 #define SUB8(a, b, n) SARITH8(a, b, n, -) 10922 #define PFX s 10923 #define ARITH_GE 10924 10925 #include "op_addsub.h" 10926 10927 /* Unsigned modulo arithmetic. */ 10928 #define ADD16(a, b, n) do { \ 10929 uint32_t sum; \ 10930 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 10931 RESULT(sum, n, 16); \ 10932 if ((sum >> 16) == 1) \ 10933 ge |= 3 << (n * 2); \ 10934 } while(0) 10935 10936 #define ADD8(a, b, n) do { \ 10937 uint32_t sum; \ 10938 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 10939 RESULT(sum, n, 8); \ 10940 if ((sum >> 8) == 1) \ 10941 ge |= 1 << n; \ 10942 } while(0) 10943 10944 #define SUB16(a, b, n) do { \ 10945 uint32_t sum; \ 10946 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 10947 RESULT(sum, n, 16); \ 10948 if ((sum >> 16) == 0) \ 10949 ge |= 3 << (n * 2); \ 10950 } while(0) 10951 10952 #define SUB8(a, b, n) do { \ 10953 uint32_t sum; \ 10954 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 10955 RESULT(sum, n, 8); \ 10956 if ((sum >> 8) == 0) \ 10957 ge |= 1 << n; \ 10958 } while(0) 10959 10960 #define PFX u 10961 #define ARITH_GE 10962 10963 #include "op_addsub.h" 10964 10965 /* Halved signed arithmetic. */ 10966 #define ADD16(a, b, n) \ 10967 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 10968 #define SUB16(a, b, n) \ 10969 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 10970 #define ADD8(a, b, n) \ 10971 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 10972 #define SUB8(a, b, n) \ 10973 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 10974 #define PFX sh 10975 10976 #include "op_addsub.h" 10977 10978 /* Halved unsigned arithmetic. 
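 * The halved result always fits in the element, so no saturation is needed:
 * for UHADD16, (0xffff + 0xffff) >> 1 == 0xffff.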
*/ 10979 #define ADD16(a, b, n) \ 10980 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 10981 #define SUB16(a, b, n) \ 10982 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 10983 #define ADD8(a, b, n) \ 10984 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 10985 #define SUB8(a, b, n) \ 10986 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 10987 #define PFX uh 10988 10989 #include "op_addsub.h" 10990 10991 static inline uint8_t do_usad(uint8_t a, uint8_t b) 10992 { 10993 if (a > b) 10994 return a - b; 10995 else 10996 return b - a; 10997 } 10998 10999 /* Unsigned sum of absolute byte differences. */ 11000 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 11001 { 11002 uint32_t sum; 11003 sum = do_usad(a, b); 11004 sum += do_usad(a >> 8, b >> 8); 11005 sum += do_usad(a >> 16, b >>16); 11006 sum += do_usad(a >> 24, b >> 24); 11007 return sum; 11008 } 11009 11010 /* For ARMv6 SEL instruction. */ 11011 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 11012 { 11013 uint32_t mask; 11014 11015 mask = 0; 11016 if (flags & 1) 11017 mask |= 0xff; 11018 if (flags & 2) 11019 mask |= 0xff00; 11020 if (flags & 4) 11021 mask |= 0xff0000; 11022 if (flags & 8) 11023 mask |= 0xff000000; 11024 return (a & mask) | (b & ~mask); 11025 } 11026 11027 /* VFP support. We follow the convention used for VFP instructions: 11028 Single precision routines have a "s" suffix, double precision a 11029 "d" suffix. */ 11030 11031 /* Convert host exception flags to vfp form. */ 11032 static inline int vfp_exceptbits_from_host(int host_bits) 11033 { 11034 int target_bits = 0; 11035 11036 if (host_bits & float_flag_invalid) 11037 target_bits |= 1; 11038 if (host_bits & float_flag_divbyzero) 11039 target_bits |= 2; 11040 if (host_bits & float_flag_overflow) 11041 target_bits |= 4; 11042 if (host_bits & (float_flag_underflow | float_flag_output_denormal)) 11043 target_bits |= 8; 11044 if (host_bits & float_flag_inexact) 11045 target_bits |= 0x10; 11046 if (host_bits & float_flag_input_denormal) 11047 target_bits |= 0x80; 11048 return target_bits; 11049 } 11050 11051 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) 11052 { 11053 int i; 11054 uint32_t fpscr; 11055 11056 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) 11057 | (env->vfp.vec_len << 16) 11058 | (env->vfp.vec_stride << 20); 11059 i = get_float_exception_flags(&env->vfp.fp_status); 11060 i |= get_float_exception_flags(&env->vfp.standard_fp_status); 11061 fpscr |= vfp_exceptbits_from_host(i); 11062 return fpscr; 11063 } 11064 11065 uint32_t vfp_get_fpscr(CPUARMState *env) 11066 { 11067 return HELPER(vfp_get_fpscr)(env); 11068 } 11069 11070 /* Convert vfp exception flags to target form. 
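 * The target bit positions are the FPSCR cumulative exception flags:
 * IOC (bit 0), DZC (bit 1), OFC (bit 2), UFC (bit 3), IXC (bit 4) and
 * IDC (bit 7).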
*/ 11071 static inline int vfp_exceptbits_to_host(int target_bits) 11072 { 11073 int host_bits = 0; 11074 11075 if (target_bits & 1) 11076 host_bits |= float_flag_invalid; 11077 if (target_bits & 2) 11078 host_bits |= float_flag_divbyzero; 11079 if (target_bits & 4) 11080 host_bits |= float_flag_overflow; 11081 if (target_bits & 8) 11082 host_bits |= float_flag_underflow; 11083 if (target_bits & 0x10) 11084 host_bits |= float_flag_inexact; 11085 if (target_bits & 0x80) 11086 host_bits |= float_flag_input_denormal; 11087 return host_bits; 11088 } 11089 11090 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) 11091 { 11092 int i; 11093 uint32_t changed; 11094 11095 changed = env->vfp.xregs[ARM_VFP_FPSCR]; 11096 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); 11097 env->vfp.vec_len = (val >> 16) & 7; 11098 env->vfp.vec_stride = (val >> 20) & 3; 11099 11100 changed ^= val; 11101 if (changed & (3 << 22)) { 11102 i = (val >> 22) & 3; 11103 switch (i) { 11104 case FPROUNDING_TIEEVEN: 11105 i = float_round_nearest_even; 11106 break; 11107 case FPROUNDING_POSINF: 11108 i = float_round_up; 11109 break; 11110 case FPROUNDING_NEGINF: 11111 i = float_round_down; 11112 break; 11113 case FPROUNDING_ZERO: 11114 i = float_round_to_zero; 11115 break; 11116 } 11117 set_float_rounding_mode(i, &env->vfp.fp_status); 11118 } 11119 if (changed & (1 << 24)) { 11120 set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status); 11121 set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status); 11122 } 11123 if (changed & (1 << 25)) 11124 set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status); 11125 11126 i = vfp_exceptbits_to_host(val); 11127 set_float_exception_flags(i, &env->vfp.fp_status); 11128 set_float_exception_flags(0, &env->vfp.standard_fp_status); 11129 } 11130 11131 void vfp_set_fpscr(CPUARMState *env, uint32_t val) 11132 { 11133 HELPER(vfp_set_fpscr)(env, val); 11134 } 11135 11136 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) 11137 11138 #define VFP_BINOP(name) \ 11139 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ 11140 { \ 11141 float_status *fpst = fpstp; \ 11142 return float32_ ## name(a, b, fpst); \ 11143 } \ 11144 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ 11145 { \ 11146 float_status *fpst = fpstp; \ 11147 return float64_ ## name(a, b, fpst); \ 11148 } 11149 VFP_BINOP(add) 11150 VFP_BINOP(sub) 11151 VFP_BINOP(mul) 11152 VFP_BINOP(div) 11153 VFP_BINOP(min) 11154 VFP_BINOP(max) 11155 VFP_BINOP(minnum) 11156 VFP_BINOP(maxnum) 11157 #undef VFP_BINOP 11158 11159 float32 VFP_HELPER(neg, s)(float32 a) 11160 { 11161 return float32_chs(a); 11162 } 11163 11164 float64 VFP_HELPER(neg, d)(float64 a) 11165 { 11166 return float64_chs(a); 11167 } 11168 11169 float32 VFP_HELPER(abs, s)(float32 a) 11170 { 11171 return float32_abs(a); 11172 } 11173 11174 float64 VFP_HELPER(abs, d)(float64 a) 11175 { 11176 return float64_abs(a); 11177 } 11178 11179 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) 11180 { 11181 return float32_sqrt(a, &env->vfp.fp_status); 11182 } 11183 11184 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) 11185 { 11186 return float64_sqrt(a, &env->vfp.fp_status); 11187 } 11188 11189 /* XXX: check quiet/signaling case */ 11190 #define DO_VFP_cmp(p, type) \ 11191 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ 11192 { \ 11193 uint32_t flags; \ 11194 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \ 11195 case 0: flags = 0x6; break; \ 11196 case -1: flags = 0x8; break; \ 
11197 case 1: flags = 0x2; break; \ 11198 default: case 2: flags = 0x3; break; \ 11199 } \ 11200 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11201 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11202 } \ 11203 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ 11204 { \ 11205 uint32_t flags; \ 11206 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \ 11207 case 0: flags = 0x6; break; \ 11208 case -1: flags = 0x8; break; \ 11209 case 1: flags = 0x2; break; \ 11210 default: case 2: flags = 0x3; break; \ 11211 } \ 11212 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11213 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11214 } 11215 DO_VFP_cmp(s, float32) 11216 DO_VFP_cmp(d, float64) 11217 #undef DO_VFP_cmp 11218 11219 /* Integer to float and float to integer conversions */ 11220 11221 #define CONV_ITOF(name, fsz, sign) \ 11222 float##fsz HELPER(name)(uint32_t x, void *fpstp) \ 11223 { \ 11224 float_status *fpst = fpstp; \ 11225 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ 11226 } 11227 11228 #define CONV_FTOI(name, fsz, sign, round) \ 11229 uint32_t HELPER(name)(float##fsz x, void *fpstp) \ 11230 { \ 11231 float_status *fpst = fpstp; \ 11232 if (float##fsz##_is_any_nan(x)) { \ 11233 float_raise(float_flag_invalid, fpst); \ 11234 return 0; \ 11235 } \ 11236 return float##fsz##_to_##sign##int32##round(x, fpst); \ 11237 } 11238 11239 #define FLOAT_CONVS(name, p, fsz, sign) \ 11240 CONV_ITOF(vfp_##name##to##p, fsz, sign) \ 11241 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \ 11242 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero) 11243 11244 FLOAT_CONVS(si, s, 32, ) 11245 FLOAT_CONVS(si, d, 64, ) 11246 FLOAT_CONVS(ui, s, 32, u) 11247 FLOAT_CONVS(ui, d, 64, u) 11248 11249 #undef CONV_ITOF 11250 #undef CONV_FTOI 11251 #undef FLOAT_CONVS 11252 11253 /* floating point conversion */ 11254 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) 11255 { 11256 float64 r = float32_to_float64(x, &env->vfp.fp_status); 11257 /* ARM requires that S<->D conversion of any kind of NaN generates 11258 * a quiet NaN by forcing the most significant frac bit to 1. 11259 */ 11260 return float64_maybe_silence_nan(r, &env->vfp.fp_status); 11261 } 11262 11263 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) 11264 { 11265 float32 r = float64_to_float32(x, &env->vfp.fp_status); 11266 /* ARM requires that S<->D conversion of any kind of NaN generates 11267 * a quiet NaN by forcing the most significant frac bit to 1. 11268 */ 11269 return float32_maybe_silence_nan(r, &env->vfp.fp_status); 11270 } 11271 11272 /* VFP3 fixed point conversion. */ 11273 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11274 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ 11275 void *fpstp) \ 11276 { \ 11277 float_status *fpst = fpstp; \ 11278 float##fsz tmp; \ 11279 tmp = itype##_to_##float##fsz(x, fpst); \ 11280 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \ 11281 } 11282 11283 /* Notice that we want only input-denormal exception flags from the 11284 * scalbn operation: the other possible flags (overflow+inexact if 11285 * we overflow to infinity, output-denormal) aren't correct for the 11286 * complete scale-and-convert operation. 
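 * The final float-to-int conversion then raises whatever flags (invalid,
 * inexact) are appropriate for the overall operation. For example, converting
 * 1.5 to a signed fixed-point value with 8 fraction bits computes
 * (int)(1.5 * 2^8) == 384.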
11287 */ 11288 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \ 11289 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \ 11290 uint32_t shift, \ 11291 void *fpstp) \ 11292 { \ 11293 float_status *fpst = fpstp; \ 11294 int old_exc_flags = get_float_exception_flags(fpst); \ 11295 float##fsz tmp; \ 11296 if (float##fsz##_is_any_nan(x)) { \ 11297 float_raise(float_flag_invalid, fpst); \ 11298 return 0; \ 11299 } \ 11300 tmp = float##fsz##_scalbn(x, shift, fpst); \ 11301 old_exc_flags |= get_float_exception_flags(fpst) \ 11302 & float_flag_input_denormal; \ 11303 set_float_exception_flags(old_exc_flags, fpst); \ 11304 return float##fsz##_to_##itype##round(tmp, fpst); \ 11305 } 11306 11307 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ 11308 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11309 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \ 11310 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11311 11312 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ 11313 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11314 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11315 11316 VFP_CONV_FIX(sh, d, 64, 64, int16) 11317 VFP_CONV_FIX(sl, d, 64, 64, int32) 11318 VFP_CONV_FIX_A64(sq, d, 64, 64, int64) 11319 VFP_CONV_FIX(uh, d, 64, 64, uint16) 11320 VFP_CONV_FIX(ul, d, 64, 64, uint32) 11321 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) 11322 VFP_CONV_FIX(sh, s, 32, 32, int16) 11323 VFP_CONV_FIX(sl, s, 32, 32, int32) 11324 VFP_CONV_FIX_A64(sq, s, 32, 64, int64) 11325 VFP_CONV_FIX(uh, s, 32, 32, uint16) 11326 VFP_CONV_FIX(ul, s, 32, 32, uint32) 11327 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) 11328 #undef VFP_CONV_FIX 11329 #undef VFP_CONV_FIX_FLOAT 11330 #undef VFP_CONV_FLOAT_FIX_ROUND 11331 11332 /* Set the current fp rounding mode and return the old one. 11333 * The argument is a softfloat float_round_ value. 11334 */ 11335 uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env) 11336 { 11337 float_status *fp_status = &env->vfp.fp_status; 11338 11339 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11340 set_float_rounding_mode(rmode, fp_status); 11341 11342 return prev_rmode; 11343 } 11344 11345 /* Set the current fp rounding mode in the standard fp status and return 11346 * the old one. This is for NEON instructions that need to change the 11347 * rounding mode but wish to use the standard FPSCR values for everything 11348 * else. Always set the rounding mode back to the correct value after 11349 * modifying it. 11350 * The argument is a softfloat float_round_ value. 11351 */ 11352 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) 11353 { 11354 float_status *fp_status = &env->vfp.standard_fp_status; 11355 11356 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11357 set_float_rounding_mode(rmode, fp_status); 11358 11359 return prev_rmode; 11360 } 11361 11362 /* Half precision conversions. 
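 * FPSCR bit 26 is the AHP control: when it is set the Alternative
 * half-precision format (no infinities or NaNs) is used instead of IEEE,
 * which is why the helpers below pass ieee == !AHP to softfloat.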
*/ 11363 static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s) 11364 { 11365 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; 11366 float32 r = float16_to_float32(make_float16(a), ieee, s); 11367 if (ieee) { 11368 return float32_maybe_silence_nan(r, s); 11369 } 11370 return r; 11371 } 11372 11373 static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s) 11374 { 11375 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; 11376 float16 r = float32_to_float16(a, ieee, s); 11377 if (ieee) { 11378 r = float16_maybe_silence_nan(r, s); 11379 } 11380 return float16_val(r); 11381 } 11382 11383 float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env) 11384 { 11385 return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status); 11386 } 11387 11388 uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env) 11389 { 11390 return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status); 11391 } 11392 11393 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env) 11394 { 11395 return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status); 11396 } 11397 11398 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env) 11399 { 11400 return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status); 11401 } 11402 11403 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env) 11404 { 11405 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; 11406 float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status); 11407 if (ieee) { 11408 return float64_maybe_silence_nan(r, &env->vfp.fp_status); 11409 } 11410 return r; 11411 } 11412 11413 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env) 11414 { 11415 int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0; 11416 float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status); 11417 if (ieee) { 11418 r = float16_maybe_silence_nan(r, &env->vfp.fp_status); 11419 } 11420 return float16_val(r); 11421 } 11422 11423 #define float32_two make_float32(0x40000000) 11424 #define float32_three make_float32(0x40400000) 11425 #define float32_one_point_five make_float32(0x3fc00000) 11426 11427 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) 11428 { 11429 float_status *s = &env->vfp.standard_fp_status; 11430 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11431 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11432 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11433 float_raise(float_flag_input_denormal, s); 11434 } 11435 return float32_two; 11436 } 11437 return float32_sub(float32_two, float32_mul(a, b, s), s); 11438 } 11439 11440 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) 11441 { 11442 float_status *s = &env->vfp.standard_fp_status; 11443 float32 product; 11444 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11445 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11446 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11447 float_raise(float_flag_input_denormal, s); 11448 } 11449 return float32_one_point_five; 11450 } 11451 product = float32_mul(a, b, s); 11452 return float32_div(float32_sub(float32_three, product, s), float32_two, s); 11453 } 11454 11455 /* NEON helpers. */ 11456 11457 /* Constants 256 and 512 are used in some helpers; we avoid relying on 11458 * int->float conversions at run-time. 
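 * 0x4070000000000000 and 0x4080000000000000 are the IEEE-754 double encodings
 * of 256.0 and 512.0; float32_maxnorm and float64_maxnorm below are the
 * largest finite single- and double-precision values (FLT_MAX and DBL_MAX).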
*/ 11459 #define float64_256 make_float64(0x4070000000000000LL) 11460 #define float64_512 make_float64(0x4080000000000000LL) 11461 #define float32_maxnorm make_float32(0x7f7fffff) 11462 #define float64_maxnorm make_float64(0x7fefffffffffffffLL) 11463 11464 /* Reciprocal functions 11465 * 11466 * The algorithm that must be used to calculate the estimate 11467 * is specified by the ARM ARM, see FPRecipEstimate() 11468 */ 11469 11470 static float64 recip_estimate(float64 a, float_status *real_fp_status) 11471 { 11472 /* These calculations mustn't set any fp exception flags, 11473 * so we use a local copy of the fp_status. 11474 */ 11475 float_status dummy_status = *real_fp_status; 11476 float_status *s = &dummy_status; 11477 /* q = (int)(a * 512.0) */ 11478 float64 q = float64_mul(float64_512, a, s); 11479 int64_t q_int = float64_to_int64_round_to_zero(q, s); 11480 11481 /* r = 1.0 / (((double)q + 0.5) / 512.0) */ 11482 q = int64_to_float64(q_int, s); 11483 q = float64_add(q, float64_half, s); 11484 q = float64_div(q, float64_512, s); 11485 q = float64_div(float64_one, q, s); 11486 11487 /* s = (int)(256.0 * r + 0.5) */ 11488 q = float64_mul(q, float64_256, s); 11489 q = float64_add(q, float64_half, s); 11490 q_int = float64_to_int64_round_to_zero(q, s); 11491 11492 /* return (double)s / 256.0 */ 11493 return float64_div(int64_to_float64(q_int, s), float64_256, s); 11494 } 11495 11496 /* Common wrapper to call recip_estimate */ 11497 static float64 call_recip_estimate(float64 num, int off, float_status *fpst) 11498 { 11499 uint64_t val64 = float64_val(num); 11500 uint64_t frac = extract64(val64, 0, 52); 11501 int64_t exp = extract64(val64, 52, 11); 11502 uint64_t sbit; 11503 float64 scaled, estimate; 11504 11505 /* Generate the scaled number for the estimate function */ 11506 if (exp == 0) { 11507 if (extract64(frac, 51, 1) == 0) { 11508 exp = -1; 11509 frac = extract64(frac, 0, 50) << 2; 11510 } else { 11511 frac = extract64(frac, 0, 51) << 1; 11512 } 11513 } 11514 11515 /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */ 11516 scaled = make_float64((0x3feULL << 52) 11517 | extract64(frac, 44, 8) << 44); 11518 11519 estimate = recip_estimate(scaled, fpst); 11520 11521 /* Build new result */ 11522 val64 = float64_val(estimate); 11523 sbit = 0x8000000000000000ULL & val64; 11524 exp = off - exp; 11525 frac = extract64(val64, 0, 52); 11526 11527 if (exp == 0) { 11528 frac = 1ULL << 51 | extract64(frac, 1, 51); 11529 } else if (exp == -1) { 11530 frac = 1ULL << 50 | extract64(frac, 2, 50); 11531 exp = 0; 11532 } 11533 11534 return make_float64(sbit | (exp << 52) | frac); 11535 } 11536 11537 static bool round_to_inf(float_status *fpst, bool sign_bit) 11538 { 11539 switch (fpst->float_rounding_mode) { 11540 case float_round_nearest_even: /* Round to Nearest */ 11541 return true; 11542 case float_round_up: /* Round to +Inf */ 11543 return !sign_bit; 11544 case float_round_down: /* Round to -Inf */ 11545 return sign_bit; 11546 case float_round_to_zero: /* Round to Zero */ 11547 return false; 11548 } 11549 11550 g_assert_not_reached(); 11551 } 11552 11553 float32 HELPER(recpe_f32)(float32 input, void *fpstp) 11554 { 11555 float_status *fpst = fpstp; 11556 float32 f32 = float32_squash_input_denormal(input, fpst); 11557 uint32_t f32_val = float32_val(f32); 11558 uint32_t f32_sbit = 0x80000000ULL & f32_val; 11559 int32_t f32_exp = extract32(f32_val, 23, 8); 11560 uint32_t f32_frac = extract32(f32_val, 0, 23); 11561 float64 f64, r64; 11562 uint64_t r64_val; 11563 int64_t r64_exp; 11564 
uint64_t r64_frac; 11565 11566 if (float32_is_any_nan(f32)) { 11567 float32 nan = f32; 11568 if (float32_is_signaling_nan(f32, fpst)) { 11569 float_raise(float_flag_invalid, fpst); 11570 nan = float32_maybe_silence_nan(f32, fpst); 11571 } 11572 if (fpst->default_nan_mode) { 11573 nan = float32_default_nan(fpst); 11574 } 11575 return nan; 11576 } else if (float32_is_infinity(f32)) { 11577 return float32_set_sign(float32_zero, float32_is_neg(f32)); 11578 } else if (float32_is_zero(f32)) { 11579 float_raise(float_flag_divbyzero, fpst); 11580 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 11581 } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) { 11582 /* Abs(value) < 2.0^-128 */ 11583 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11584 if (round_to_inf(fpst, f32_sbit)) { 11585 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 11586 } else { 11587 return float32_set_sign(float32_maxnorm, float32_is_neg(f32)); 11588 } 11589 } else if (f32_exp >= 253 && fpst->flush_to_zero) { 11590 float_raise(float_flag_underflow, fpst); 11591 return float32_set_sign(float32_zero, float32_is_neg(f32)); 11592 } 11593 11594 11595 f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29); 11596 r64 = call_recip_estimate(f64, 253, fpst); 11597 r64_val = float64_val(r64); 11598 r64_exp = extract64(r64_val, 52, 11); 11599 r64_frac = extract64(r64_val, 0, 52); 11600 11601 /* result = sign : result_exp<7:0> : fraction<51:29>; */ 11602 return make_float32(f32_sbit | 11603 (r64_exp & 0xff) << 23 | 11604 extract64(r64_frac, 29, 24)); 11605 } 11606 11607 float64 HELPER(recpe_f64)(float64 input, void *fpstp) 11608 { 11609 float_status *fpst = fpstp; 11610 float64 f64 = float64_squash_input_denormal(input, fpst); 11611 uint64_t f64_val = float64_val(f64); 11612 uint64_t f64_sbit = 0x8000000000000000ULL & f64_val; 11613 int64_t f64_exp = extract64(f64_val, 52, 11); 11614 float64 r64; 11615 uint64_t r64_val; 11616 int64_t r64_exp; 11617 uint64_t r64_frac; 11618 11619 /* Deal with any special cases */ 11620 if (float64_is_any_nan(f64)) { 11621 float64 nan = f64; 11622 if (float64_is_signaling_nan(f64, fpst)) { 11623 float_raise(float_flag_invalid, fpst); 11624 nan = float64_maybe_silence_nan(f64, fpst); 11625 } 11626 if (fpst->default_nan_mode) { 11627 nan = float64_default_nan(fpst); 11628 } 11629 return nan; 11630 } else if (float64_is_infinity(f64)) { 11631 return float64_set_sign(float64_zero, float64_is_neg(f64)); 11632 } else if (float64_is_zero(f64)) { 11633 float_raise(float_flag_divbyzero, fpst); 11634 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 11635 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { 11636 /* Abs(value) < 2.0^-1024 */ 11637 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11638 if (round_to_inf(fpst, f64_sbit)) { 11639 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 11640 } else { 11641 return float64_set_sign(float64_maxnorm, float64_is_neg(f64)); 11642 } 11643 } else if (f64_exp >= 2045 && fpst->flush_to_zero) { 11644 float_raise(float_flag_underflow, fpst); 11645 return float64_set_sign(float64_zero, float64_is_neg(f64)); 11646 } 11647 11648 r64 = call_recip_estimate(f64, 2045, fpst); 11649 r64_val = float64_val(r64); 11650 r64_exp = extract64(r64_val, 52, 11); 11651 r64_frac = extract64(r64_val, 0, 52); 11652 11653 /* result = sign : result_exp<10:0> : fraction<51:0> */ 11654 return make_float64(f64_sbit | 11655 ((r64_exp & 0x7ff) << 52) | 11656 r64_frac); 11657 } 11658 11659 /* The 
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = *real_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0); */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */

    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0; */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sbit = 0x80000000 & val;
    int32_t f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;
    uint64_t val64;
    int result_exp;
    float64 f64;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_maybe_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.
     */
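    /* Illustrative note: if the input was a subnormal, its biased exponent
     * is zero, so the loop below shifts the fraction up until its leading 1
     * reaches bit 51, decrementing the exponent for each shift; that bit is
     * then dropped because it becomes the implicit integer bit of the
     * normalized double.  The parity test which follows picks a biased
     * exponent of 0x3fe or 0x3fd so the scaled value lands in [0.25, 1.0),
     * as recip_sqrt_estimate() expects.
     */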
    f64_frac = ((uint64_t) f32_frac) << 29;
    if (f32_exp == 0) {
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f32_exp = f32_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    if (extract64(f32_exp, 0, 1) == 0) {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(((uint64_t) f32_sbit) << 32
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (380 - f32_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    uint64_t f64_sbit = 0x8000000000000000ULL & val;
    int64_t f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);
    int64_t result_exp;
    uint64_t result_frac;

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_maybe_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.
     */
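    /* Illustrative note: the normalization and exponent-parity handling
     * below mirror the single-precision case above; the main difference is
     * in the repacking, where the full 52-bit fraction of the estimate is
     * kept rather than only fraction bits <51:29>.
     */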
    if (f64_exp == 0) {
        while (extract64(f64_frac, 51, 1) == 0) {
            f64_frac = f64_frac << 1;
            f64_exp = f64_exp - 1;
        }
        f64_frac = extract64(f64_frac, 0, 51) << 1;
    }

    if (extract64(f64_exp, 0, 1) == 0) {
        f64 = make_float64(f64_sbit
                           | (0x3feULL << 52)
                           | f64_frac);
    } else {
        f64 = make_float64(f64_sbit
                           | (0x3fdULL << 52)
                           | f64_frac);
    }

    result_exp = (3068 - f64_exp) / 2;

    f64 = recip_sqrt_estimate(f64, s);

    result_frac = extract64(float64_val(f64), 0, 52);

    return make_float64(f64_sbit |
                        ((result_exp & 0x7ff) << 52) |
                        result_frac);
}

uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, s);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, fpst);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
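/* Illustrative note: the _exact variants above leave any Inexact flag the
 * rounding raised in place (the FRINTX-style "signal inexact" behaviour),
 * while HELPER(rints)/HELPER(rintd) restore the Inexact flag to whatever it
 * was on entry, as the other round-to-integral instructions require.  For
 * example, rounding 1.5f via rints_exact sets float_flag_inexact in the
 * float_status, whereas rints leaves that flag as it was on entry.
 */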
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for FPROUNDING_ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
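/* Illustrative note: these helpers back the CRC32 and CRC32C instruction
 * families, with 'bytes' set by the translator to the size of the value
 * being accumulated.  The architectural instructions do not perform the
 * initial and final bit inversions of the common CRC-32 presentation, so
 * the plain-CRC helper XORs the accumulator and the result with 0xffffffff
 * to cancel the inversions that zlib's crc32() applies internally.  As a
 * hypothetical example:
 *
 *     uint32_t r = helper_crc32(0, 0xab, 1);
 *     // accumulate the single byte 0xab into an all-zeroes accumulator
 *     // using the CRC-32 polynomial 0x04C11DB7
 */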
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}

void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
    int fp_el = fp_exception_el(env);
    uint32_t flags;

    if (is_a64(env)) {
        int sve_el = sve_exception_el(env);
        uint32_t zcr_len;

        *pc = env->pc;
        flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
        flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT;

        /* If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            int current_el = arm_current_el(env);

            zcr_len = env->vfp.zcr_el[current_el <= 1 ? 1 : current_el];
            zcr_len &= 0xf;
            if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
            }
            if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
                zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
            }
        }
        flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT;
    } else {
        *pc = env->regs[15];
        flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                  << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    *pflags = flags;
    *cs_base = 0;
}
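/* Illustrative note: as a worked example of the ZCR length clamping in
 * cpu_get_tb_cpu_state() above, consider code running at EL0 with EL2 and
 * EL3 implemented and ZCR_EL1.LEN == 7, ZCR_EL2.LEN == 3, ZCR_EL3.LEN == 9.
 * The zcr_len recorded in the TB flags is MIN(7, 3, 9) == 3, i.e. an
 * effective SVE vector length of (3 + 1) * 128 == 512 bits, assuming the
 * implementation supports that length.
 */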