1 #include "qemu/osdep.h" 2 #include "target/arm/idau.h" 3 #include "trace.h" 4 #include "cpu.h" 5 #include "internals.h" 6 #include "exec/gdbstub.h" 7 #include "exec/helper-proto.h" 8 #include "qemu/host-utils.h" 9 #include "sysemu/arch_init.h" 10 #include "sysemu/sysemu.h" 11 #include "qemu/bitops.h" 12 #include "qemu/crc32c.h" 13 #include "exec/exec-all.h" 14 #include "exec/cpu_ldst.h" 15 #include "arm_ldst.h" 16 #include <zlib.h> /* For crc32 */ 17 #include "exec/semihost.h" 18 #include "sysemu/kvm.h" 19 #include "fpu/softfloat.h" 20 21 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 22 23 #ifndef CONFIG_USER_ONLY 24 /* Cacheability and shareability attributes for a memory access */ 25 typedef struct ARMCacheAttrs { 26 unsigned int attrs:8; /* as in the MAIR register encoding */ 27 unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */ 28 } ARMCacheAttrs; 29 30 static bool get_phys_addr(CPUARMState *env, target_ulong address, 31 MMUAccessType access_type, ARMMMUIdx mmu_idx, 32 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 33 target_ulong *page_size, 34 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 35 36 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 37 MMUAccessType access_type, ARMMMUIdx mmu_idx, 38 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 39 target_ulong *page_size_ptr, 40 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 41 42 /* Security attributes for an address, as returned by v8m_security_lookup. */ 43 typedef struct V8M_SAttributes { 44 bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */ 45 bool ns; 46 bool nsc; 47 uint8_t sregion; 48 bool srvalid; 49 uint8_t iregion; 50 bool irvalid; 51 } V8M_SAttributes; 52 53 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 54 MMUAccessType access_type, ARMMMUIdx mmu_idx, 55 V8M_SAttributes *sattrs); 56 #endif 57 58 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 59 { 60 int nregs; 61 62 /* VFP data registers are always little-endian. */ 63 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 64 if (reg < nregs) { 65 stq_le_p(buf, *aa32_vfp_dreg(env, reg)); 66 return 8; 67 } 68 if (arm_feature(env, ARM_FEATURE_NEON)) { 69 /* Aliases for Q regs. */ 70 nregs += 16; 71 if (reg < nregs) { 72 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 73 stq_le_p(buf, q[0]); 74 stq_le_p(buf + 8, q[1]); 75 return 16; 76 } 77 } 78 switch (reg - nregs) { 79 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4; 80 case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4; 81 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4; 82 } 83 return 0; 84 } 85 86 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 87 { 88 int nregs; 89 90 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 91 if (reg < nregs) { 92 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); 93 return 8; 94 } 95 if (arm_feature(env, ARM_FEATURE_NEON)) { 96 nregs += 16; 97 if (reg < nregs) { 98 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 99 q[0] = ldq_le_p(buf); 100 q[1] = ldq_le_p(buf + 8); 101 return 16; 102 } 103 } 104 switch (reg - nregs) { 105 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; 106 case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4; 107 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; 108 } 109 return 0; 110 } 111 112 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 113 { 114 switch (reg) { 115 case 0 ... 
31: 116 /* 128 bit FP register */ 117 { 118 uint64_t *q = aa64_vfp_qreg(env, reg); 119 stq_le_p(buf, q[0]); 120 stq_le_p(buf + 8, q[1]); 121 return 16; 122 } 123 case 32: 124 /* FPSR */ 125 stl_p(buf, vfp_get_fpsr(env)); 126 return 4; 127 case 33: 128 /* FPCR */ 129 stl_p(buf, vfp_get_fpcr(env)); 130 return 4; 131 default: 132 return 0; 133 } 134 } 135 136 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 137 { 138 switch (reg) { 139 case 0 ... 31: 140 /* 128 bit FP register */ 141 { 142 uint64_t *q = aa64_vfp_qreg(env, reg); 143 q[0] = ldq_le_p(buf); 144 q[1] = ldq_le_p(buf + 8); 145 return 16; 146 } 147 case 32: 148 /* FPSR */ 149 vfp_set_fpsr(env, ldl_p(buf)); 150 return 4; 151 case 33: 152 /* FPCR */ 153 vfp_set_fpcr(env, ldl_p(buf)); 154 return 4; 155 default: 156 return 0; 157 } 158 } 159 160 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 161 { 162 assert(ri->fieldoffset); 163 if (cpreg_field_is_64bit(ri)) { 164 return CPREG_FIELD64(env, ri); 165 } else { 166 return CPREG_FIELD32(env, ri); 167 } 168 } 169 170 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 171 uint64_t value) 172 { 173 assert(ri->fieldoffset); 174 if (cpreg_field_is_64bit(ri)) { 175 CPREG_FIELD64(env, ri) = value; 176 } else { 177 CPREG_FIELD32(env, ri) = value; 178 } 179 } 180 181 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 182 { 183 return (char *)env + ri->fieldoffset; 184 } 185 186 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 187 { 188 /* Raw read of a coprocessor register (as needed for migration, etc). */ 189 if (ri->type & ARM_CP_CONST) { 190 return ri->resetvalue; 191 } else if (ri->raw_readfn) { 192 return ri->raw_readfn(env, ri); 193 } else if (ri->readfn) { 194 return ri->readfn(env, ri); 195 } else { 196 return raw_read(env, ri); 197 } 198 } 199 200 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 201 uint64_t v) 202 { 203 /* Raw write of a coprocessor register (as needed for migration, etc). 204 * Note that constant registers are treated as write-ignored; the 205 * caller should check for success by whether a readback gives the 206 * value written. 207 */ 208 if (ri->type & ARM_CP_CONST) { 209 return; 210 } else if (ri->raw_writefn) { 211 ri->raw_writefn(env, ri, v); 212 } else if (ri->writefn) { 213 ri->writefn(env, ri, v); 214 } else { 215 raw_write(env, ri, v); 216 } 217 } 218 219 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg) 220 { 221 ARMCPU *cpu = arm_env_get_cpu(env); 222 const ARMCPRegInfo *ri; 223 uint32_t key; 224 225 key = cpu->dyn_xml.cpregs_keys[reg]; 226 ri = get_arm_cp_reginfo(cpu->cp_regs, key); 227 if (ri) { 228 if (cpreg_field_is_64bit(ri)) { 229 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); 230 } else { 231 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); 232 } 233 } 234 return 0; 235 } 236 237 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) 238 { 239 return 0; 240 } 241 242 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 243 { 244 /* Return true if the regdef would cause an assertion if you called 245 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 246 * program bug for it not to have the NO_RAW flag). 
247 * NB that returning false here doesn't necessarily mean that calling 248 * read/write_raw_cp_reg() is safe, because we can't distinguish "has 249 * read/write access functions which are safe for raw use" from "has 250 * read/write access functions which have side effects but has forgotten 251 * to provide raw access functions". 252 * The tests here line up with the conditions in read/write_raw_cp_reg() 253 * and assertions in raw_read()/raw_write(). 254 */ 255 if ((ri->type & ARM_CP_CONST) || 256 ri->fieldoffset || 257 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { 258 return false; 259 } 260 return true; 261 } 262 263 bool write_cpustate_to_list(ARMCPU *cpu) 264 { 265 /* Write the coprocessor state from cpu->env to the (index,value) list. */ 266 int i; 267 bool ok = true; 268 269 for (i = 0; i < cpu->cpreg_array_len; i++) { 270 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 271 const ARMCPRegInfo *ri; 272 273 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 274 if (!ri) { 275 ok = false; 276 continue; 277 } 278 if (ri->type & ARM_CP_NO_RAW) { 279 continue; 280 } 281 cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri); 282 } 283 return ok; 284 } 285 286 bool write_list_to_cpustate(ARMCPU *cpu) 287 { 288 int i; 289 bool ok = true; 290 291 for (i = 0; i < cpu->cpreg_array_len; i++) { 292 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 293 uint64_t v = cpu->cpreg_values[i]; 294 const ARMCPRegInfo *ri; 295 296 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 297 if (!ri) { 298 ok = false; 299 continue; 300 } 301 if (ri->type & ARM_CP_NO_RAW) { 302 continue; 303 } 304 /* Write value and confirm it reads back as written 305 * (to catch read-only registers and partially read-only 306 * registers where the incoming migration value doesn't match) 307 */ 308 write_raw_cp_reg(&cpu->env, ri, v); 309 if (read_raw_cp_reg(&cpu->env, ri) != v) { 310 ok = false; 311 } 312 } 313 return ok; 314 } 315 316 static void add_cpreg_to_list(gpointer key, gpointer opaque) 317 { 318 ARMCPU *cpu = opaque; 319 uint64_t regidx; 320 const ARMCPRegInfo *ri; 321 322 regidx = *(uint32_t *)key; 323 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 324 325 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 326 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); 327 /* The value array need not be initialized at this point */ 328 cpu->cpreg_array_len++; 329 } 330 } 331 332 static void count_cpreg(gpointer key, gpointer opaque) 333 { 334 ARMCPU *cpu = opaque; 335 uint64_t regidx; 336 const ARMCPRegInfo *ri; 337 338 regidx = *(uint32_t *)key; 339 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 340 341 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 342 cpu->cpreg_array_len++; 343 } 344 } 345 346 static gint cpreg_key_compare(gconstpointer a, gconstpointer b) 347 { 348 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); 349 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); 350 351 if (aidx > bidx) { 352 return 1; 353 } 354 if (aidx < bidx) { 355 return -1; 356 } 357 return 0; 358 } 359 360 void init_cpreg_list(ARMCPU *cpu) 361 { 362 /* Initialise the cpreg_tuples[] array based on the cp_regs hash. 363 * Note that we require cpreg_tuples[] to be sorted by key ID. 
364 */ 365 GList *keys; 366 int arraylen; 367 368 keys = g_hash_table_get_keys(cpu->cp_regs); 369 keys = g_list_sort(keys, cpreg_key_compare); 370 371 cpu->cpreg_array_len = 0; 372 373 g_list_foreach(keys, count_cpreg, cpu); 374 375 arraylen = cpu->cpreg_array_len; 376 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 377 cpu->cpreg_values = g_new(uint64_t, arraylen); 378 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 379 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 380 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 381 cpu->cpreg_array_len = 0; 382 383 g_list_foreach(keys, add_cpreg_to_list, cpu); 384 385 assert(cpu->cpreg_array_len == arraylen); 386 387 g_list_free(keys); 388 } 389 390 /* 391 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but 392 * they are accessible when EL3 is using AArch64 regardless of EL3.NS. 393 * 394 * access_el3_aa32ns: Used to check AArch32 register views. 395 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views. 396 */ 397 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 398 const ARMCPRegInfo *ri, 399 bool isread) 400 { 401 bool secure = arm_is_secure_below_el3(env); 402 403 assert(!arm_el_is_aa64(env, 3)); 404 if (secure) { 405 return CP_ACCESS_TRAP_UNCATEGORIZED; 406 } 407 return CP_ACCESS_OK; 408 } 409 410 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env, 411 const ARMCPRegInfo *ri, 412 bool isread) 413 { 414 if (!arm_el_is_aa64(env, 3)) { 415 return access_el3_aa32ns(env, ri, isread); 416 } 417 return CP_ACCESS_OK; 418 } 419 420 /* Some secure-only AArch32 registers trap to EL3 if used from 421 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 422 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 423 * We assume that the .access field is set to PL1_RW. 424 */ 425 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 426 const ARMCPRegInfo *ri, 427 bool isread) 428 { 429 if (arm_current_el(env) == 3) { 430 return CP_ACCESS_OK; 431 } 432 if (arm_is_secure_below_el3(env)) { 433 return CP_ACCESS_TRAP_EL3; 434 } 435 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 436 return CP_ACCESS_TRAP_UNCATEGORIZED; 437 } 438 439 /* Check for traps to "powerdown debug" registers, which are controlled 440 * by MDCR.TDOSA 441 */ 442 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 443 bool isread) 444 { 445 int el = arm_current_el(env); 446 447 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA) 448 && !arm_is_secure_below_el3(env)) { 449 return CP_ACCESS_TRAP_EL2; 450 } 451 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 452 return CP_ACCESS_TRAP_EL3; 453 } 454 return CP_ACCESS_OK; 455 } 456 457 /* Check for traps to "debug ROM" registers, which are controlled 458 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 459 */ 460 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 461 bool isread) 462 { 463 int el = arm_current_el(env); 464 465 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA) 466 && !arm_is_secure_below_el3(env)) { 467 return CP_ACCESS_TRAP_EL2; 468 } 469 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 470 return CP_ACCESS_TRAP_EL3; 471 } 472 return CP_ACCESS_OK; 473 } 474 475 /* Check for traps to general debug registers, which are controlled 476 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 
477 */ 478 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 479 bool isread) 480 { 481 int el = arm_current_el(env); 482 483 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA) 484 && !arm_is_secure_below_el3(env)) { 485 return CP_ACCESS_TRAP_EL2; 486 } 487 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 488 return CP_ACCESS_TRAP_EL3; 489 } 490 return CP_ACCESS_OK; 491 } 492 493 /* Check for traps to performance monitor registers, which are controlled 494 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 495 */ 496 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 497 bool isread) 498 { 499 int el = arm_current_el(env); 500 501 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 502 && !arm_is_secure_below_el3(env)) { 503 return CP_ACCESS_TRAP_EL2; 504 } 505 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 506 return CP_ACCESS_TRAP_EL3; 507 } 508 return CP_ACCESS_OK; 509 } 510 511 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 512 { 513 ARMCPU *cpu = arm_env_get_cpu(env); 514 515 raw_write(env, ri, value); 516 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 517 } 518 519 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 520 { 521 ARMCPU *cpu = arm_env_get_cpu(env); 522 523 if (raw_read(env, ri) != value) { 524 /* Unlike real hardware the qemu TLB uses virtual addresses, 525 * not modified virtual addresses, so this causes a TLB flush. 526 */ 527 tlb_flush(CPU(cpu)); 528 raw_write(env, ri, value); 529 } 530 } 531 532 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 533 uint64_t value) 534 { 535 ARMCPU *cpu = arm_env_get_cpu(env); 536 537 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 538 && !extended_addresses_enabled(env)) { 539 /* For VMSA (when not using the LPAE long descriptor page table 540 * format) this register includes the ASID, so do a TLB flush. 541 * For PMSA it is purely a process ID and no action is needed. 
542 */ 543 tlb_flush(CPU(cpu)); 544 } 545 raw_write(env, ri, value); 546 } 547 548 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, 549 uint64_t value) 550 { 551 /* Invalidate all (TLBIALL) */ 552 ARMCPU *cpu = arm_env_get_cpu(env); 553 554 tlb_flush(CPU(cpu)); 555 } 556 557 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, 558 uint64_t value) 559 { 560 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 561 ARMCPU *cpu = arm_env_get_cpu(env); 562 563 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); 564 } 565 566 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, 567 uint64_t value) 568 { 569 /* Invalidate by ASID (TLBIASID) */ 570 ARMCPU *cpu = arm_env_get_cpu(env); 571 572 tlb_flush(CPU(cpu)); 573 } 574 575 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, 576 uint64_t value) 577 { 578 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 579 ARMCPU *cpu = arm_env_get_cpu(env); 580 581 tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK); 582 } 583 584 /* IS variants of TLB operations must affect all cores */ 585 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 586 uint64_t value) 587 { 588 CPUState *cs = ENV_GET_CPU(env); 589 590 tlb_flush_all_cpus_synced(cs); 591 } 592 593 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 594 uint64_t value) 595 { 596 CPUState *cs = ENV_GET_CPU(env); 597 598 tlb_flush_all_cpus_synced(cs); 599 } 600 601 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 602 uint64_t value) 603 { 604 CPUState *cs = ENV_GET_CPU(env); 605 606 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 607 } 608 609 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 610 uint64_t value) 611 { 612 CPUState *cs = ENV_GET_CPU(env); 613 614 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 615 } 616 617 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 618 uint64_t value) 619 { 620 CPUState *cs = ENV_GET_CPU(env); 621 622 tlb_flush_by_mmuidx(cs, 623 ARMMMUIdxBit_S12NSE1 | 624 ARMMMUIdxBit_S12NSE0 | 625 ARMMMUIdxBit_S2NS); 626 } 627 628 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 629 uint64_t value) 630 { 631 CPUState *cs = ENV_GET_CPU(env); 632 633 tlb_flush_by_mmuidx_all_cpus_synced(cs, 634 ARMMMUIdxBit_S12NSE1 | 635 ARMMMUIdxBit_S12NSE0 | 636 ARMMMUIdxBit_S2NS); 637 } 638 639 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, 640 uint64_t value) 641 { 642 /* Invalidate by IPA. This has to invalidate any structures that 643 * contain only stage 2 translation information, but does not need 644 * to apply to structures that contain combined stage 1 and stage 2 645 * translation information. 646 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 
647 */ 648 CPUState *cs = ENV_GET_CPU(env); 649 uint64_t pageaddr; 650 651 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 652 return; 653 } 654 655 pageaddr = sextract64(value << 12, 0, 40); 656 657 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 658 } 659 660 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 661 uint64_t value) 662 { 663 CPUState *cs = ENV_GET_CPU(env); 664 uint64_t pageaddr; 665 666 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 667 return; 668 } 669 670 pageaddr = sextract64(value << 12, 0, 40); 671 672 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 673 ARMMMUIdxBit_S2NS); 674 } 675 676 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 677 uint64_t value) 678 { 679 CPUState *cs = ENV_GET_CPU(env); 680 681 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 682 } 683 684 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 685 uint64_t value) 686 { 687 CPUState *cs = ENV_GET_CPU(env); 688 689 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 690 } 691 692 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 693 uint64_t value) 694 { 695 CPUState *cs = ENV_GET_CPU(env); 696 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 697 698 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 699 } 700 701 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 702 uint64_t value) 703 { 704 CPUState *cs = ENV_GET_CPU(env); 705 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 706 707 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 708 ARMMMUIdxBit_S1E2); 709 } 710 711 static const ARMCPRegInfo cp_reginfo[] = { 712 /* Define the secure and non-secure FCSE identifier CP registers 713 * separately because there is no secure bank in V8 (no _EL3). This allows 714 * the secure register to be properly reset and migrated. There is also no 715 * v8 EL1 version of the register so the non-secure instance stands alone. 716 */ 717 { .name = "FCSEIDR", 718 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 719 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 720 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), 721 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 722 { .name = "FCSEIDR_S", 723 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 724 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 725 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), 726 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 727 /* Define the secure and non-secure context identifier CP registers 728 * separately because there is no secure bank in V8 (no _EL3). This allows 729 * the secure register to be properly reset and migrated. In the 730 * non-secure case, the 32-bit register will have reset and migration 731 * disabled during registration as it is handled by the 64-bit instance. 
732 */ 733 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 734 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 735 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 736 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 737 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 738 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, 739 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 740 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 741 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 742 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 743 REGINFO_SENTINEL 744 }; 745 746 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 747 /* NB: Some of these registers exist in v8 but with more precise 748 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 749 */ 750 /* MMU Domain access control / MPU write buffer control */ 751 { .name = "DACR", 752 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 753 .access = PL1_RW, .resetvalue = 0, 754 .writefn = dacr_write, .raw_writefn = raw_write, 755 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 756 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 757 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 758 * For v6 and v5, these mappings are overly broad. 759 */ 760 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 761 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 762 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 763 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 764 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 765 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 766 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 767 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 768 /* Cache maintenance ops; some of this space may be overridden later. */ 769 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 770 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 771 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 772 REGINFO_SENTINEL 773 }; 774 775 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 776 /* Not all pre-v6 cores implemented this WFI, so this is slightly 777 * over-broad. 778 */ 779 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 780 .access = PL1_W, .type = ARM_CP_WFI }, 781 REGINFO_SENTINEL 782 }; 783 784 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 785 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 786 * is UNPREDICTABLE; we choose to NOP as most implementations do). 787 */ 788 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 789 .access = PL1_W, .type = ARM_CP_WFI }, 790 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 791 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 792 * OMAPCP will override this space. 
793 */ 794 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 795 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 796 .resetvalue = 0 }, 797 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 798 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 799 .resetvalue = 0 }, 800 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 801 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 802 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 803 .resetvalue = 0 }, 804 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 805 * implementing it as RAZ means the "debug architecture version" bits 806 * will read as a reserved value, which should cause Linux to not try 807 * to use the debug hardware. 808 */ 809 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 810 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 811 /* MMU TLB control. Note that the wildcarding means we cover not just 812 * the unified TLB ops but also the dside/iside/inner-shareable variants. 813 */ 814 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 815 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 816 .type = ARM_CP_NO_RAW }, 817 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 818 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 819 .type = ARM_CP_NO_RAW }, 820 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 821 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 822 .type = ARM_CP_NO_RAW }, 823 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 824 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 825 .type = ARM_CP_NO_RAW }, 826 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 827 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 828 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 829 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 830 REGINFO_SENTINEL 831 }; 832 833 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 834 uint64_t value) 835 { 836 uint32_t mask = 0; 837 838 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 839 if (!arm_feature(env, ARM_FEATURE_V8)) { 840 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 841 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 842 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 843 */ 844 if (arm_feature(env, ARM_FEATURE_VFP)) { 845 /* VFP coprocessor: cp10 & cp11 [23:20] */ 846 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 847 848 if (!arm_feature(env, ARM_FEATURE_NEON)) { 849 /* ASEDIS [31] bit is RAO/WI */ 850 value |= (1 << 31); 851 } 852 853 /* VFPv3 and upwards with NEON implement 32 double precision 854 * registers (D0-D31). 855 */ 856 if (!arm_feature(env, ARM_FEATURE_NEON) || 857 !arm_feature(env, ARM_FEATURE_VFP3)) { 858 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 859 value |= (1 << 30); 860 } 861 } 862 value &= mask; 863 } 864 env->cp15.cpacr_el1 = value; 865 } 866 867 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 868 { 869 /* Call cpacr_write() so that we reset with the correct RAO bits set 870 * for our CPU features. 
871 */ 872 cpacr_write(env, ri, 0); 873 } 874 875 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 876 bool isread) 877 { 878 if (arm_feature(env, ARM_FEATURE_V8)) { 879 /* Check if CPACR accesses are to be trapped to EL2 */ 880 if (arm_current_el(env) == 1 && 881 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { 882 return CP_ACCESS_TRAP_EL2; 883 /* Check if CPACR accesses are to be trapped to EL3 */ 884 } else if (arm_current_el(env) < 3 && 885 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 886 return CP_ACCESS_TRAP_EL3; 887 } 888 } 889 890 return CP_ACCESS_OK; 891 } 892 893 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 894 bool isread) 895 { 896 /* Check if CPTR accesses are set to trap to EL3 */ 897 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 898 return CP_ACCESS_TRAP_EL3; 899 } 900 901 return CP_ACCESS_OK; 902 } 903 904 static const ARMCPRegInfo v6_cp_reginfo[] = { 905 /* prefetch by MVA in v6, NOP in v7 */ 906 { .name = "MVA_prefetch", 907 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 908 .access = PL1_W, .type = ARM_CP_NOP }, 909 /* We need to break the TB after ISB to execute self-modifying code 910 * correctly and also to take any pending interrupts immediately. 911 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 912 */ 913 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 914 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 915 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 916 .access = PL0_W, .type = ARM_CP_NOP }, 917 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 918 .access = PL0_W, .type = ARM_CP_NOP }, 919 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 920 .access = PL1_RW, 921 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 922 offsetof(CPUARMState, cp15.ifar_ns) }, 923 .resetvalue = 0, }, 924 /* Watchpoint Fault Address Register : should actually only be present 925 * for 1136, 1176, 11MPCore. 926 */ 927 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 928 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 929 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 930 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 931 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 932 .resetfn = cpacr_reset, .writefn = cpacr_write }, 933 REGINFO_SENTINEL 934 }; 935 936 /* Definitions for the PMU registers */ 937 #define PMCRN_MASK 0xf800 938 #define PMCRN_SHIFT 11 939 #define PMCRD 0x8 940 #define PMCRC 0x4 941 #define PMCRE 0x1 942 943 static inline uint32_t pmu_num_counters(CPUARMState *env) 944 { 945 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; 946 } 947 948 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ 949 static inline uint64_t pmu_counter_mask(CPUARMState *env) 950 { 951 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); 952 } 953 954 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 955 bool isread) 956 { 957 /* Performance monitor registers user accessibility is controlled 958 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 959 * trapping to EL2 or EL3 for other accesses. 
960 */ 961 int el = arm_current_el(env); 962 963 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 964 return CP_ACCESS_TRAP; 965 } 966 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 967 && !arm_is_secure_below_el3(env)) { 968 return CP_ACCESS_TRAP_EL2; 969 } 970 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 971 return CP_ACCESS_TRAP_EL3; 972 } 973 974 return CP_ACCESS_OK; 975 } 976 977 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 978 const ARMCPRegInfo *ri, 979 bool isread) 980 { 981 /* ER: event counter read trap control */ 982 if (arm_feature(env, ARM_FEATURE_V8) 983 && arm_current_el(env) == 0 984 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 985 && isread) { 986 return CP_ACCESS_OK; 987 } 988 989 return pmreg_access(env, ri, isread); 990 } 991 992 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 993 const ARMCPRegInfo *ri, 994 bool isread) 995 { 996 /* SW: software increment write trap control */ 997 if (arm_feature(env, ARM_FEATURE_V8) 998 && arm_current_el(env) == 0 999 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 1000 && !isread) { 1001 return CP_ACCESS_OK; 1002 } 1003 1004 return pmreg_access(env, ri, isread); 1005 } 1006 1007 #ifndef CONFIG_USER_ONLY 1008 1009 static CPAccessResult pmreg_access_selr(CPUARMState *env, 1010 const ARMCPRegInfo *ri, 1011 bool isread) 1012 { 1013 /* ER: event counter read trap control */ 1014 if (arm_feature(env, ARM_FEATURE_V8) 1015 && arm_current_el(env) == 0 1016 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 1017 return CP_ACCESS_OK; 1018 } 1019 1020 return pmreg_access(env, ri, isread); 1021 } 1022 1023 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 1024 const ARMCPRegInfo *ri, 1025 bool isread) 1026 { 1027 /* CR: cycle counter read trap control */ 1028 if (arm_feature(env, ARM_FEATURE_V8) 1029 && arm_current_el(env) == 0 1030 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 1031 && isread) { 1032 return CP_ACCESS_OK; 1033 } 1034 1035 return pmreg_access(env, ri, isread); 1036 } 1037 1038 static inline bool arm_ccnt_enabled(CPUARMState *env) 1039 { 1040 /* This does not support checking PMCCFILTR_EL0 register */ 1041 1042 if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) { 1043 return false; 1044 } 1045 1046 return true; 1047 } 1048 1049 void pmccntr_sync(CPUARMState *env) 1050 { 1051 uint64_t temp_ticks; 1052 1053 temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1054 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1055 1056 if (env->cp15.c9_pmcr & PMCRD) { 1057 /* Increment once every 64 processor clock cycles */ 1058 temp_ticks /= 64; 1059 } 1060 1061 if (arm_ccnt_enabled(env)) { 1062 env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt; 1063 } 1064 } 1065 1066 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1067 uint64_t value) 1068 { 1069 pmccntr_sync(env); 1070 1071 if (value & PMCRC) { 1072 /* The counter has been reset */ 1073 env->cp15.c15_ccnt = 0; 1074 } 1075 1076 /* only the DP, X, D and E bits are writable */ 1077 env->cp15.c9_pmcr &= ~0x39; 1078 env->cp15.c9_pmcr |= (value & 0x39); 1079 1080 pmccntr_sync(env); 1081 } 1082 1083 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1084 { 1085 uint64_t total_ticks; 1086 1087 if (!arm_ccnt_enabled(env)) { 1088 /* Counter is disabled, do not change value */ 1089 return env->cp15.c15_ccnt; 1090 } 1091 1092 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1093 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1094 1095 if (env->cp15.c9_pmcr & PMCRD) { 1096 /* Increment once every 64 processor clock 
cycles */ 1097 total_ticks /= 64; 1098 } 1099 return total_ticks - env->cp15.c15_ccnt; 1100 } 1101 1102 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1103 uint64_t value) 1104 { 1105 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and 1106 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the 1107 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are 1108 * accessed. 1109 */ 1110 env->cp15.c9_pmselr = value & 0x1f; 1111 } 1112 1113 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1114 uint64_t value) 1115 { 1116 uint64_t total_ticks; 1117 1118 if (!arm_ccnt_enabled(env)) { 1119 /* Counter is disabled, set the absolute value */ 1120 env->cp15.c15_ccnt = value; 1121 return; 1122 } 1123 1124 total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1125 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1126 1127 if (env->cp15.c9_pmcr & PMCRD) { 1128 /* Increment once every 64 processor clock cycles */ 1129 total_ticks /= 64; 1130 } 1131 env->cp15.c15_ccnt = total_ticks - value; 1132 } 1133 1134 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1135 uint64_t value) 1136 { 1137 uint64_t cur_val = pmccntr_read(env, NULL); 1138 1139 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1140 } 1141 1142 #else /* CONFIG_USER_ONLY */ 1143 1144 void pmccntr_sync(CPUARMState *env) 1145 { 1146 } 1147 1148 #endif 1149 1150 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1151 uint64_t value) 1152 { 1153 pmccntr_sync(env); 1154 env->cp15.pmccfiltr_el0 = value & 0xfc000000; 1155 pmccntr_sync(env); 1156 } 1157 1158 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1159 uint64_t value) 1160 { 1161 value &= pmu_counter_mask(env); 1162 env->cp15.c9_pmcnten |= value; 1163 } 1164 1165 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1166 uint64_t value) 1167 { 1168 value &= pmu_counter_mask(env); 1169 env->cp15.c9_pmcnten &= ~value; 1170 } 1171 1172 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1173 uint64_t value) 1174 { 1175 env->cp15.c9_pmovsr &= ~value; 1176 } 1177 1178 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1179 uint64_t value) 1180 { 1181 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1182 * PMSELR value is equal to or greater than the number of implemented 1183 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 1184 */ 1185 if (env->cp15.c9_pmselr == 0x1f) { 1186 pmccfiltr_write(env, ri, value); 1187 } 1188 } 1189 1190 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) 1191 { 1192 /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER 1193 * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write(). 
1194 */ 1195 if (env->cp15.c9_pmselr == 0x1f) { 1196 return env->cp15.pmccfiltr_el0; 1197 } else { 1198 return 0; 1199 } 1200 } 1201 1202 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1203 uint64_t value) 1204 { 1205 if (arm_feature(env, ARM_FEATURE_V8)) { 1206 env->cp15.c9_pmuserenr = value & 0xf; 1207 } else { 1208 env->cp15.c9_pmuserenr = value & 1; 1209 } 1210 } 1211 1212 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1213 uint64_t value) 1214 { 1215 /* We have no event counters so only the C bit can be changed */ 1216 value &= pmu_counter_mask(env); 1217 env->cp15.c9_pminten |= value; 1218 } 1219 1220 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1221 uint64_t value) 1222 { 1223 value &= pmu_counter_mask(env); 1224 env->cp15.c9_pminten &= ~value; 1225 } 1226 1227 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 1228 uint64_t value) 1229 { 1230 /* Note that even though the AArch64 view of this register has bits 1231 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 1232 * architectural requirements for bits which are RES0 only in some 1233 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 1234 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 1235 */ 1236 raw_write(env, ri, value & ~0x1FULL); 1237 } 1238 1239 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 1240 { 1241 /* We only mask off bits that are RES0 both for AArch64 and AArch32. 1242 * For bits that vary between AArch32/64, code needs to check the 1243 * current execution mode before directly using the feature bit. 1244 */ 1245 uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK; 1246 1247 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1248 valid_mask &= ~SCR_HCE; 1249 1250 /* On ARMv7, SMD (or SCD as it is called in v7) is only 1251 * supported if EL2 exists. The bit is UNK/SBZP when 1252 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1253 * when EL2 is unavailable. 1254 * On ARMv8, this bit is always available. 1255 */ 1256 if (arm_feature(env, ARM_FEATURE_V7) && 1257 !arm_feature(env, ARM_FEATURE_V8)) { 1258 valid_mask &= ~SCR_SMD; 1259 } 1260 } 1261 1262 /* Clear all-context RES0 bits. 
*/ 1263 value &= valid_mask; 1264 raw_write(env, ri, value); 1265 } 1266 1267 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1268 { 1269 ARMCPU *cpu = arm_env_get_cpu(env); 1270 1271 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1272 * bank 1273 */ 1274 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1275 ri->secure & ARM_CP_SECSTATE_S); 1276 1277 return cpu->ccsidr[index]; 1278 } 1279 1280 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1281 uint64_t value) 1282 { 1283 raw_write(env, ri, value & 0xf); 1284 } 1285 1286 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1287 { 1288 CPUState *cs = ENV_GET_CPU(env); 1289 uint64_t ret = 0; 1290 1291 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 1292 ret |= CPSR_I; 1293 } 1294 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 1295 ret |= CPSR_F; 1296 } 1297 /* External aborts are not possible in QEMU so A bit is always clear */ 1298 return ret; 1299 } 1300 1301 static const ARMCPRegInfo v7_cp_reginfo[] = { 1302 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 1303 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 1304 .access = PL1_W, .type = ARM_CP_NOP }, 1305 /* Performance monitors are implementation defined in v7, 1306 * but with an ARM recommended set of registers, which we 1307 * follow (although we don't actually implement any counters) 1308 * 1309 * Performance registers fall into three categories: 1310 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 1311 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 1312 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 1313 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 1314 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
1315 */ 1316 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 1317 .access = PL0_RW, .type = ARM_CP_ALIAS, 1318 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1319 .writefn = pmcntenset_write, 1320 .accessfn = pmreg_access, 1321 .raw_writefn = raw_write }, 1322 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 1323 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 1324 .access = PL0_RW, .accessfn = pmreg_access, 1325 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 1326 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 1327 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 1328 .access = PL0_RW, 1329 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1330 .accessfn = pmreg_access, 1331 .writefn = pmcntenclr_write, 1332 .type = ARM_CP_ALIAS }, 1333 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 1334 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 1335 .access = PL0_RW, .accessfn = pmreg_access, 1336 .type = ARM_CP_ALIAS, 1337 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 1338 .writefn = pmcntenclr_write }, 1339 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 1340 .access = PL0_RW, 1341 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 1342 .accessfn = pmreg_access, 1343 .writefn = pmovsr_write, 1344 .raw_writefn = raw_write }, 1345 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 1346 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 1347 .access = PL0_RW, .accessfn = pmreg_access, 1348 .type = ARM_CP_ALIAS, 1349 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 1350 .writefn = pmovsr_write, 1351 .raw_writefn = raw_write }, 1352 /* Unimplemented so WI. */ 1353 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 1354 .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP }, 1355 #ifndef CONFIG_USER_ONLY 1356 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 1357 .access = PL0_RW, .type = ARM_CP_ALIAS, 1358 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 1359 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 1360 .raw_writefn = raw_write}, 1361 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 1362 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 1363 .access = PL0_RW, .accessfn = pmreg_access_selr, 1364 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 1365 .writefn = pmselr_write, .raw_writefn = raw_write, }, 1366 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 1367 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 1368 .readfn = pmccntr_read, .writefn = pmccntr_write32, 1369 .accessfn = pmreg_access_ccntr }, 1370 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 1371 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 1372 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 1373 .type = ARM_CP_IO, 1374 .readfn = pmccntr_read, .writefn = pmccntr_write, }, 1375 #endif 1376 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 1377 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 1378 .writefn = pmccfiltr_write, 1379 .access = PL0_RW, .accessfn = pmreg_access, 1380 .type = ARM_CP_IO, 1381 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 1382 .resetvalue = 0, }, 1383 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 1384 .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access, 1385 .writefn = pmxevtyper_write, .readfn = 
pmxevtyper_read }, 1386 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 1387 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 1388 .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access, 1389 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1390 /* Unimplemented, RAZ/WI. */ 1391 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 1392 .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0, 1393 .accessfn = pmreg_access_xevcntr }, 1394 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 1395 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 1396 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 1397 .resetvalue = 0, 1398 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 1399 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 1400 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 1401 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 1402 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 1403 .resetvalue = 0, 1404 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 1405 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 1406 .access = PL1_RW, .accessfn = access_tpm, 1407 .type = ARM_CP_ALIAS, 1408 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 1409 .resetvalue = 0, 1410 .writefn = pmintenset_write, .raw_writefn = raw_write }, 1411 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 1412 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 1413 .access = PL1_RW, .accessfn = access_tpm, 1414 .type = ARM_CP_IO, 1415 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 1416 .writefn = pmintenset_write, .raw_writefn = raw_write, 1417 .resetvalue = 0x0 }, 1418 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 1419 .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 1420 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 1421 .writefn = pmintenclr_write, }, 1422 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 1423 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 1424 .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 1425 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 1426 .writefn = pmintenclr_write }, 1427 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 1428 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 1429 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 1430 { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, 1431 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 1432 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0, 1433 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 1434 offsetof(CPUARMState, cp15.csselr_ns) } }, 1435 /* Auxiliary ID register: this actually has an IMPDEF value but for now 1436 * just RAZ for all cores: 1437 */ 1438 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 1439 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 1440 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 1441 /* Auxiliary fault status registers: these also are IMPDEF, and we 1442 * choose to RAZ/WI for all cores. 
1443 */ 1444 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 1445 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 1446 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 1447 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 1448 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 1449 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 1450 /* MAIR can just read-as-written because we don't implement caches 1451 * and so don't need to care about memory attributes. 1452 */ 1453 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 1454 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 1455 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 1456 .resetvalue = 0 }, 1457 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 1458 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 1459 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 1460 .resetvalue = 0 }, 1461 /* For non-long-descriptor page tables these are PRRR and NMRR; 1462 * regardless they still act as reads-as-written for QEMU. 1463 */ 1464 /* MAIR0/1 are defined separately from their 64-bit counterpart which 1465 * allows them to assign the correct fieldoffset based on the endianness 1466 * handled in the field definitions. 1467 */ 1468 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 1469 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, 1470 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 1471 offsetof(CPUARMState, cp15.mair0_ns) }, 1472 .resetfn = arm_cp_reset_ignore }, 1473 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 1474 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, 1475 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 1476 offsetof(CPUARMState, cp15.mair1_ns) }, 1477 .resetfn = arm_cp_reset_ignore }, 1478 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 1479 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 1480 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 1481 /* 32 bit ITLB invalidates */ 1482 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 1483 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 1484 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 1485 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 1486 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 1487 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 1488 /* 32 bit DTLB invalidates */ 1489 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 1490 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 1491 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 1492 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 1493 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 1494 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 1495 /* 32 bit TLB invalidates */ 1496 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 1497 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 1498 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 1499 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 1500 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 1501 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 1502 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, 
.opc2 = 3, 1503 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 1504 REGINFO_SENTINEL 1505 }; 1506 1507 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 1508 /* 32 bit TLB invalidates, Inner Shareable */ 1509 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 1510 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write }, 1511 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 1512 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 1513 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 1514 .type = ARM_CP_NO_RAW, .access = PL1_W, 1515 .writefn = tlbiasid_is_write }, 1516 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 1517 .type = ARM_CP_NO_RAW, .access = PL1_W, 1518 .writefn = tlbimvaa_is_write }, 1519 REGINFO_SENTINEL 1520 }; 1521 1522 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1523 uint64_t value) 1524 { 1525 value &= 1; 1526 env->teecr = value; 1527 } 1528 1529 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 1530 bool isread) 1531 { 1532 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 1533 return CP_ACCESS_TRAP; 1534 } 1535 return CP_ACCESS_OK; 1536 } 1537 1538 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 1539 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 1540 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 1541 .resetvalue = 0, 1542 .writefn = teecr_write }, 1543 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 1544 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 1545 .accessfn = teehbr_access, .resetvalue = 0 }, 1546 REGINFO_SENTINEL 1547 }; 1548 1549 static const ARMCPRegInfo v6k_cp_reginfo[] = { 1550 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 1551 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 1552 .access = PL0_RW, 1553 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 1554 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 1555 .access = PL0_RW, 1556 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 1557 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 1558 .resetfn = arm_cp_reset_ignore }, 1559 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 1560 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 1561 .access = PL0_R|PL1_W, 1562 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 1563 .resetvalue = 0}, 1564 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 1565 .access = PL0_R|PL1_W, 1566 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 1567 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 1568 .resetfn = arm_cp_reset_ignore }, 1569 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 1570 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 1571 .access = PL1_RW, 1572 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 1573 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 1574 .access = PL1_RW, 1575 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 1576 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 1577 .resetvalue = 0 }, 1578 REGINFO_SENTINEL 1579 }; 1580 1581 #ifndef CONFIG_USER_ONLY 1582 1583 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 1584 bool isread) 1585 { 1586 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 
1587 * Writable only at the highest implemented exception level. 1588 */ 1589 int el = arm_current_el(env); 1590 1591 switch (el) { 1592 case 0: 1593 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) { 1594 return CP_ACCESS_TRAP; 1595 } 1596 break; 1597 case 1: 1598 if (!isread && ri->state == ARM_CP_STATE_AA32 && 1599 arm_is_secure_below_el3(env)) { 1600 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 1601 return CP_ACCESS_TRAP_UNCATEGORIZED; 1602 } 1603 break; 1604 case 2: 1605 case 3: 1606 break; 1607 } 1608 1609 if (!isread && el < arm_highest_el(env)) { 1610 return CP_ACCESS_TRAP_UNCATEGORIZED; 1611 } 1612 1613 return CP_ACCESS_OK; 1614 } 1615 1616 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 1617 bool isread) 1618 { 1619 unsigned int cur_el = arm_current_el(env); 1620 bool secure = arm_is_secure(env); 1621 1622 /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */ 1623 if (cur_el == 0 && 1624 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 1625 return CP_ACCESS_TRAP; 1626 } 1627 1628 if (arm_feature(env, ARM_FEATURE_EL2) && 1629 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 1630 !extract32(env->cp15.cnthctl_el2, 0, 1)) { 1631 return CP_ACCESS_TRAP_EL2; 1632 } 1633 return CP_ACCESS_OK; 1634 } 1635 1636 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 1637 bool isread) 1638 { 1639 unsigned int cur_el = arm_current_el(env); 1640 bool secure = arm_is_secure(env); 1641 1642 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if 1643 * EL0[PV]TEN is zero. 1644 */ 1645 if (cur_el == 0 && 1646 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 1647 return CP_ACCESS_TRAP; 1648 } 1649 1650 if (arm_feature(env, ARM_FEATURE_EL2) && 1651 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 1652 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 1653 return CP_ACCESS_TRAP_EL2; 1654 } 1655 return CP_ACCESS_OK; 1656 } 1657 1658 static CPAccessResult gt_pct_access(CPUARMState *env, 1659 const ARMCPRegInfo *ri, 1660 bool isread) 1661 { 1662 return gt_counter_access(env, GTIMER_PHYS, isread); 1663 } 1664 1665 static CPAccessResult gt_vct_access(CPUARMState *env, 1666 const ARMCPRegInfo *ri, 1667 bool isread) 1668 { 1669 return gt_counter_access(env, GTIMER_VIRT, isread); 1670 } 1671 1672 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 1673 bool isread) 1674 { 1675 return gt_timer_access(env, GTIMER_PHYS, isread); 1676 } 1677 1678 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 1679 bool isread) 1680 { 1681 return gt_timer_access(env, GTIMER_VIRT, isread); 1682 } 1683 1684 static CPAccessResult gt_stimer_access(CPUARMState *env, 1685 const ARMCPRegInfo *ri, 1686 bool isread) 1687 { 1688 /* The AArch64 register view of the secure physical timer is 1689 * always accessible from EL3, and configurably accessible from 1690 * Secure EL1. 
1691 */ 1692 switch (arm_current_el(env)) { 1693 case 1: 1694 if (!arm_is_secure(env)) { 1695 return CP_ACCESS_TRAP; 1696 } 1697 if (!(env->cp15.scr_el3 & SCR_ST)) { 1698 return CP_ACCESS_TRAP_EL3; 1699 } 1700 return CP_ACCESS_OK; 1701 case 0: 1702 case 2: 1703 return CP_ACCESS_TRAP; 1704 case 3: 1705 return CP_ACCESS_OK; 1706 default: 1707 g_assert_not_reached(); 1708 } 1709 } 1710 1711 static uint64_t gt_get_countervalue(CPUARMState *env) 1712 { 1713 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; 1714 } 1715 1716 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 1717 { 1718 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 1719 1720 if (gt->ctl & 1) { 1721 /* Timer enabled: calculate and set current ISTATUS, irq, and 1722 * reset timer to when ISTATUS next has to change 1723 */ 1724 uint64_t offset = timeridx == GTIMER_VIRT ? 1725 cpu->env.cp15.cntvoff_el2 : 0; 1726 uint64_t count = gt_get_countervalue(&cpu->env); 1727 /* Note that this must be unsigned 64 bit arithmetic: */ 1728 int istatus = count - offset >= gt->cval; 1729 uint64_t nexttick; 1730 int irqstate; 1731 1732 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 1733 1734 irqstate = (istatus && !(gt->ctl & 2)); 1735 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1736 1737 if (istatus) { 1738 /* Next transition is when count rolls back over to zero */ 1739 nexttick = UINT64_MAX; 1740 } else { 1741 /* Next transition is when we hit cval */ 1742 nexttick = gt->cval + offset; 1743 } 1744 /* Note that the desired next expiry time might be beyond the 1745 * signed-64-bit range of a QEMUTimer -- in this case we just 1746 * set the timer for as far in the future as possible. When the 1747 * timer expires we will reset the timer for any remaining period. 1748 */ 1749 if (nexttick > INT64_MAX / GTIMER_SCALE) { 1750 nexttick = INT64_MAX / GTIMER_SCALE; 1751 } 1752 timer_mod(cpu->gt_timer[timeridx], nexttick); 1753 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 1754 } else { 1755 /* Timer disabled: ISTATUS and timer output always clear */ 1756 gt->ctl &= ~4; 1757 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 1758 timer_del(cpu->gt_timer[timeridx]); 1759 trace_arm_gt_recalc_disabled(timeridx); 1760 } 1761 } 1762 1763 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 1764 int timeridx) 1765 { 1766 ARMCPU *cpu = arm_env_get_cpu(env); 1767 1768 timer_del(cpu->gt_timer[timeridx]); 1769 } 1770 1771 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1772 { 1773 return gt_get_countervalue(env); 1774 } 1775 1776 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 1777 { 1778 return gt_get_countervalue(env) - env->cp15.cntvoff_el2; 1779 } 1780 1781 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1782 int timeridx, 1783 uint64_t value) 1784 { 1785 trace_arm_gt_cval_write(timeridx, value); 1786 env->cp15.c14_timer[timeridx].cval = value; 1787 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1788 } 1789 1790 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 1791 int timeridx) 1792 { 1793 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0; 1794 1795 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 1796 (gt_get_countervalue(env) - offset)); 1797 } 1798 1799 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1800 int timeridx, 1801 uint64_t value) 1802 { 1803 uint64_t offset = timeridx == GTIMER_VIRT ? 
env->cp15.cntvoff_el2 : 0; 1804 1805 trace_arm_gt_tval_write(timeridx, value); 1806 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 1807 sextract64(value, 0, 32); 1808 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 1809 } 1810 1811 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1812 int timeridx, 1813 uint64_t value) 1814 { 1815 ARMCPU *cpu = arm_env_get_cpu(env); 1816 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 1817 1818 trace_arm_gt_ctl_write(timeridx, value); 1819 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 1820 if ((oldval ^ value) & 1) { 1821 /* Enable toggled */ 1822 gt_recalc_timer(cpu, timeridx); 1823 } else if ((oldval ^ value) & 2) { 1824 /* IMASK toggled: don't need to recalculate, 1825 * just set the interrupt line based on ISTATUS 1826 */ 1827 int irqstate = (oldval & 4) && !(value & 2); 1828 1829 trace_arm_gt_imask_toggle(timeridx, irqstate); 1830 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 1831 } 1832 } 1833 1834 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1835 { 1836 gt_timer_reset(env, ri, GTIMER_PHYS); 1837 } 1838 1839 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1840 uint64_t value) 1841 { 1842 gt_cval_write(env, ri, GTIMER_PHYS, value); 1843 } 1844 1845 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1846 { 1847 return gt_tval_read(env, ri, GTIMER_PHYS); 1848 } 1849 1850 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1851 uint64_t value) 1852 { 1853 gt_tval_write(env, ri, GTIMER_PHYS, value); 1854 } 1855 1856 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1857 uint64_t value) 1858 { 1859 gt_ctl_write(env, ri, GTIMER_PHYS, value); 1860 } 1861 1862 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1863 { 1864 gt_timer_reset(env, ri, GTIMER_VIRT); 1865 } 1866 1867 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1868 uint64_t value) 1869 { 1870 gt_cval_write(env, ri, GTIMER_VIRT, value); 1871 } 1872 1873 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1874 { 1875 return gt_tval_read(env, ri, GTIMER_VIRT); 1876 } 1877 1878 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1879 uint64_t value) 1880 { 1881 gt_tval_write(env, ri, GTIMER_VIRT, value); 1882 } 1883 1884 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1885 uint64_t value) 1886 { 1887 gt_ctl_write(env, ri, GTIMER_VIRT, value); 1888 } 1889 1890 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 1891 uint64_t value) 1892 { 1893 ARMCPU *cpu = arm_env_get_cpu(env); 1894 1895 trace_arm_gt_cntvoff_write(value); 1896 raw_write(env, ri, value); 1897 gt_recalc_timer(cpu, GTIMER_VIRT); 1898 } 1899 1900 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1901 { 1902 gt_timer_reset(env, ri, GTIMER_HYP); 1903 } 1904 1905 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1906 uint64_t value) 1907 { 1908 gt_cval_write(env, ri, GTIMER_HYP, value); 1909 } 1910 1911 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1912 { 1913 return gt_tval_read(env, ri, GTIMER_HYP); 1914 } 1915 1916 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1917 uint64_t value) 1918 { 1919 gt_tval_write(env, ri, GTIMER_HYP, value); 1920 } 1921 1922 static void gt_hyp_ctl_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 1923 uint64_t value) 1924 { 1925 gt_ctl_write(env, ri, GTIMER_HYP, value); 1926 } 1927 1928 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1929 { 1930 gt_timer_reset(env, ri, GTIMER_SEC); 1931 } 1932 1933 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1934 uint64_t value) 1935 { 1936 gt_cval_write(env, ri, GTIMER_SEC, value); 1937 } 1938 1939 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 1940 { 1941 return gt_tval_read(env, ri, GTIMER_SEC); 1942 } 1943 1944 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 1945 uint64_t value) 1946 { 1947 gt_tval_write(env, ri, GTIMER_SEC, value); 1948 } 1949 1950 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 1951 uint64_t value) 1952 { 1953 gt_ctl_write(env, ri, GTIMER_SEC, value); 1954 } 1955 1956 void arm_gt_ptimer_cb(void *opaque) 1957 { 1958 ARMCPU *cpu = opaque; 1959 1960 gt_recalc_timer(cpu, GTIMER_PHYS); 1961 } 1962 1963 void arm_gt_vtimer_cb(void *opaque) 1964 { 1965 ARMCPU *cpu = opaque; 1966 1967 gt_recalc_timer(cpu, GTIMER_VIRT); 1968 } 1969 1970 void arm_gt_htimer_cb(void *opaque) 1971 { 1972 ARMCPU *cpu = opaque; 1973 1974 gt_recalc_timer(cpu, GTIMER_HYP); 1975 } 1976 1977 void arm_gt_stimer_cb(void *opaque) 1978 { 1979 ARMCPU *cpu = opaque; 1980 1981 gt_recalc_timer(cpu, GTIMER_SEC); 1982 } 1983 1984 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 1985 /* Note that CNTFRQ is purely reads-as-written for the benefit 1986 * of software; writing it doesn't actually change the timer frequency. 1987 * Our reset value matches the fixed frequency we implement the timer at. 1988 */ 1989 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 1990 .type = ARM_CP_ALIAS, 1991 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1992 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 1993 }, 1994 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 1995 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 1996 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 1997 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 1998 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 1999 }, 2000 /* overall control: mostly access permissions */ 2001 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2002 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2003 .access = PL1_RW, 2004 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2005 .resetvalue = 0, 2006 }, 2007 /* per-timer control */ 2008 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2009 .secure = ARM_CP_SECSTATE_NS, 2010 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2011 .accessfn = gt_ptimer_access, 2012 .fieldoffset = offsetoflow32(CPUARMState, 2013 cp15.c14_timer[GTIMER_PHYS].ctl), 2014 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2015 }, 2016 { .name = "CNTP_CTL_S", 2017 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2018 .secure = ARM_CP_SECSTATE_S, 2019 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2020 .accessfn = gt_ptimer_access, 2021 .fieldoffset = offsetoflow32(CPUARMState, 2022 cp15.c14_timer[GTIMER_SEC].ctl), 2023 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2024 }, 2025 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2026 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2027 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2028 .accessfn = gt_ptimer_access, 2029 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].ctl), 2030 .resetvalue = 0, 2031 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2032 }, 2033 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2034 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2035 .accessfn = gt_vtimer_access, 2036 .fieldoffset = offsetoflow32(CPUARMState, 2037 cp15.c14_timer[GTIMER_VIRT].ctl), 2038 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2039 }, 2040 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2041 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2042 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2043 .accessfn = gt_vtimer_access, 2044 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2045 .resetvalue = 0, 2046 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2047 }, 2048 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2049 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2050 .secure = ARM_CP_SECSTATE_NS, 2051 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2052 .accessfn = gt_ptimer_access, 2053 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2054 }, 2055 { .name = "CNTP_TVAL_S", 2056 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2057 .secure = ARM_CP_SECSTATE_S, 2058 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2059 .accessfn = gt_ptimer_access, 2060 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2061 }, 2062 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2063 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2064 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2065 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2066 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2067 }, 2068 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2069 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2070 .accessfn = gt_vtimer_access, 2071 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2072 }, 2073 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2074 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2075 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2076 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2077 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2078 }, 2079 /* The counter itself */ 2080 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2081 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2082 .accessfn = gt_pct_access, 2083 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2084 }, 2085 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2086 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2087 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2088 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2089 }, 2090 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2091 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2092 .accessfn = gt_vct_access, 2093 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2094 }, 2095 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2096 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2097 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2098 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2099 }, 2100 /* Comparison value, indicating when the timer goes off */ 2101 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2102 .secure = ARM_CP_SECSTATE_NS, 2103 .access = PL1_RW | 
PL0_R, 2104 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2105 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2106 .accessfn = gt_ptimer_access, 2107 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2108 }, 2109 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 2110 .secure = ARM_CP_SECSTATE_S, 2111 .access = PL1_RW | PL0_R, 2112 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2113 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2114 .accessfn = gt_ptimer_access, 2115 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2116 }, 2117 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2118 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 2119 .access = PL1_RW | PL0_R, 2120 .type = ARM_CP_IO, 2121 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2122 .resetvalue = 0, .accessfn = gt_ptimer_access, 2123 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2124 }, 2125 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 2126 .access = PL1_RW | PL0_R, 2127 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2128 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2129 .accessfn = gt_vtimer_access, 2130 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2131 }, 2132 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2133 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 2134 .access = PL1_RW | PL0_R, 2135 .type = ARM_CP_IO, 2136 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2137 .resetvalue = 0, .accessfn = gt_vtimer_access, 2138 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2139 }, 2140 /* Secure timer -- this is actually restricted to only EL3 2141 * and configurably Secure-EL1 via the accessfn. 2142 */ 2143 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 2144 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 2145 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 2146 .accessfn = gt_stimer_access, 2147 .readfn = gt_sec_tval_read, 2148 .writefn = gt_sec_tval_write, 2149 .resetfn = gt_sec_timer_reset, 2150 }, 2151 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 2152 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 2153 .type = ARM_CP_IO, .access = PL1_RW, 2154 .accessfn = gt_stimer_access, 2155 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 2156 .resetvalue = 0, 2157 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2158 }, 2159 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 2160 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 2161 .type = ARM_CP_IO, .access = PL1_RW, 2162 .accessfn = gt_stimer_access, 2163 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2164 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2165 }, 2166 REGINFO_SENTINEL 2167 }; 2168 2169 #else 2170 /* In user-mode none of the generic timer registers are accessible, 2171 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs, 2172 * so instead just don't register any of them. 
2173 */ 2174 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2175 REGINFO_SENTINEL 2176 }; 2177 2178 #endif 2179 2180 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2181 { 2182 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2183 raw_write(env, ri, value); 2184 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2185 raw_write(env, ri, value & 0xfffff6ff); 2186 } else { 2187 raw_write(env, ri, value & 0xfffff1ff); 2188 } 2189 } 2190 2191 #ifndef CONFIG_USER_ONLY 2192 /* get_phys_addr() isn't present for user-mode-only targets */ 2193 2194 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2195 bool isread) 2196 { 2197 if (ri->opc2 & 4) { 2198 /* The ATS12NSO* operations must trap to EL3 if executed in 2199 * Secure EL1 (which can only happen if EL3 is AArch64). 2200 * They are simply UNDEF if executed from NS EL1. 2201 * They function normally from EL2 or EL3. 2202 */ 2203 if (arm_current_el(env) == 1) { 2204 if (arm_is_secure_below_el3(env)) { 2205 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2206 } 2207 return CP_ACCESS_TRAP_UNCATEGORIZED; 2208 } 2209 } 2210 return CP_ACCESS_OK; 2211 } 2212 2213 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2214 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2215 { 2216 hwaddr phys_addr; 2217 target_ulong page_size; 2218 int prot; 2219 bool ret; 2220 uint64_t par64; 2221 bool format64 = false; 2222 MemTxAttrs attrs = {}; 2223 ARMMMUFaultInfo fi = {}; 2224 ARMCacheAttrs cacheattrs = {}; 2225 2226 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2227 &prot, &page_size, &fi, &cacheattrs); 2228 2229 if (is_a64(env)) { 2230 format64 = true; 2231 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2232 /* 2233 * ATS1Cxx: 2234 * * TTBCR.EAE determines whether the result is returned using the 2235 * 32-bit or the 64-bit PAR format 2236 * * Instructions executed in Hyp mode always use the 64bit format 2237 * 2238 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2239 * * The Non-secure TTBCR.EAE bit is set to 1 2240 * * The implementation includes EL2, and the value of HCR.VM is 1 2241 * 2242 * ATS1Hx always uses the 64bit format (not supported yet). 2243 */ 2244 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2245 2246 if (arm_feature(env, ARM_FEATURE_EL2)) { 2247 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2248 format64 |= env->cp15.hcr_el2 & HCR_VM; 2249 } else { 2250 format64 |= arm_current_el(env) == 2; 2251 } 2252 } 2253 } 2254 2255 if (format64) { 2256 /* Create a 64-bit PAR */ 2257 par64 = (1 << 11); /* LPAE bit always set */ 2258 if (!ret) { 2259 par64 |= phys_addr & ~0xfffULL; 2260 if (!attrs.secure) { 2261 par64 |= (1 << 9); /* NS */ 2262 } 2263 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2264 par64 |= cacheattrs.shareability << 7; /* SH */ 2265 } else { 2266 uint32_t fsr = arm_fi_to_lfsc(&fi); 2267 2268 par64 |= 1; /* F */ 2269 par64 |= (fsr & 0x3f) << 1; /* FS */ 2270 /* Note that S2WLK and FSTAGE are always zero, because we don't 2271 * implement virtualization and therefore there can't be a stage 2 2272 * fault. 2273 */ 2274 } 2275 } else { 2276 /* fsr is a DFSR/IFSR value for the short descriptor 2277 * translation table format (with WnR always clear). 2278 * Convert it to a 32-bit PAR. 
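         * As a rough worked example (hypothetical addresses): a successful
         * lookup of a 4K page at PA 0x80123000 in Non-secure state produces
         * PAR = 0x80123000 | (1 << 9) = 0x80123200 with the F bit clear; a
         * 16MB supersection on a v7 CPU reports only PA[31:24] and sets
         * bit 1; and a failed lookup sets F (bit 0) plus the fault status
         * bits derived from the short-format FSR.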
2279 */ 2280 if (!ret) { 2281 /* We do not set any attribute bits in the PAR */ 2282 if (page_size == (1 << 24) 2283 && arm_feature(env, ARM_FEATURE_V7)) { 2284 par64 = (phys_addr & 0xff000000) | (1 << 1); 2285 } else { 2286 par64 = phys_addr & 0xfffff000; 2287 } 2288 if (!attrs.secure) { 2289 par64 |= (1 << 9); /* NS */ 2290 } 2291 } else { 2292 uint32_t fsr = arm_fi_to_sfsc(&fi); 2293 2294 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 2295 ((fsr & 0xf) << 1) | 1; 2296 } 2297 } 2298 return par64; 2299 } 2300 2301 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2302 { 2303 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2304 uint64_t par64; 2305 ARMMMUIdx mmu_idx; 2306 int el = arm_current_el(env); 2307 bool secure = arm_is_secure_below_el3(env); 2308 2309 switch (ri->opc2 & 6) { 2310 case 0: 2311 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 2312 switch (el) { 2313 case 3: 2314 mmu_idx = ARMMMUIdx_S1E3; 2315 break; 2316 case 2: 2317 mmu_idx = ARMMMUIdx_S1NSE1; 2318 break; 2319 case 1: 2320 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2321 break; 2322 default: 2323 g_assert_not_reached(); 2324 } 2325 break; 2326 case 2: 2327 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 2328 switch (el) { 2329 case 3: 2330 mmu_idx = ARMMMUIdx_S1SE0; 2331 break; 2332 case 2: 2333 mmu_idx = ARMMMUIdx_S1NSE0; 2334 break; 2335 case 1: 2336 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2337 break; 2338 default: 2339 g_assert_not_reached(); 2340 } 2341 break; 2342 case 4: 2343 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 2344 mmu_idx = ARMMMUIdx_S12NSE1; 2345 break; 2346 case 6: 2347 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 2348 mmu_idx = ARMMMUIdx_S12NSE0; 2349 break; 2350 default: 2351 g_assert_not_reached(); 2352 } 2353 2354 par64 = do_ats_write(env, value, access_type, mmu_idx); 2355 2356 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2357 } 2358 2359 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 2360 uint64_t value) 2361 { 2362 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2363 uint64_t par64; 2364 2365 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS); 2366 2367 A32_BANKED_CURRENT_REG_SET(env, par, par64); 2368 } 2369 2370 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 2371 bool isread) 2372 { 2373 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 2374 return CP_ACCESS_TRAP; 2375 } 2376 return CP_ACCESS_OK; 2377 } 2378 2379 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 2380 uint64_t value) 2381 { 2382 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 2383 ARMMMUIdx mmu_idx; 2384 int secure = arm_is_secure_below_el3(env); 2385 2386 switch (ri->opc2 & 6) { 2387 case 0: 2388 switch (ri->opc1) { 2389 case 0: /* AT S1E1R, AT S1E1W */ 2390 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 2391 break; 2392 case 4: /* AT S1E2R, AT S1E2W */ 2393 mmu_idx = ARMMMUIdx_S1E2; 2394 break; 2395 case 6: /* AT S1E3R, AT S1E3W */ 2396 mmu_idx = ARMMMUIdx_S1E3; 2397 break; 2398 default: 2399 g_assert_not_reached(); 2400 } 2401 break; 2402 case 2: /* AT S1E0R, AT S1E0W */ 2403 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 2404 break; 2405 case 4: /* AT S12E1R, AT S12E1W */ 2406 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 2407 break; 2408 case 6: /* AT S12E0R, AT S12E0W */ 2409 mmu_idx = secure ? 
ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 2410 break; 2411 default: 2412 g_assert_not_reached(); 2413 } 2414 2415 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 2416 } 2417 #endif 2418 2419 static const ARMCPRegInfo vapa_cp_reginfo[] = { 2420 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 2421 .access = PL1_RW, .resetvalue = 0, 2422 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 2423 offsetoflow32(CPUARMState, cp15.par_ns) }, 2424 .writefn = par_write }, 2425 #ifndef CONFIG_USER_ONLY 2426 /* This underdecoding is safe because the reginfo is NO_RAW. */ 2427 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 2428 .access = PL1_W, .accessfn = ats_access, 2429 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 2430 #endif 2431 REGINFO_SENTINEL 2432 }; 2433 2434 /* Return basic MPU access permission bits. */ 2435 static uint32_t simple_mpu_ap_bits(uint32_t val) 2436 { 2437 uint32_t ret; 2438 uint32_t mask; 2439 int i; 2440 ret = 0; 2441 mask = 3; 2442 for (i = 0; i < 16; i += 2) { 2443 ret |= (val >> i) & mask; 2444 mask <<= 2; 2445 } 2446 return ret; 2447 } 2448 2449 /* Pad basic MPU access permission bits to extended format. */ 2450 static uint32_t extended_mpu_ap_bits(uint32_t val) 2451 { 2452 uint32_t ret; 2453 uint32_t mask; 2454 int i; 2455 ret = 0; 2456 mask = 3; 2457 for (i = 0; i < 16; i += 2) { 2458 ret |= (val & mask) << i; 2459 mask <<= 2; 2460 } 2461 return ret; 2462 } 2463 2464 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2465 uint64_t value) 2466 { 2467 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 2468 } 2469 2470 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2471 { 2472 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 2473 } 2474 2475 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 2476 uint64_t value) 2477 { 2478 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 2479 } 2480 2481 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 2482 { 2483 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 2484 } 2485 2486 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 2487 { 2488 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2489 2490 if (!u32p) { 2491 return 0; 2492 } 2493 2494 u32p += env->pmsav7.rnr[M_REG_NS]; 2495 return *u32p; 2496 } 2497 2498 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 2499 uint64_t value) 2500 { 2501 ARMCPU *cpu = arm_env_get_cpu(env); 2502 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 2503 2504 if (!u32p) { 2505 return; 2506 } 2507 2508 u32p += env->pmsav7.rnr[M_REG_NS]; 2509 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 2510 *u32p = value; 2511 } 2512 2513 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2514 uint64_t value) 2515 { 2516 ARMCPU *cpu = arm_env_get_cpu(env); 2517 uint32_t nrgs = cpu->pmsav7_dregion; 2518 2519 if (value >= nrgs) { 2520 qemu_log_mask(LOG_GUEST_ERROR, 2521 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 2522 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 2523 return; 2524 } 2525 2526 raw_write(env, ri, value); 2527 } 2528 2529 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 2530 /* Reset for all these registers is handled in arm_cpu_reset(), 2531 * because the PMSAv7 is also used by M-profile CPUs, which do 2532 * not register cpregs but still need the state to be reset. 
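     * Note that pmsav7_read()/pmsav7_write() above index the per-region
     * arrays by the current RGNR value: for example with RGNR == 2 a DRBAR
     * access touches env->pmsav7.drbar[2], so guests select a region first
     * and then read or write its base, size and access-control registers.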
2533 */ 2534 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 2535 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2536 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 2537 .readfn = pmsav7_read, .writefn = pmsav7_write, 2538 .resetfn = arm_cp_reset_ignore }, 2539 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 2540 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2541 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 2542 .readfn = pmsav7_read, .writefn = pmsav7_write, 2543 .resetfn = arm_cp_reset_ignore }, 2544 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 2545 .access = PL1_RW, .type = ARM_CP_NO_RAW, 2546 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 2547 .readfn = pmsav7_read, .writefn = pmsav7_write, 2548 .resetfn = arm_cp_reset_ignore }, 2549 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 2550 .access = PL1_RW, 2551 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 2552 .writefn = pmsav7_rgnr_write, 2553 .resetfn = arm_cp_reset_ignore }, 2554 REGINFO_SENTINEL 2555 }; 2556 2557 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 2558 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2559 .access = PL1_RW, .type = ARM_CP_ALIAS, 2560 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2561 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 2562 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2563 .access = PL1_RW, .type = ARM_CP_ALIAS, 2564 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2565 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 2566 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 2567 .access = PL1_RW, 2568 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 2569 .resetvalue = 0, }, 2570 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 2571 .access = PL1_RW, 2572 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 2573 .resetvalue = 0, }, 2574 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 2575 .access = PL1_RW, 2576 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 2577 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 2578 .access = PL1_RW, 2579 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 2580 /* Protection region base and size registers */ 2581 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 2582 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2583 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 2584 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 2585 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2586 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 2587 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 2588 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2589 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 2590 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 2591 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2592 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 2593 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 2594 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2595 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 2596 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 2597 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2598 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 2599 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 2600 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2601 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 2602 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 2603 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 2604 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 2605 REGINFO_SENTINEL 2606 }; 2607 2608 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 2609 uint64_t value) 2610 { 2611 TCR *tcr = raw_ptr(env, ri); 2612 int maskshift = extract32(value, 0, 3); 2613 2614 if (!arm_feature(env, ARM_FEATURE_V8)) { 2615 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 2616 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 2617 * using Long-desciptor translation table format */ 2618 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 2619 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 2620 /* In an implementation that includes the Security Extensions 2621 * TTBCR has additional fields PD0 [4] and PD1 [5] for 2622 * Short-descriptor translation table format. 2623 */ 2624 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 2625 } else { 2626 value &= TTBCR_N; 2627 } 2628 } 2629 2630 /* Update the masks corresponding to the TCR bank being written 2631 * Note that we always calculate mask and base_mask, but 2632 * they are only used for short-descriptor tables (ie if EAE is 0); 2633 * for long-descriptor tables the TCR fields are used differently 2634 * and the mask and base_mask values are meaningless. 2635 */ 2636 tcr->raw_tcr = value; 2637 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 2638 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 2639 } 2640 2641 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2642 uint64_t value) 2643 { 2644 ARMCPU *cpu = arm_env_get_cpu(env); 2645 2646 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2647 /* With LPAE the TTBCR could result in a change of ASID 2648 * via the TTBCR.A1 bit, so do a TLB flush. 2649 */ 2650 tlb_flush(CPU(cpu)); 2651 } 2652 vmsa_ttbcr_raw_write(env, ri, value); 2653 } 2654 2655 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2656 { 2657 TCR *tcr = raw_ptr(env, ri); 2658 2659 /* Reset both the TCR as well as the masks corresponding to the bank of 2660 * the TCR being reset. 2661 */ 2662 tcr->raw_tcr = 0; 2663 tcr->mask = 0; 2664 tcr->base_mask = 0xffffc000u; 2665 } 2666 2667 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 2668 uint64_t value) 2669 { 2670 ARMCPU *cpu = arm_env_get_cpu(env); 2671 TCR *tcr = raw_ptr(env, ri); 2672 2673 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 2674 tlb_flush(CPU(cpu)); 2675 tcr->raw_tcr = value; 2676 } 2677 2678 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2679 uint64_t value) 2680 { 2681 /* 64 bit accesses to the TTBRs can change the ASID and so we 2682 * must flush the TLB. 2683 */ 2684 if (cpreg_field_is_64bit(ri)) { 2685 ARMCPU *cpu = arm_env_get_cpu(env); 2686 2687 tlb_flush(CPU(cpu)); 2688 } 2689 raw_write(env, ri, value); 2690 } 2691 2692 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2693 uint64_t value) 2694 { 2695 ARMCPU *cpu = arm_env_get_cpu(env); 2696 CPUState *cs = CPU(cpu); 2697 2698 /* Accesses to VTTBR may change the VMID so we must flush the TLB. 
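     * Since the VMID tags the Non-secure EL1&0 translations as well as the
     * stage 2 entries, the flush below covers ARMMMUIdxBit_S12NSE0/1 and
     * ARMMMUIdxBit_S2NS together; a write that leaves the value unchanged
     * is skipped to avoid pointless flushes.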
*/ 2699 if (raw_read(env, ri) != value) { 2700 tlb_flush_by_mmuidx(cs, 2701 ARMMMUIdxBit_S12NSE1 | 2702 ARMMMUIdxBit_S12NSE0 | 2703 ARMMMUIdxBit_S2NS); 2704 raw_write(env, ri, value); 2705 } 2706 } 2707 2708 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 2709 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 2710 .access = PL1_RW, .type = ARM_CP_ALIAS, 2711 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 2712 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 2713 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 2714 .access = PL1_RW, .resetvalue = 0, 2715 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 2716 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 2717 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 2718 .access = PL1_RW, .resetvalue = 0, 2719 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 2720 offsetof(CPUARMState, cp15.dfar_ns) } }, 2721 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 2722 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 2723 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 2724 .resetvalue = 0, }, 2725 REGINFO_SENTINEL 2726 }; 2727 2728 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 2729 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 2730 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 2731 .access = PL1_RW, 2732 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 2733 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 2734 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 2735 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2736 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2737 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 2738 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 2739 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 2740 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 2741 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2742 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 2743 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 2744 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2745 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 2746 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 2747 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 2748 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 2749 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 2750 .raw_writefn = vmsa_ttbcr_raw_write, 2751 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 2752 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 2753 REGINFO_SENTINEL 2754 }; 2755 2756 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 2757 uint64_t value) 2758 { 2759 env->cp15.c15_ticonfig = value & 0xe7; 2760 /* The OS_TYPE bit in this register changes the reported CPUID! */ 2761 env->cp15.c0_cpuid = (value & (1 << 5)) ? 
2762 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 2763 } 2764 2765 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 2766 uint64_t value) 2767 { 2768 env->cp15.c15_threadid = value & 0xffff; 2769 } 2770 2771 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 2772 uint64_t value) 2773 { 2774 /* Wait-for-interrupt (deprecated) */ 2775 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 2776 } 2777 2778 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 2779 uint64_t value) 2780 { 2781 /* On OMAP there are registers indicating the max/min index of dcache lines 2782 * containing a dirty line; cache flush operations have to reset these. 2783 */ 2784 env->cp15.c15_i_max = 0x000; 2785 env->cp15.c15_i_min = 0xff0; 2786 } 2787 2788 static const ARMCPRegInfo omap_cp_reginfo[] = { 2789 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 2790 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 2791 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 2792 .resetvalue = 0, }, 2793 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 2794 .access = PL1_RW, .type = ARM_CP_NOP }, 2795 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 2796 .access = PL1_RW, 2797 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 2798 .writefn = omap_ticonfig_write }, 2799 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 2800 .access = PL1_RW, 2801 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 2802 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 2803 .access = PL1_RW, .resetvalue = 0xff0, 2804 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 2805 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 2806 .access = PL1_RW, 2807 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 2808 .writefn = omap_threadid_write }, 2809 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 2810 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2811 .type = ARM_CP_NO_RAW, 2812 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 2813 /* TODO: Peripheral port remap register: 2814 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 2815 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 2816 * when MMU is off. 
2817 */ 2818 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 2819 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 2820 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 2821 .writefn = omap_cachemaint_write }, 2822 { .name = "C9", .cp = 15, .crn = 9, 2823 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 2824 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 2825 REGINFO_SENTINEL 2826 }; 2827 2828 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 2829 uint64_t value) 2830 { 2831 env->cp15.c15_cpar = value & 0x3fff; 2832 } 2833 2834 static const ARMCPRegInfo xscale_cp_reginfo[] = { 2835 { .name = "XSCALE_CPAR", 2836 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 2837 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 2838 .writefn = xscale_cpar_write, }, 2839 { .name = "XSCALE_AUXCR", 2840 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 2841 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 2842 .resetvalue = 0, }, 2843 /* XScale specific cache-lockdown: since we have no cache we NOP these 2844 * and hope the guest does not really rely on cache behaviour. 2845 */ 2846 { .name = "XSCALE_LOCK_ICACHE_LINE", 2847 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 2848 .access = PL1_W, .type = ARM_CP_NOP }, 2849 { .name = "XSCALE_UNLOCK_ICACHE", 2850 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 2851 .access = PL1_W, .type = ARM_CP_NOP }, 2852 { .name = "XSCALE_DCACHE_LOCK", 2853 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 2854 .access = PL1_RW, .type = ARM_CP_NOP }, 2855 { .name = "XSCALE_UNLOCK_DCACHE", 2856 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 2857 .access = PL1_W, .type = ARM_CP_NOP }, 2858 REGINFO_SENTINEL 2859 }; 2860 2861 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 2862 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 2863 * implementation of this implementation-defined space. 2864 * Ideally this should eventually disappear in favour of actually 2865 * implementing the correct behaviour for all cores. 
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
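         * As an illustration with made-up affinity values: on a CPU with
         * the MP extensions, an SMP core whose mp_affinity is 0x003 reads
         * back MPIDR == 0x80000003 once bit 31 is ORed in, while a
         * uniprocessor core with affinity 0 reads back 0xC0000000.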
2948 */ 2949 if (cpu->mp_is_up) { 2950 mpidr |= (1u << 30); 2951 } 2952 } 2953 return mpidr; 2954 } 2955 2956 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2957 { 2958 unsigned int cur_el = arm_current_el(env); 2959 bool secure = arm_is_secure(env); 2960 2961 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 2962 return env->cp15.vmpidr_el2; 2963 } 2964 return mpidr_read_val(env); 2965 } 2966 2967 static const ARMCPRegInfo mpidr_cp_reginfo[] = { 2968 { .name = "MPIDR", .state = ARM_CP_STATE_BOTH, 2969 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 2970 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 2971 REGINFO_SENTINEL 2972 }; 2973 2974 static const ARMCPRegInfo lpae_cp_reginfo[] = { 2975 /* NOP AMAIR0/1 */ 2976 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 2977 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 2978 .access = PL1_RW, .type = ARM_CP_CONST, 2979 .resetvalue = 0 }, 2980 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 2981 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 2982 .access = PL1_RW, .type = ARM_CP_CONST, 2983 .resetvalue = 0 }, 2984 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 2985 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 2986 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 2987 offsetof(CPUARMState, cp15.par_ns)} }, 2988 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 2989 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 2990 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 2991 offsetof(CPUARMState, cp15.ttbr0_ns) }, 2992 .writefn = vmsa_ttbr_write, }, 2993 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 2994 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 2995 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 2996 offsetof(CPUARMState, cp15.ttbr1_ns) }, 2997 .writefn = vmsa_ttbr_write, }, 2998 REGINFO_SENTINEL 2999 }; 3000 3001 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3002 { 3003 return vfp_get_fpcr(env); 3004 } 3005 3006 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3007 uint64_t value) 3008 { 3009 vfp_set_fpcr(env, value); 3010 } 3011 3012 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3013 { 3014 return vfp_get_fpsr(env); 3015 } 3016 3017 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3018 uint64_t value) 3019 { 3020 vfp_set_fpsr(env, value); 3021 } 3022 3023 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 3024 bool isread) 3025 { 3026 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 3027 return CP_ACCESS_TRAP; 3028 } 3029 return CP_ACCESS_OK; 3030 } 3031 3032 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 3033 uint64_t value) 3034 { 3035 env->daif = value & PSTATE_DAIF; 3036 } 3037 3038 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 3039 const ARMCPRegInfo *ri, 3040 bool isread) 3041 { 3042 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 3043 * SCTLR_EL1.UCI is set. 
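     * For example a DC CVAU issued at EL0 with SCTLR_EL1.UCI clear takes
     * this trap, whereas the same instruction at EL1 (or at EL0 with UCI
     * set) is accepted and then simply does nothing, since QEMU does not
     * model the caches.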
3044 */ 3045 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 3046 return CP_ACCESS_TRAP; 3047 } 3048 return CP_ACCESS_OK; 3049 } 3050 3051 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3052 * Page D4-1736 (DDI0487A.b) 3053 */ 3054 3055 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3056 uint64_t value) 3057 { 3058 CPUState *cs = ENV_GET_CPU(env); 3059 3060 if (arm_is_secure_below_el3(env)) { 3061 tlb_flush_by_mmuidx(cs, 3062 ARMMMUIdxBit_S1SE1 | 3063 ARMMMUIdxBit_S1SE0); 3064 } else { 3065 tlb_flush_by_mmuidx(cs, 3066 ARMMMUIdxBit_S12NSE1 | 3067 ARMMMUIdxBit_S12NSE0); 3068 } 3069 } 3070 3071 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3072 uint64_t value) 3073 { 3074 CPUState *cs = ENV_GET_CPU(env); 3075 bool sec = arm_is_secure_below_el3(env); 3076 3077 if (sec) { 3078 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3079 ARMMMUIdxBit_S1SE1 | 3080 ARMMMUIdxBit_S1SE0); 3081 } else { 3082 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3083 ARMMMUIdxBit_S12NSE1 | 3084 ARMMMUIdxBit_S12NSE0); 3085 } 3086 } 3087 3088 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3089 uint64_t value) 3090 { 3091 /* Note that the 'ALL' scope must invalidate both stage 1 and 3092 * stage 2 translations, whereas most other scopes only invalidate 3093 * stage 1 translations. 3094 */ 3095 ARMCPU *cpu = arm_env_get_cpu(env); 3096 CPUState *cs = CPU(cpu); 3097 3098 if (arm_is_secure_below_el3(env)) { 3099 tlb_flush_by_mmuidx(cs, 3100 ARMMMUIdxBit_S1SE1 | 3101 ARMMMUIdxBit_S1SE0); 3102 } else { 3103 if (arm_feature(env, ARM_FEATURE_EL2)) { 3104 tlb_flush_by_mmuidx(cs, 3105 ARMMMUIdxBit_S12NSE1 | 3106 ARMMMUIdxBit_S12NSE0 | 3107 ARMMMUIdxBit_S2NS); 3108 } else { 3109 tlb_flush_by_mmuidx(cs, 3110 ARMMMUIdxBit_S12NSE1 | 3111 ARMMMUIdxBit_S12NSE0); 3112 } 3113 } 3114 } 3115 3116 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3117 uint64_t value) 3118 { 3119 ARMCPU *cpu = arm_env_get_cpu(env); 3120 CPUState *cs = CPU(cpu); 3121 3122 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3123 } 3124 3125 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3126 uint64_t value) 3127 { 3128 ARMCPU *cpu = arm_env_get_cpu(env); 3129 CPUState *cs = CPU(cpu); 3130 3131 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3132 } 3133 3134 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3135 uint64_t value) 3136 { 3137 /* Note that the 'ALL' scope must invalidate both stage 1 and 3138 * stage 2 translations, whereas most other scopes only invalidate 3139 * stage 1 translations. 
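     * In the Non-secure case with EL2 present this means flushing
     * ARMMMUIdxBit_S12NSE0/1 plus ARMMMUIdxBit_S2NS, whereas e.g. a
     * TLBI VAE1IS leaves the stage-2-only index alone.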
3140 */ 3141 CPUState *cs = ENV_GET_CPU(env); 3142 bool sec = arm_is_secure_below_el3(env); 3143 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3144 3145 if (sec) { 3146 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3147 ARMMMUIdxBit_S1SE1 | 3148 ARMMMUIdxBit_S1SE0); 3149 } else if (has_el2) { 3150 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3151 ARMMMUIdxBit_S12NSE1 | 3152 ARMMMUIdxBit_S12NSE0 | 3153 ARMMMUIdxBit_S2NS); 3154 } else { 3155 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3156 ARMMMUIdxBit_S12NSE1 | 3157 ARMMMUIdxBit_S12NSE0); 3158 } 3159 } 3160 3161 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3162 uint64_t value) 3163 { 3164 CPUState *cs = ENV_GET_CPU(env); 3165 3166 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3167 } 3168 3169 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3170 uint64_t value) 3171 { 3172 CPUState *cs = ENV_GET_CPU(env); 3173 3174 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3175 } 3176 3177 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3178 uint64_t value) 3179 { 3180 /* Invalidate by VA, EL1&0 (AArch64 version). 3181 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3182 * since we don't support flush-for-specific-ASID-only or 3183 * flush-last-level-only. 3184 */ 3185 ARMCPU *cpu = arm_env_get_cpu(env); 3186 CPUState *cs = CPU(cpu); 3187 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3188 3189 if (arm_is_secure_below_el3(env)) { 3190 tlb_flush_page_by_mmuidx(cs, pageaddr, 3191 ARMMMUIdxBit_S1SE1 | 3192 ARMMMUIdxBit_S1SE0); 3193 } else { 3194 tlb_flush_page_by_mmuidx(cs, pageaddr, 3195 ARMMMUIdxBit_S12NSE1 | 3196 ARMMMUIdxBit_S12NSE0); 3197 } 3198 } 3199 3200 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3201 uint64_t value) 3202 { 3203 /* Invalidate by VA, EL2 3204 * Currently handles both VAE2 and VALE2, since we don't support 3205 * flush-last-level-only. 3206 */ 3207 ARMCPU *cpu = arm_env_get_cpu(env); 3208 CPUState *cs = CPU(cpu); 3209 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3210 3211 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3212 } 3213 3214 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3215 uint64_t value) 3216 { 3217 /* Invalidate by VA, EL3 3218 * Currently handles both VAE3 and VALE3, since we don't support 3219 * flush-last-level-only. 
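     * The register argument holds VA[55:12] in its low bits, so the
     * sextract64(value << 12, 0, 56) below rebuilds the page address:
     * e.g. a hypothetical argument of 0x80123 becomes pageaddr 0x80123000,
     * and bit 55 is sign-extended so high kernel-space addresses
     * round-trip correctly.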
3220 */ 3221 ARMCPU *cpu = arm_env_get_cpu(env); 3222 CPUState *cs = CPU(cpu); 3223 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3224 3225 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3226 } 3227 3228 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3229 uint64_t value) 3230 { 3231 ARMCPU *cpu = arm_env_get_cpu(env); 3232 CPUState *cs = CPU(cpu); 3233 bool sec = arm_is_secure_below_el3(env); 3234 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3235 3236 if (sec) { 3237 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3238 ARMMMUIdxBit_S1SE1 | 3239 ARMMMUIdxBit_S1SE0); 3240 } else { 3241 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3242 ARMMMUIdxBit_S12NSE1 | 3243 ARMMMUIdxBit_S12NSE0); 3244 } 3245 } 3246 3247 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3248 uint64_t value) 3249 { 3250 CPUState *cs = ENV_GET_CPU(env); 3251 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3252 3253 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3254 ARMMMUIdxBit_S1E2); 3255 } 3256 3257 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3258 uint64_t value) 3259 { 3260 CPUState *cs = ENV_GET_CPU(env); 3261 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3262 3263 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3264 ARMMMUIdxBit_S1E3); 3265 } 3266 3267 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3268 uint64_t value) 3269 { 3270 /* Invalidate by IPA. This has to invalidate any structures that 3271 * contain only stage 2 translation information, but does not need 3272 * to apply to structures that contain combined stage 1 and stage 2 3273 * translation information. 3274 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 3275 */ 3276 ARMCPU *cpu = arm_env_get_cpu(env); 3277 CPUState *cs = CPU(cpu); 3278 uint64_t pageaddr; 3279 3280 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3281 return; 3282 } 3283 3284 pageaddr = sextract64(value << 12, 0, 48); 3285 3286 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 3287 } 3288 3289 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3290 uint64_t value) 3291 { 3292 CPUState *cs = ENV_GET_CPU(env); 3293 uint64_t pageaddr; 3294 3295 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 3296 return; 3297 } 3298 3299 pageaddr = sextract64(value << 12, 0, 48); 3300 3301 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3302 ARMMMUIdxBit_S2NS); 3303 } 3304 3305 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 3306 bool isread) 3307 { 3308 /* We don't implement EL2, so the only control on DC ZVA is the 3309 * bit in the SCTLR which can prohibit access for EL0. 
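     * That control is SCTLR_EL1.DZE, tested below; when it is clear, EL0
     * DC ZVA traps and aa64_dczid_read() additionally reports DZP (bit 4)
     * set in DCZID_EL0 so that software can discover the restriction
     * before faulting.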
3310 */ 3311 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 3312 return CP_ACCESS_TRAP; 3313 } 3314 return CP_ACCESS_OK; 3315 } 3316 3317 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 3318 { 3319 ARMCPU *cpu = arm_env_get_cpu(env); 3320 int dzp_bit = 1 << 4; 3321 3322 /* DZP indicates whether DC ZVA access is allowed */ 3323 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 3324 dzp_bit = 0; 3325 } 3326 return cpu->dcz_blocksize | dzp_bit; 3327 } 3328 3329 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 3330 bool isread) 3331 { 3332 if (!(env->pstate & PSTATE_SP)) { 3333 /* Access to SP_EL0 is undefined if it's being used as 3334 * the stack pointer. 3335 */ 3336 return CP_ACCESS_TRAP_UNCATEGORIZED; 3337 } 3338 return CP_ACCESS_OK; 3339 } 3340 3341 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 3342 { 3343 return env->pstate & PSTATE_SP; 3344 } 3345 3346 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 3347 { 3348 update_spsel(env, val); 3349 } 3350 3351 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3352 uint64_t value) 3353 { 3354 ARMCPU *cpu = arm_env_get_cpu(env); 3355 3356 if (raw_read(env, ri) == value) { 3357 /* Skip the TLB flush if nothing actually changed; Linux likes 3358 * to do a lot of pointless SCTLR writes. 3359 */ 3360 return; 3361 } 3362 3363 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 3364 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 3365 value &= ~SCTLR_M; 3366 } 3367 3368 raw_write(env, ri, value); 3369 /* ??? Lots of these bits are not implemented. */ 3370 /* This may enable/disable the MMU, so do a TLB flush. */ 3371 tlb_flush(CPU(cpu)); 3372 } 3373 3374 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 3375 bool isread) 3376 { 3377 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 3378 return CP_ACCESS_TRAP_FP_EL2; 3379 } 3380 if (env->cp15.cptr_el[3] & CPTR_TFP) { 3381 return CP_ACCESS_TRAP_FP_EL3; 3382 } 3383 return CP_ACCESS_OK; 3384 } 3385 3386 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3387 uint64_t value) 3388 { 3389 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 3390 } 3391 3392 static const ARMCPRegInfo v8_cp_reginfo[] = { 3393 /* Minimal set of EL0-visible registers. This will need to be expanded 3394 * significantly for system emulation of AArch64 CPUs. 
3395 */ 3396 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 3397 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 3398 .access = PL0_RW, .type = ARM_CP_NZCV }, 3399 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 3400 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 3401 .type = ARM_CP_NO_RAW, 3402 .access = PL0_RW, .accessfn = aa64_daif_access, 3403 .fieldoffset = offsetof(CPUARMState, daif), 3404 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 3405 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 3406 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 3407 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3408 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 3409 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 3410 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 3411 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 3412 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 3413 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 3414 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 3415 .access = PL0_R, .type = ARM_CP_NO_RAW, 3416 .readfn = aa64_dczid_read }, 3417 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 3418 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 3419 .access = PL0_W, .type = ARM_CP_DC_ZVA, 3420 #ifndef CONFIG_USER_ONLY 3421 /* Avoid overhead of an access check that always passes in user-mode */ 3422 .accessfn = aa64_zva_access, 3423 #endif 3424 }, 3425 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 3426 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 3427 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 3428 /* Cache ops: all NOPs since we don't emulate caches */ 3429 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 3430 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3431 .access = PL1_W, .type = ARM_CP_NOP }, 3432 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 3433 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3434 .access = PL1_W, .type = ARM_CP_NOP }, 3435 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 3436 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 3437 .access = PL0_W, .type = ARM_CP_NOP, 3438 .accessfn = aa64_cacheop_access }, 3439 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 3440 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3441 .access = PL1_W, .type = ARM_CP_NOP }, 3442 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 3443 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3444 .access = PL1_W, .type = ARM_CP_NOP }, 3445 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 3446 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 3447 .access = PL0_W, .type = ARM_CP_NOP, 3448 .accessfn = aa64_cacheop_access }, 3449 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 3450 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3451 .access = PL1_W, .type = ARM_CP_NOP }, 3452 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 3453 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 3454 .access = PL0_W, .type = ARM_CP_NOP, 3455 .accessfn = aa64_cacheop_access }, 3456 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 3457 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 3458 .access = PL0_W, .type = ARM_CP_NOP, 3459 .accessfn = aa64_cacheop_access }, 3460 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 3461 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3462 .access = PL1_W, .type = ARM_CP_NOP }, 3463 /* TLBI operations */ 3464 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 3465 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 3466 
.access = PL1_W, .type = ARM_CP_NO_RAW, 3467 .writefn = tlbi_aa64_vmalle1is_write }, 3468 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 3469 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 3470 .access = PL1_W, .type = ARM_CP_NO_RAW, 3471 .writefn = tlbi_aa64_vae1is_write }, 3472 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 3473 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 3474 .access = PL1_W, .type = ARM_CP_NO_RAW, 3475 .writefn = tlbi_aa64_vmalle1is_write }, 3476 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 3477 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 3478 .access = PL1_W, .type = ARM_CP_NO_RAW, 3479 .writefn = tlbi_aa64_vae1is_write }, 3480 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 3481 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3482 .access = PL1_W, .type = ARM_CP_NO_RAW, 3483 .writefn = tlbi_aa64_vae1is_write }, 3484 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 3485 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3486 .access = PL1_W, .type = ARM_CP_NO_RAW, 3487 .writefn = tlbi_aa64_vae1is_write }, 3488 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 3489 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 3490 .access = PL1_W, .type = ARM_CP_NO_RAW, 3491 .writefn = tlbi_aa64_vmalle1_write }, 3492 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 3493 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 3494 .access = PL1_W, .type = ARM_CP_NO_RAW, 3495 .writefn = tlbi_aa64_vae1_write }, 3496 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 3497 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 3498 .access = PL1_W, .type = ARM_CP_NO_RAW, 3499 .writefn = tlbi_aa64_vmalle1_write }, 3500 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 3501 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 3502 .access = PL1_W, .type = ARM_CP_NO_RAW, 3503 .writefn = tlbi_aa64_vae1_write }, 3504 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 3505 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3506 .access = PL1_W, .type = ARM_CP_NO_RAW, 3507 .writefn = tlbi_aa64_vae1_write }, 3508 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 3509 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3510 .access = PL1_W, .type = ARM_CP_NO_RAW, 3511 .writefn = tlbi_aa64_vae1_write }, 3512 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 3513 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3514 .access = PL2_W, .type = ARM_CP_NO_RAW, 3515 .writefn = tlbi_aa64_ipas2e1is_write }, 3516 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 3517 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3518 .access = PL2_W, .type = ARM_CP_NO_RAW, 3519 .writefn = tlbi_aa64_ipas2e1is_write }, 3520 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 3521 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3522 .access = PL2_W, .type = ARM_CP_NO_RAW, 3523 .writefn = tlbi_aa64_alle1is_write }, 3524 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 3525 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 3526 .access = PL2_W, .type = ARM_CP_NO_RAW, 3527 .writefn = tlbi_aa64_alle1is_write }, 3528 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 3529 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3530 .access = PL2_W, .type = ARM_CP_NO_RAW, 3531 .writefn = tlbi_aa64_ipas2e1_write }, 3532 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 3533 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3534 .access = PL2_W, .type = ARM_CP_NO_RAW, 3535 .writefn = 
tlbi_aa64_ipas2e1_write }, 3536 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 3537 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3538 .access = PL2_W, .type = ARM_CP_NO_RAW, 3539 .writefn = tlbi_aa64_alle1_write }, 3540 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 3541 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 3542 .access = PL2_W, .type = ARM_CP_NO_RAW, 3543 .writefn = tlbi_aa64_alle1is_write }, 3544 #ifndef CONFIG_USER_ONLY 3545 /* 64 bit address translation operations */ 3546 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 3547 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 3548 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3549 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 3550 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 3551 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3552 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 3553 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 3554 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3555 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 3556 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 3557 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3558 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 3559 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 3560 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3561 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 3562 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 3563 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3564 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 3565 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 3566 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3567 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 3568 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 3569 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3570 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 3571 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 3572 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 3573 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3574 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 3575 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 3576 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 3577 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 3578 .type = ARM_CP_ALIAS, 3579 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 3580 .access = PL1_RW, .resetvalue = 0, 3581 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 3582 .writefn = par_write }, 3583 #endif 3584 /* TLB invalidate last level of translation table walk */ 3585 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 3586 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 3587 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 3588 .type = ARM_CP_NO_RAW, .access = PL1_W, 3589 .writefn = tlbimvaa_is_write }, 3590 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 3591 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 3592 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 3593 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 3594 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3595 .type = ARM_CP_NO_RAW, .access = PL2_W, 3596 .writefn = 
tlbimva_hyp_write }, 3597 { .name = "TLBIMVALHIS", 3598 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3599 .type = ARM_CP_NO_RAW, .access = PL2_W, 3600 .writefn = tlbimva_hyp_is_write }, 3601 { .name = "TLBIIPAS2", 3602 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 3603 .type = ARM_CP_NO_RAW, .access = PL2_W, 3604 .writefn = tlbiipas2_write }, 3605 { .name = "TLBIIPAS2IS", 3606 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 3607 .type = ARM_CP_NO_RAW, .access = PL2_W, 3608 .writefn = tlbiipas2_is_write }, 3609 { .name = "TLBIIPAS2L", 3610 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 3611 .type = ARM_CP_NO_RAW, .access = PL2_W, 3612 .writefn = tlbiipas2_write }, 3613 { .name = "TLBIIPAS2LIS", 3614 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 3615 .type = ARM_CP_NO_RAW, .access = PL2_W, 3616 .writefn = tlbiipas2_is_write }, 3617 /* 32 bit cache operations */ 3618 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 3619 .type = ARM_CP_NOP, .access = PL1_W }, 3620 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 3621 .type = ARM_CP_NOP, .access = PL1_W }, 3622 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 3623 .type = ARM_CP_NOP, .access = PL1_W }, 3624 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 3625 .type = ARM_CP_NOP, .access = PL1_W }, 3626 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 3627 .type = ARM_CP_NOP, .access = PL1_W }, 3628 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 3629 .type = ARM_CP_NOP, .access = PL1_W }, 3630 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 3631 .type = ARM_CP_NOP, .access = PL1_W }, 3632 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 3633 .type = ARM_CP_NOP, .access = PL1_W }, 3634 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 3635 .type = ARM_CP_NOP, .access = PL1_W }, 3636 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 3637 .type = ARM_CP_NOP, .access = PL1_W }, 3638 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 3639 .type = ARM_CP_NOP, .access = PL1_W }, 3640 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 3641 .type = ARM_CP_NOP, .access = PL1_W }, 3642 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 3643 .type = ARM_CP_NOP, .access = PL1_W }, 3644 /* MMU Domain access control / MPU write buffer control */ 3645 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 3646 .access = PL1_RW, .resetvalue = 0, 3647 .writefn = dacr_write, .raw_writefn = raw_write, 3648 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 3649 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 3650 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 3651 .type = ARM_CP_ALIAS, 3652 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 3653 .access = PL1_RW, 3654 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 3655 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 3656 .type = ARM_CP_ALIAS, 3657 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 3658 .access = PL1_RW, 3659 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 3660 /* We rely on the access checks not allowing the guest to write to the 3661 * state field when SPSel indicates that it's being used as the stack 3662 * pointer. 
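 * (sp_el0_access refuses accesses to the SP_EL0 register while PSTATE.SP
 * is 0, i.e. while SP_EL0 is the stack pointer currently in use.)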
3663 */ 3664 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 3665 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 3666 .access = PL1_RW, .accessfn = sp_el0_access, 3667 .type = ARM_CP_ALIAS, 3668 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 3669 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 3670 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 3671 .access = PL2_RW, .type = ARM_CP_ALIAS, 3672 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 3673 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 3674 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 3675 .type = ARM_CP_NO_RAW, 3676 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 3677 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 3678 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 3679 .type = ARM_CP_ALIAS, 3680 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 3681 .access = PL2_RW, .accessfn = fpexc32_access }, 3682 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 3683 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 3684 .access = PL2_RW, .resetvalue = 0, 3685 .writefn = dacr_write, .raw_writefn = raw_write, 3686 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 3687 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 3688 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 3689 .access = PL2_RW, .resetvalue = 0, 3690 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 3691 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 3692 .type = ARM_CP_ALIAS, 3693 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 3694 .access = PL2_RW, 3695 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 3696 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 3697 .type = ARM_CP_ALIAS, 3698 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 3699 .access = PL2_RW, 3700 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 3701 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 3702 .type = ARM_CP_ALIAS, 3703 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 3704 .access = PL2_RW, 3705 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 3706 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 3707 .type = ARM_CP_ALIAS, 3708 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 3709 .access = PL2_RW, 3710 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 3711 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 3712 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 3713 .resetvalue = 0, 3714 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 3715 { .name = "SDCR", .type = ARM_CP_ALIAS, 3716 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 3717 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 3718 .writefn = sdcr_write, 3719 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 3720 REGINFO_SENTINEL 3721 }; 3722 3723 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
*/ 3724 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 3725 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64, 3726 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3727 .access = PL2_RW, 3728 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3729 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3730 .type = ARM_CP_NO_RAW, 3731 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3732 .access = PL2_RW, 3733 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 3734 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3735 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3736 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3737 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3738 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3739 .access = PL2_RW, .type = ARM_CP_CONST, 3740 .resetvalue = 0 }, 3741 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3742 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3743 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3744 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3745 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3746 .access = PL2_RW, .type = ARM_CP_CONST, 3747 .resetvalue = 0 }, 3748 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3749 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3750 .access = PL2_RW, .type = ARM_CP_CONST, 3751 .resetvalue = 0 }, 3752 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3753 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3754 .access = PL2_RW, .type = ARM_CP_CONST, 3755 .resetvalue = 0 }, 3756 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3757 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3758 .access = PL2_RW, .type = ARM_CP_CONST, 3759 .resetvalue = 0 }, 3760 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3761 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3762 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3763 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 3764 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3765 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3766 .type = ARM_CP_CONST, .resetvalue = 0 }, 3767 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3768 .cp = 15, .opc1 = 6, .crm = 2, 3769 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3770 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 3771 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3772 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3773 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3774 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3775 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3776 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3777 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3778 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3779 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3780 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3781 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3782 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3783 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3784 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3785 .resetvalue = 0 }, 3786 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 3787 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 3788 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3789 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 3790 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 3791 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 
3792 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 3793 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3794 .resetvalue = 0 }, 3795 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 3796 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 3797 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3798 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 3799 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 3800 .resetvalue = 0 }, 3801 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 3802 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 3803 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3804 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 3805 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 3806 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3807 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 3808 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 3809 .access = PL2_RW, .accessfn = access_tda, 3810 .type = ARM_CP_CONST, .resetvalue = 0 }, 3811 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 3812 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 3813 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 3814 .type = ARM_CP_CONST, .resetvalue = 0 }, 3815 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 3816 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 3817 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 3818 REGINFO_SENTINEL 3819 }; 3820 3821 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3822 { 3823 ARMCPU *cpu = arm_env_get_cpu(env); 3824 uint64_t valid_mask = HCR_MASK; 3825 3826 if (arm_feature(env, ARM_FEATURE_EL3)) { 3827 valid_mask &= ~HCR_HCD; 3828 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 3829 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 3830 * However, if we're using the SMC PSCI conduit then QEMU is 3831 * effectively acting like EL3 firmware and so the guest at 3832 * EL2 should retain the ability to prevent EL1 from being 3833 * able to make SMC calls into the ersatz firmware, so in 3834 * that case HCR.TSC should be read/write. 3835 */ 3836 valid_mask &= ~HCR_TSC; 3837 } 3838 3839 /* Clear RES0 bits. 
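 * For example, when EL3 is implemented HCR_EL2.HCD is RES0, so HCR_HCD
 * was removed from valid_mask above and any value written to it is
 * discarded here.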
*/ 3840 value &= valid_mask; 3841 3842 /* These bits change the MMU setup: 3843 * HCR_VM enables stage 2 translation 3844 * HCR_PTW forbids certain page-table setups 3845 * HCR_DC Disables stage1 and enables stage2 translation 3846 */ 3847 if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 3848 tlb_flush(CPU(cpu)); 3849 } 3850 raw_write(env, ri, value); 3851 } 3852 3853 static const ARMCPRegInfo el2_cp_reginfo[] = { 3854 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 3855 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 3856 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 3857 .writefn = hcr_write }, 3858 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 3859 .type = ARM_CP_ALIAS, 3860 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 3861 .access = PL2_RW, 3862 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 3863 { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64, 3864 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 3865 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 3866 { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64, 3867 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 3868 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 3869 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 3870 .type = ARM_CP_ALIAS, 3871 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 3872 .access = PL2_RW, 3873 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 3874 { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64, 3875 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 3876 .access = PL2_RW, .writefn = vbar_write, 3877 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 3878 .resetvalue = 0 }, 3879 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 3880 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 3881 .access = PL3_RW, .type = ARM_CP_ALIAS, 3882 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 3883 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 3884 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 3885 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 3886 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) }, 3887 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 3888 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 3889 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 3890 .resetvalue = 0 }, 3891 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3892 .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 3893 .access = PL2_RW, .type = ARM_CP_ALIAS, 3894 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 3895 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 3896 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 3897 .access = PL2_RW, .type = ARM_CP_CONST, 3898 .resetvalue = 0 }, 3899 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 3900 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 3901 .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 3902 .access = PL2_RW, .type = ARM_CP_CONST, 3903 .resetvalue = 0 }, 3904 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 3905 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 3906 .access = PL2_RW, .type = ARM_CP_CONST, 3907 .resetvalue = 0 }, 3908 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 3909 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 3910 .access = PL2_RW, .type = ARM_CP_CONST, 3911 .resetvalue = 0 }, 3912 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 3913 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 3914 .access = PL2_RW, 3915 /* no .writefn needed as this can't 
cause an ASID change; 3916 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3917 */ 3918 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 3919 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 3920 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3921 .type = ARM_CP_ALIAS, 3922 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3923 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3924 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 3925 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 3926 .access = PL2_RW, 3927 /* no .writefn needed as this can't cause an ASID change; 3928 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 3929 */ 3930 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 3931 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 3932 .cp = 15, .opc1 = 6, .crm = 2, 3933 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3934 .access = PL2_RW, .accessfn = access_el3_aa32ns, 3935 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 3936 .writefn = vttbr_write }, 3937 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 3938 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 3939 .access = PL2_RW, .writefn = vttbr_write, 3940 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 3941 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 3942 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 3943 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 3944 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 3945 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 3946 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 3947 .access = PL2_RW, .resetvalue = 0, 3948 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 3949 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 3950 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 3951 .access = PL2_RW, .resetvalue = 0, 3952 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3953 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 3954 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3955 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 3956 { .name = "TLBIALLNSNH", 3957 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 3958 .type = ARM_CP_NO_RAW, .access = PL2_W, 3959 .writefn = tlbiall_nsnh_write }, 3960 { .name = "TLBIALLNSNHIS", 3961 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 3962 .type = ARM_CP_NO_RAW, .access = PL2_W, 3963 .writefn = tlbiall_nsnh_is_write }, 3964 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 3965 .type = ARM_CP_NO_RAW, .access = PL2_W, 3966 .writefn = tlbiall_hyp_write }, 3967 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 3968 .type = ARM_CP_NO_RAW, .access = PL2_W, 3969 .writefn = tlbiall_hyp_is_write }, 3970 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 3971 .type = ARM_CP_NO_RAW, .access = PL2_W, 3972 .writefn = tlbimva_hyp_write }, 3973 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 3974 .type = ARM_CP_NO_RAW, .access = PL2_W, 3975 .writefn = tlbimva_hyp_is_write }, 3976 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 3977 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 3978 .type = ARM_CP_NO_RAW, .access = PL2_W, 3979 .writefn = tlbi_aa64_alle2_write }, 3980 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 3981 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 3982 .type = ARM_CP_NO_RAW, .access = PL2_W, 3983 .writefn = tlbi_aa64_vae2_write }, 3984 { .name = "TLBI_VALE2", .state = 
ARM_CP_STATE_AA64, 3985 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 3986 .access = PL2_W, .type = ARM_CP_NO_RAW, 3987 .writefn = tlbi_aa64_vae2_write }, 3988 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 3989 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 3990 .access = PL2_W, .type = ARM_CP_NO_RAW, 3991 .writefn = tlbi_aa64_alle2is_write }, 3992 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 3993 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 3994 .type = ARM_CP_NO_RAW, .access = PL2_W, 3995 .writefn = tlbi_aa64_vae2is_write }, 3996 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 3997 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 3998 .access = PL2_W, .type = ARM_CP_NO_RAW, 3999 .writefn = tlbi_aa64_vae2is_write }, 4000 #ifndef CONFIG_USER_ONLY 4001 /* Unlike the other EL2-related AT operations, these must 4002 * UNDEF from EL3 if EL2 is not implemented, which is why we 4003 * define them here rather than with the rest of the AT ops. 4004 */ 4005 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 4006 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4007 .access = PL2_W, .accessfn = at_s1e2_access, 4008 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4009 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 4010 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4011 .access = PL2_W, .accessfn = at_s1e2_access, 4012 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4013 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 4014 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 4015 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 4016 * to behave as if SCR.NS was 1. 4017 */ 4018 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4019 .access = PL2_W, 4020 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4021 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4022 .access = PL2_W, 4023 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4024 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4025 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4026 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 4027 * reset values as IMPDEF. We choose to reset to 3 to comply with 4028 * both ARMv7 and ARMv8. 
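 * (A reset value of 3 sets EL1PCTEN and EL1PCEN, so EL1 and EL0 accesses
 * to the physical counter and timer are not trapped to EL2.)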
4029 */ 4030 .access = PL2_RW, .resetvalue = 3, 4031 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 4032 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4033 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 4034 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 4035 .writefn = gt_cntvoff_write, 4036 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4037 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 4038 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 4039 .writefn = gt_cntvoff_write, 4040 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4041 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 4042 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 4043 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4044 .type = ARM_CP_IO, .access = PL2_RW, 4045 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4046 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 4047 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4048 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 4049 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4050 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 4051 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 4052 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 4053 .resetfn = gt_hyp_timer_reset, 4054 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 4055 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 4056 .type = ARM_CP_IO, 4057 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 4058 .access = PL2_RW, 4059 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 4060 .resetvalue = 0, 4061 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 4062 #endif 4063 /* The only field of MDCR_EL2 that has a defined architectural reset value 4064 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 4065 * don't implement any PMU event counters, so using zero as a reset 4066 * value for MDCR_EL2 is okay 4067 */ 4068 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 4069 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 4070 .access = PL2_RW, .resetvalue = 0, 4071 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 4072 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 4073 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4074 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4075 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4076 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 4077 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4078 .access = PL2_RW, 4079 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4080 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 4081 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 4082 .access = PL2_RW, 4083 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 4084 REGINFO_SENTINEL 4085 }; 4086 4087 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 4088 bool isread) 4089 { 4090 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 4091 * At Secure EL1 it traps to EL3. 4092 */ 4093 if (arm_current_el(env) == 3) { 4094 return CP_ACCESS_OK; 4095 } 4096 if (arm_is_secure_below_el3(env)) { 4097 return CP_ACCESS_TRAP_EL3; 4098 } 4099 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads.
*/ 4100 if (isread) { 4101 return CP_ACCESS_OK; 4102 } 4103 return CP_ACCESS_TRAP_UNCATEGORIZED; 4104 } 4105 4106 static const ARMCPRegInfo el3_cp_reginfo[] = { 4107 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 4108 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 4109 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 4110 .resetvalue = 0, .writefn = scr_write }, 4111 { .name = "SCR", .type = ARM_CP_ALIAS, 4112 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 4113 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4114 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 4115 .writefn = scr_write }, 4116 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 4117 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 4118 .access = PL3_RW, .resetvalue = 0, 4119 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 4120 { .name = "SDER", 4121 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 4122 .access = PL3_RW, .resetvalue = 0, 4123 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 4124 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 4125 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4126 .writefn = vbar_write, .resetvalue = 0, 4127 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 4128 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 4129 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 4130 .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 4131 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 4132 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 4133 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 4134 .access = PL3_RW, 4135 /* no .writefn needed as this can't cause an ASID change; 4136 * we must provide a .raw_writefn and .resetfn because we handle 4137 * reset and migration for the AArch32 TTBCR(S), which might be 4138 * using mask and base_mask. 
4139 */ 4140 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 4141 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 4142 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 4143 .type = ARM_CP_ALIAS, 4144 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 4145 .access = PL3_RW, 4146 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 4147 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 4148 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 4149 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 4150 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 4151 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 4152 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 4153 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 4154 .type = ARM_CP_ALIAS, 4155 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 4156 .access = PL3_RW, 4157 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 4158 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 4159 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 4160 .access = PL3_RW, .writefn = vbar_write, 4161 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 4162 .resetvalue = 0 }, 4163 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 4164 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 4165 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 4166 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 4167 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 4168 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 4169 .access = PL3_RW, .resetvalue = 0, 4170 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 4171 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 4172 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 4173 .access = PL3_RW, .type = ARM_CP_CONST, 4174 .resetvalue = 0 }, 4175 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 4176 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 4177 .access = PL3_RW, .type = ARM_CP_CONST, 4178 .resetvalue = 0 }, 4179 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 4180 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 4181 .access = PL3_RW, .type = ARM_CP_CONST, 4182 .resetvalue = 0 }, 4183 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 4184 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 4185 .access = PL3_W, .type = ARM_CP_NO_RAW, 4186 .writefn = tlbi_aa64_alle3is_write }, 4187 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 4188 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 4189 .access = PL3_W, .type = ARM_CP_NO_RAW, 4190 .writefn = tlbi_aa64_vae3is_write }, 4191 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 4192 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 4193 .access = PL3_W, .type = ARM_CP_NO_RAW, 4194 .writefn = tlbi_aa64_vae3is_write }, 4195 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 4196 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 4197 .access = PL3_W, .type = ARM_CP_NO_RAW, 4198 .writefn = tlbi_aa64_alle3_write }, 4199 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 4200 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 4201 .access = PL3_W, .type = ARM_CP_NO_RAW, 4202 .writefn = tlbi_aa64_vae3_write }, 4203 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 4204 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 4205 .access = PL3_W, .type = ARM_CP_NO_RAW, 4206 .writefn = tlbi_aa64_vae3_write }, 4207 REGINFO_SENTINEL 4208 }; 4209 4210 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 
4211 bool isread) 4212 { 4213 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, 4214 * but the AArch32 CTR has its own reginfo struct) 4215 */ 4216 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 4217 return CP_ACCESS_TRAP; 4218 } 4219 return CP_ACCESS_OK; 4220 } 4221 4222 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4223 uint64_t value) 4224 { 4225 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 4226 * read via a bit in OSLSR_EL1. 4227 */ 4228 int oslock; 4229 4230 if (ri->state == ARM_CP_STATE_AA32) { 4231 oslock = (value == 0xC5ACCE55); 4232 } else { 4233 oslock = value & 1; 4234 } 4235 4236 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 4237 } 4238 4239 static const ARMCPRegInfo debug_cp_reginfo[] = { 4240 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 4241 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 4242 * unlike DBGDRAR it is never accessible from EL0. 4243 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 4244 * accessor. 4245 */ 4246 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 4247 .access = PL0_R, .accessfn = access_tdra, 4248 .type = ARM_CP_CONST, .resetvalue = 0 }, 4249 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 4250 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 4251 .access = PL1_R, .accessfn = access_tdra, 4252 .type = ARM_CP_CONST, .resetvalue = 0 }, 4253 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 4254 .access = PL0_R, .accessfn = access_tdra, 4255 .type = ARM_CP_CONST, .resetvalue = 0 }, 4256 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 4257 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 4258 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4259 .access = PL1_RW, .accessfn = access_tda, 4260 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 4261 .resetvalue = 0 }, 4262 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 4263 * We don't implement the configurable EL0 access. 4264 */ 4265 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 4266 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4267 .type = ARM_CP_ALIAS, 4268 .access = PL1_R, .accessfn = access_tda, 4269 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 4270 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 4271 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 4272 .access = PL1_W, .type = ARM_CP_NO_RAW, 4273 .accessfn = access_tdosa, 4274 .writefn = oslar_write }, 4275 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 4276 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 4277 .access = PL1_R, .resetvalue = 10, 4278 .accessfn = access_tdosa, 4279 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 4280 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 4281 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 4282 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 4283 .access = PL1_RW, .accessfn = access_tdosa, 4284 .type = ARM_CP_NOP }, 4285 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 4286 * implement vector catch debug events yet. 
4287 */ 4288 { .name = "DBGVCR", 4289 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 4290 .access = PL1_RW, .accessfn = access_tda, 4291 .type = ARM_CP_NOP }, 4292 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 4293 * to save and restore a 32-bit guest's DBGVCR) 4294 */ 4295 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 4296 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 4297 .access = PL2_RW, .accessfn = access_tda, 4298 .type = ARM_CP_NOP }, 4299 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 4300 * Channel but Linux may try to access this register. The 32-bit 4301 * alias is DBGDCCINT. 4302 */ 4303 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 4304 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4305 .access = PL1_RW, .accessfn = access_tda, 4306 .type = ARM_CP_NOP }, 4307 REGINFO_SENTINEL 4308 }; 4309 4310 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 4311 /* 64 bit access versions of the (dummy) debug registers */ 4312 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 4313 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4314 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 4315 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 4316 REGINFO_SENTINEL 4317 }; 4318 4319 /* Return the exception level to which SVE-disabled exceptions should 4320 * be taken, or 0 if SVE is enabled. 4321 */ 4322 static int sve_exception_el(CPUARMState *env) 4323 { 4324 #ifndef CONFIG_USER_ONLY 4325 unsigned current_el = arm_current_el(env); 4326 4327 /* The CPACR.ZEN controls traps to EL1: 4328 * 0, 2 : trap EL0 and EL1 accesses 4329 * 1 : trap only EL0 accesses 4330 * 3 : trap no accesses 4331 */ 4332 switch (extract32(env->cp15.cpacr_el1, 16, 2)) { 4333 default: 4334 if (current_el <= 1) { 4335 /* Trap to PL1, which might be EL1 or EL3 */ 4336 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4337 return 3; 4338 } 4339 return 1; 4340 } 4341 break; 4342 case 1: 4343 if (current_el == 0) { 4344 return 1; 4345 } 4346 break; 4347 case 3: 4348 break; 4349 } 4350 4351 /* Similarly for CPACR.FPEN, after having checked ZEN. */ 4352 switch (extract32(env->cp15.cpacr_el1, 20, 2)) { 4353 default: 4354 if (current_el <= 1) { 4355 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 4356 return 3; 4357 } 4358 return 1; 4359 } 4360 break; 4361 case 1: 4362 if (current_el == 0) { 4363 return 1; 4364 } 4365 break; 4366 case 3: 4367 break; 4368 } 4369 4370 /* CPTR_EL2. Check both TZ and TFP. */ 4371 if (current_el <= 2 4372 && (env->cp15.cptr_el[2] & (CPTR_TFP | CPTR_TZ)) 4373 && !arm_is_secure_below_el3(env)) { 4374 return 2; 4375 } 4376 4377 /* CPTR_EL3. Check both EZ and TFP. */ 4378 if (!(env->cp15.cptr_el[3] & CPTR_EZ) 4379 || (env->cp15.cptr_el[3] & CPTR_TFP)) { 4380 return 3; 4381 } 4382 #endif 4383 return 0; 4384 } 4385 4386 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4387 uint64_t value) 4388 { 4389 /* Bits other than [3:0] are RAZ/WI. 
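 * Bits [3:0] are the LEN field: e.g. a write of 0x10f is stored as 0xf,
 * and a write of 0x1f0 is stored as 0.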
*/ 4390 raw_write(env, ri, value & 0xf); 4391 } 4392 4393 static const ARMCPRegInfo zcr_el1_reginfo = { 4394 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 4395 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 4396 .access = PL1_RW, .type = ARM_CP_SVE | ARM_CP_FPU, 4397 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 4398 .writefn = zcr_write, .raw_writefn = raw_write 4399 }; 4400 4401 static const ARMCPRegInfo zcr_el2_reginfo = { 4402 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4403 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4404 .access = PL2_RW, .type = ARM_CP_SVE | ARM_CP_FPU, 4405 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 4406 .writefn = zcr_write, .raw_writefn = raw_write 4407 }; 4408 4409 static const ARMCPRegInfo zcr_no_el2_reginfo = { 4410 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 4411 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 4412 .access = PL2_RW, .type = ARM_CP_SVE | ARM_CP_FPU, 4413 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 4414 }; 4415 4416 static const ARMCPRegInfo zcr_el3_reginfo = { 4417 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 4418 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 4419 .access = PL3_RW, .type = ARM_CP_SVE | ARM_CP_FPU, 4420 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 4421 .writefn = zcr_write, .raw_writefn = raw_write 4422 }; 4423 4424 void hw_watchpoint_update(ARMCPU *cpu, int n) 4425 { 4426 CPUARMState *env = &cpu->env; 4427 vaddr len = 0; 4428 vaddr wvr = env->cp15.dbgwvr[n]; 4429 uint64_t wcr = env->cp15.dbgwcr[n]; 4430 int mask; 4431 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 4432 4433 if (env->cpu_watchpoint[n]) { 4434 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 4435 env->cpu_watchpoint[n] = NULL; 4436 } 4437 4438 if (!extract64(wcr, 0, 1)) { 4439 /* E bit clear : watchpoint disabled */ 4440 return; 4441 } 4442 4443 switch (extract64(wcr, 3, 2)) { 4444 case 0: 4445 /* LSC 00 is reserved and must behave as if the wp is disabled */ 4446 return; 4447 case 1: 4448 flags |= BP_MEM_READ; 4449 break; 4450 case 2: 4451 flags |= BP_MEM_WRITE; 4452 break; 4453 case 3: 4454 flags |= BP_MEM_ACCESS; 4455 break; 4456 } 4457 4458 /* Attempts to use both MASK and BAS fields simultaneously are 4459 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 4460 * thus generating a watchpoint for every byte in the masked region. 4461 */ 4462 mask = extract64(wcr, 24, 4); 4463 if (mask == 1 || mask == 2) { 4464 /* Reserved values of MASK; we must act as if the mask value was 4465 * some non-reserved value, or as if the watchpoint were disabled. 4466 * We choose the latter. 4467 */ 4468 return; 4469 } else if (mask) { 4470 /* Watchpoint covers an aligned area up to 2GB in size */ 4471 len = 1ULL << mask; 4472 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 4473 * whether the watchpoint fires when the unmasked bits match; we opt 4474 * to generate the exceptions. 4475 */ 4476 wvr &= ~(len - 1); 4477 } else { 4478 /* Watchpoint covers bytes defined by the byte address select bits */ 4479 int bas = extract64(wcr, 5, 8); 4480 int basstart; 4481 4482 if (bas == 0) { 4483 /* This must act as if the watchpoint is disabled */ 4484 return; 4485 } 4486 4487 if (extract64(wvr, 2, 1)) { 4488 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 4489 * ignored, and BAS[3:0] define which bytes to watch. 4490 */ 4491 bas &= 0xf; 4492 } 4493 /* The BAS bits are supposed to be programmed to indicate a contiguous 4494 * range of bytes. 
Otherwise it is CONSTRAINED UNPREDICTABLE whether 4495 * we fire for each byte in the word/doubleword addressed by the WVR. 4496 * We choose to ignore any non-zero bits after the first range of 1s. 4497 */ 4498 basstart = ctz32(bas); 4499 len = cto32(bas >> basstart); 4500 wvr += basstart; 4501 } 4502 4503 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 4504 &env->cpu_watchpoint[n]); 4505 } 4506 4507 void hw_watchpoint_update_all(ARMCPU *cpu) 4508 { 4509 int i; 4510 CPUARMState *env = &cpu->env; 4511 4512 /* Completely clear out existing QEMU watchpoints and our array, to 4513 * avoid possible stale entries following migration load. 4514 */ 4515 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 4516 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 4517 4518 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 4519 hw_watchpoint_update(cpu, i); 4520 } 4521 } 4522 4523 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4524 uint64_t value) 4525 { 4526 ARMCPU *cpu = arm_env_get_cpu(env); 4527 int i = ri->crm; 4528 4529 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 4530 * register reads and behaves as if values written are sign extended. 4531 * Bits [1:0] are RES0. 4532 */ 4533 value = sextract64(value, 0, 49) & ~3ULL; 4534 4535 raw_write(env, ri, value); 4536 hw_watchpoint_update(cpu, i); 4537 } 4538 4539 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4540 uint64_t value) 4541 { 4542 ARMCPU *cpu = arm_env_get_cpu(env); 4543 int i = ri->crm; 4544 4545 raw_write(env, ri, value); 4546 hw_watchpoint_update(cpu, i); 4547 } 4548 4549 void hw_breakpoint_update(ARMCPU *cpu, int n) 4550 { 4551 CPUARMState *env = &cpu->env; 4552 uint64_t bvr = env->cp15.dbgbvr[n]; 4553 uint64_t bcr = env->cp15.dbgbcr[n]; 4554 vaddr addr; 4555 int bt; 4556 int flags = BP_CPU; 4557 4558 if (env->cpu_breakpoint[n]) { 4559 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 4560 env->cpu_breakpoint[n] = NULL; 4561 } 4562 4563 if (!extract64(bcr, 0, 1)) { 4564 /* E bit clear : watchpoint disabled */ 4565 return; 4566 } 4567 4568 bt = extract64(bcr, 20, 4); 4569 4570 switch (bt) { 4571 case 4: /* unlinked address mismatch (reserved if AArch64) */ 4572 case 5: /* linked address mismatch (reserved if AArch64) */ 4573 qemu_log_mask(LOG_UNIMP, 4574 "arm: address mismatch breakpoint types not implemented\n"); 4575 return; 4576 case 0: /* unlinked address match */ 4577 case 1: /* linked address match */ 4578 { 4579 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 4580 * we behave as if the register was sign extended. Bits [1:0] are 4581 * RES0. The BAS field is used to allow setting breakpoints on 16 4582 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 4583 * a bp will fire if the addresses covered by the bp and the addresses 4584 * covered by the insn overlap but the insn doesn't start at the 4585 * start of the bp address range. We choose to require the insn and 4586 * the bp to have the same address. The constraints on writing to 4587 * BAS enforced in dbgbcr_write mean we have only four cases: 4588 * 0b0000 => no breakpoint 4589 * 0b0011 => breakpoint on addr 4590 * 0b1100 => breakpoint on addr + 2 4591 * 0b1111 => breakpoint on addr 4592 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 
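 * For example, with DBGBVR holding 0x1000, a BAS of 0b0011 or 0b1111
 * places the breakpoint at 0x1000 and a BAS of 0b1100 places it at
 * 0x1002 (the second halfword).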
4593 */ 4594 int bas = extract64(bcr, 5, 4); 4595 addr = sextract64(bvr, 0, 49) & ~3ULL; 4596 if (bas == 0) { 4597 return; 4598 } 4599 if (bas == 0xc) { 4600 addr += 2; 4601 } 4602 break; 4603 } 4604 case 2: /* unlinked context ID match */ 4605 case 8: /* unlinked VMID match (reserved if no EL2) */ 4606 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 4607 qemu_log_mask(LOG_UNIMP, 4608 "arm: unlinked context breakpoint types not implemented\n"); 4609 return; 4610 case 9: /* linked VMID match (reserved if no EL2) */ 4611 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 4612 case 3: /* linked context ID match */ 4613 default: 4614 /* We must generate no events for Linked context matches (unless 4615 * they are linked to by some other bp/wp, which is handled in 4616 * updates for the linking bp/wp). We choose to also generate no events 4617 * for reserved values. 4618 */ 4619 return; 4620 } 4621 4622 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 4623 } 4624 4625 void hw_breakpoint_update_all(ARMCPU *cpu) 4626 { 4627 int i; 4628 CPUARMState *env = &cpu->env; 4629 4630 /* Completely clear out existing QEMU breakpoints and our array, to 4631 * avoid possible stale entries following migration load. 4632 */ 4633 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 4634 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 4635 4636 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 4637 hw_breakpoint_update(cpu, i); 4638 } 4639 } 4640 4641 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4642 uint64_t value) 4643 { 4644 ARMCPU *cpu = arm_env_get_cpu(env); 4645 int i = ri->crm; 4646 4647 raw_write(env, ri, value); 4648 hw_breakpoint_update(cpu, i); 4649 } 4650 4651 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4652 uint64_t value) 4653 { 4654 ARMCPU *cpu = arm_env_get_cpu(env); 4655 int i = ri->crm; 4656 4657 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 4658 * copy of BAS[0]. 4659 */ 4660 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 4661 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 4662 4663 raw_write(env, ri, value); 4664 hw_breakpoint_update(cpu, i); 4665 } 4666 4667 static void define_debug_regs(ARMCPU *cpu) 4668 { 4669 /* Define v7 and v8 architectural debug registers. 4670 * These are just dummy implementations for now. 4671 */ 4672 int i; 4673 int wrps, brps, ctx_cmps; 4674 ARMCPRegInfo dbgdidr = { 4675 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 4676 .access = PL0_R, .accessfn = access_tda, 4677 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 4678 }; 4679 4680 /* Note that all these register fields hold "number of Xs minus 1". */ 4681 brps = extract32(cpu->dbgdidr, 24, 4); 4682 wrps = extract32(cpu->dbgdidr, 28, 4); 4683 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 4684 4685 assert(ctx_cmps <= brps); 4686 4687 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 4688 * of the debug registers such as number of breakpoints; 4689 * check that if they both exist then they agree. 
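 * (For example, a CPU with six breakpoints stores 5 in both DBGDIDR.BRPs
 * and ID_AA64DFR0_EL1.BRPs, since these fields hold "number minus one".)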
4690 */ 4691 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 4692 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 4693 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 4694 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 4695 } 4696 4697 define_one_arm_cp_reg(cpu, &dbgdidr); 4698 define_arm_cp_regs(cpu, debug_cp_reginfo); 4699 4700 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 4701 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 4702 } 4703 4704 for (i = 0; i < brps + 1; i++) { 4705 ARMCPRegInfo dbgregs[] = { 4706 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 4707 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 4708 .access = PL1_RW, .accessfn = access_tda, 4709 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 4710 .writefn = dbgbvr_write, .raw_writefn = raw_write 4711 }, 4712 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 4713 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 4714 .access = PL1_RW, .accessfn = access_tda, 4715 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 4716 .writefn = dbgbcr_write, .raw_writefn = raw_write 4717 }, 4718 REGINFO_SENTINEL 4719 }; 4720 define_arm_cp_regs(cpu, dbgregs); 4721 } 4722 4723 for (i = 0; i < wrps + 1; i++) { 4724 ARMCPRegInfo dbgregs[] = { 4725 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 4726 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 4727 .access = PL1_RW, .accessfn = access_tda, 4728 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 4729 .writefn = dbgwvr_write, .raw_writefn = raw_write 4730 }, 4731 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 4732 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 4733 .access = PL1_RW, .accessfn = access_tda, 4734 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 4735 .writefn = dbgwcr_write, .raw_writefn = raw_write 4736 }, 4737 REGINFO_SENTINEL 4738 }; 4739 define_arm_cp_regs(cpu, dbgregs); 4740 } 4741 } 4742 4743 /* We don't know until after realize whether there's a GICv3 4744 * attached, and that is what registers the gicv3 sysregs. 4745 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 4746 * at runtime. 4747 */ 4748 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 4749 { 4750 ARMCPU *cpu = arm_env_get_cpu(env); 4751 uint64_t pfr1 = cpu->id_pfr1; 4752 4753 if (env->gicv3state) { 4754 pfr1 |= 1 << 28; 4755 } 4756 return pfr1; 4757 } 4758 4759 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 4760 { 4761 ARMCPU *cpu = arm_env_get_cpu(env); 4762 uint64_t pfr0 = cpu->id_aa64pfr0; 4763 4764 if (env->gicv3state) { 4765 pfr0 |= 1 << 24; 4766 } 4767 return pfr0; 4768 } 4769 4770 void register_cp_regs_for_features(ARMCPU *cpu) 4771 { 4772 /* Register all the coprocessor registers based on feature bits */ 4773 CPUARMState *env = &cpu->env; 4774 if (arm_feature(env, ARM_FEATURE_M)) { 4775 /* M profile has no coprocessor registers */ 4776 return; 4777 } 4778 4779 define_arm_cp_regs(cpu, cp_reginfo); 4780 if (!arm_feature(env, ARM_FEATURE_V8)) { 4781 /* Must go early as it is full of wildcards that may be 4782 * overridden by later definitions. 
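         * A clash between one of those wildcard entries and a later,
         * more specific definition is only accepted when one of the two
         * carries ARM_CP_OVERRIDE; see add_cpreg_to_hashtable() below.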
4783 */ 4784 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 4785 } 4786 4787 if (arm_feature(env, ARM_FEATURE_V6)) { 4788 /* The ID registers all have impdef reset values */ 4789 ARMCPRegInfo v6_idregs[] = { 4790 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 4791 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 4792 .access = PL1_R, .type = ARM_CP_CONST, 4793 .resetvalue = cpu->id_pfr0 }, 4794 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 4795 * the value of the GIC field until after we define these regs. 4796 */ 4797 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 4798 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 4799 .access = PL1_R, .type = ARM_CP_NO_RAW, 4800 .readfn = id_pfr1_read, 4801 .writefn = arm_cp_write_ignore }, 4802 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 4803 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 4804 .access = PL1_R, .type = ARM_CP_CONST, 4805 .resetvalue = cpu->id_dfr0 }, 4806 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 4807 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 4808 .access = PL1_R, .type = ARM_CP_CONST, 4809 .resetvalue = cpu->id_afr0 }, 4810 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 4811 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 4812 .access = PL1_R, .type = ARM_CP_CONST, 4813 .resetvalue = cpu->id_mmfr0 }, 4814 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 4815 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 4816 .access = PL1_R, .type = ARM_CP_CONST, 4817 .resetvalue = cpu->id_mmfr1 }, 4818 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 4819 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 4820 .access = PL1_R, .type = ARM_CP_CONST, 4821 .resetvalue = cpu->id_mmfr2 }, 4822 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 4823 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 4824 .access = PL1_R, .type = ARM_CP_CONST, 4825 .resetvalue = cpu->id_mmfr3 }, 4826 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 4827 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 4828 .access = PL1_R, .type = ARM_CP_CONST, 4829 .resetvalue = cpu->id_isar0 }, 4830 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 4831 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 4832 .access = PL1_R, .type = ARM_CP_CONST, 4833 .resetvalue = cpu->id_isar1 }, 4834 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 4835 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 4836 .access = PL1_R, .type = ARM_CP_CONST, 4837 .resetvalue = cpu->id_isar2 }, 4838 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 4839 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 4840 .access = PL1_R, .type = ARM_CP_CONST, 4841 .resetvalue = cpu->id_isar3 }, 4842 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 4843 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 4844 .access = PL1_R, .type = ARM_CP_CONST, 4845 .resetvalue = cpu->id_isar4 }, 4846 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 4847 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 4848 .access = PL1_R, .type = ARM_CP_CONST, 4849 .resetvalue = cpu->id_isar5 }, 4850 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 4851 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 4852 .access = PL1_R, .type = ARM_CP_CONST, 4853 .resetvalue = cpu->id_mmfr4 }, 4854 /* 7 is as yet unallocated and must RAZ */ 4855 { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH, 4856 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 4857 .access = PL1_R, .type = ARM_CP_CONST, 4858 .resetvalue = 0 }, 4859 REGINFO_SENTINEL 4860 }; 4861 
define_arm_cp_regs(cpu, v6_idregs); 4862 define_arm_cp_regs(cpu, v6_cp_reginfo); 4863 } else { 4864 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 4865 } 4866 if (arm_feature(env, ARM_FEATURE_V6K)) { 4867 define_arm_cp_regs(cpu, v6k_cp_reginfo); 4868 } 4869 if (arm_feature(env, ARM_FEATURE_V7MP) && 4870 !arm_feature(env, ARM_FEATURE_PMSA)) { 4871 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 4872 } 4873 if (arm_feature(env, ARM_FEATURE_V7)) { 4874 /* v7 performance monitor control register: same implementor 4875 * field as main ID register, and we implement only the cycle 4876 * count register. 4877 */ 4878 #ifndef CONFIG_USER_ONLY 4879 ARMCPRegInfo pmcr = { 4880 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 4881 .access = PL0_RW, 4882 .type = ARM_CP_IO | ARM_CP_ALIAS, 4883 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 4884 .accessfn = pmreg_access, .writefn = pmcr_write, 4885 .raw_writefn = raw_write, 4886 }; 4887 ARMCPRegInfo pmcr64 = { 4888 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 4889 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 4890 .access = PL0_RW, .accessfn = pmreg_access, 4891 .type = ARM_CP_IO, 4892 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 4893 .resetvalue = cpu->midr & 0xff000000, 4894 .writefn = pmcr_write, .raw_writefn = raw_write, 4895 }; 4896 define_one_arm_cp_reg(cpu, &pmcr); 4897 define_one_arm_cp_reg(cpu, &pmcr64); 4898 #endif 4899 ARMCPRegInfo clidr = { 4900 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 4901 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 4902 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr 4903 }; 4904 define_one_arm_cp_reg(cpu, &clidr); 4905 define_arm_cp_regs(cpu, v7_cp_reginfo); 4906 define_debug_regs(cpu); 4907 } else { 4908 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 4909 } 4910 if (arm_feature(env, ARM_FEATURE_V8)) { 4911 /* AArch64 ID registers, which all have impdef reset values. 4912 * Note that within the ID register ranges the unused slots 4913 * must all RAZ, not UNDEF; future architecture versions may 4914 * define new registers here. 4915 */ 4916 ARMCPRegInfo v8_idregs[] = { 4917 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 4918 * know the right value for the GIC field until after we 4919 * define these regs. 
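             * (The id_aa64pfr0_read() helper defined earlier fills in
             * the GIC field at read time once a GICv3 has attached
             * itself, mirroring what id_pfr1_read() does for the
             * AArch32 ID_PFR1.)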
4920 */ 4921 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 4922 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 4923 .access = PL1_R, .type = ARM_CP_NO_RAW, 4924 .readfn = id_aa64pfr0_read, 4925 .writefn = arm_cp_write_ignore }, 4926 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 4927 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 4928 .access = PL1_R, .type = ARM_CP_CONST, 4929 .resetvalue = cpu->id_aa64pfr1}, 4930 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4931 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 4932 .access = PL1_R, .type = ARM_CP_CONST, 4933 .resetvalue = 0 }, 4934 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4935 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 4936 .access = PL1_R, .type = ARM_CP_CONST, 4937 .resetvalue = 0 }, 4938 { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4939 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 4940 .access = PL1_R, .type = ARM_CP_CONST, 4941 .resetvalue = 0 }, 4942 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4943 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 4944 .access = PL1_R, .type = ARM_CP_CONST, 4945 .resetvalue = 0 }, 4946 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4947 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 4948 .access = PL1_R, .type = ARM_CP_CONST, 4949 .resetvalue = 0 }, 4950 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4951 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 4952 .access = PL1_R, .type = ARM_CP_CONST, 4953 .resetvalue = 0 }, 4954 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 4955 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 4956 .access = PL1_R, .type = ARM_CP_CONST, 4957 .resetvalue = cpu->id_aa64dfr0 }, 4958 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 4959 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 4960 .access = PL1_R, .type = ARM_CP_CONST, 4961 .resetvalue = cpu->id_aa64dfr1 }, 4962 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4963 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 4964 .access = PL1_R, .type = ARM_CP_CONST, 4965 .resetvalue = 0 }, 4966 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4967 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 4968 .access = PL1_R, .type = ARM_CP_CONST, 4969 .resetvalue = 0 }, 4970 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 4971 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 4972 .access = PL1_R, .type = ARM_CP_CONST, 4973 .resetvalue = cpu->id_aa64afr0 }, 4974 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 4975 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 4976 .access = PL1_R, .type = ARM_CP_CONST, 4977 .resetvalue = cpu->id_aa64afr1 }, 4978 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4979 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 4980 .access = PL1_R, .type = ARM_CP_CONST, 4981 .resetvalue = 0 }, 4982 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4983 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 4984 .access = PL1_R, .type = ARM_CP_CONST, 4985 .resetvalue = 0 }, 4986 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 4987 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 4988 .access = PL1_R, .type = ARM_CP_CONST, 4989 .resetvalue = cpu->id_aa64isar0 }, 4990 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 4991 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 
4992 .access = PL1_R, .type = ARM_CP_CONST, 4993 .resetvalue = cpu->id_aa64isar1 }, 4994 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4995 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 4996 .access = PL1_R, .type = ARM_CP_CONST, 4997 .resetvalue = 0 }, 4998 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 4999 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 5000 .access = PL1_R, .type = ARM_CP_CONST, 5001 .resetvalue = 0 }, 5002 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5003 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 5004 .access = PL1_R, .type = ARM_CP_CONST, 5005 .resetvalue = 0 }, 5006 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5007 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 5008 .access = PL1_R, .type = ARM_CP_CONST, 5009 .resetvalue = 0 }, 5010 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5011 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 5012 .access = PL1_R, .type = ARM_CP_CONST, 5013 .resetvalue = 0 }, 5014 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5015 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 5016 .access = PL1_R, .type = ARM_CP_CONST, 5017 .resetvalue = 0 }, 5018 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 5019 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5020 .access = PL1_R, .type = ARM_CP_CONST, 5021 .resetvalue = cpu->id_aa64mmfr0 }, 5022 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 5023 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 5024 .access = PL1_R, .type = ARM_CP_CONST, 5025 .resetvalue = cpu->id_aa64mmfr1 }, 5026 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5027 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 5028 .access = PL1_R, .type = ARM_CP_CONST, 5029 .resetvalue = 0 }, 5030 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5031 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 5032 .access = PL1_R, .type = ARM_CP_CONST, 5033 .resetvalue = 0 }, 5034 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5035 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 5036 .access = PL1_R, .type = ARM_CP_CONST, 5037 .resetvalue = 0 }, 5038 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5039 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 5040 .access = PL1_R, .type = ARM_CP_CONST, 5041 .resetvalue = 0 }, 5042 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5043 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 5044 .access = PL1_R, .type = ARM_CP_CONST, 5045 .resetvalue = 0 }, 5046 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5047 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 5048 .access = PL1_R, .type = ARM_CP_CONST, 5049 .resetvalue = 0 }, 5050 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 5051 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 5052 .access = PL1_R, .type = ARM_CP_CONST, 5053 .resetvalue = cpu->mvfr0 }, 5054 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 5055 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 5056 .access = PL1_R, .type = ARM_CP_CONST, 5057 .resetvalue = cpu->mvfr1 }, 5058 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 5059 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 5060 .access = PL1_R, .type = ARM_CP_CONST, 5061 .resetvalue = cpu->mvfr2 }, 5062 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5063 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, 
.opc2 = 3, 5064 .access = PL1_R, .type = ARM_CP_CONST, 5065 .resetvalue = 0 }, 5066 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5067 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 5068 .access = PL1_R, .type = ARM_CP_CONST, 5069 .resetvalue = 0 }, 5070 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5071 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 5072 .access = PL1_R, .type = ARM_CP_CONST, 5073 .resetvalue = 0 }, 5074 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 5076 .access = PL1_R, .type = ARM_CP_CONST, 5077 .resetvalue = 0 }, 5078 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5079 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 5080 .access = PL1_R, .type = ARM_CP_CONST, 5081 .resetvalue = 0 }, 5082 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 5083 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 5084 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5085 .resetvalue = cpu->pmceid0 }, 5086 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 5087 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 5088 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5089 .resetvalue = cpu->pmceid0 }, 5090 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 5091 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 5092 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5093 .resetvalue = cpu->pmceid1 }, 5094 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 5095 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 5096 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5097 .resetvalue = cpu->pmceid1 }, 5098 REGINFO_SENTINEL 5099 }; 5100 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 5101 if (!arm_feature(env, ARM_FEATURE_EL3) && 5102 !arm_feature(env, ARM_FEATURE_EL2)) { 5103 ARMCPRegInfo rvbar = { 5104 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 5105 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5106 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 5107 }; 5108 define_one_arm_cp_reg(cpu, &rvbar); 5109 } 5110 define_arm_cp_regs(cpu, v8_idregs); 5111 define_arm_cp_regs(cpu, v8_cp_reginfo); 5112 } 5113 if (arm_feature(env, ARM_FEATURE_EL2)) { 5114 uint64_t vmpidr_def = mpidr_read_val(env); 5115 ARMCPRegInfo vpidr_regs[] = { 5116 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 5117 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5118 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5119 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 5120 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 5121 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 5122 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5123 .access = PL2_RW, .resetvalue = cpu->midr, 5124 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5125 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 5126 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5127 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5128 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 5129 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 5130 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 5131 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5132 .access = PL2_RW, 5133 .resetvalue = vmpidr_def, 5134 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 5135 REGINFO_SENTINEL 5136 }; 5137 define_arm_cp_regs(cpu, vpidr_regs); 5138 define_arm_cp_regs(cpu, el2_cp_reginfo); 5139 /* RVBAR_EL2 is 
only implemented if EL2 is the highest EL */ 5140 if (!arm_feature(env, ARM_FEATURE_EL3)) { 5141 ARMCPRegInfo rvbar = { 5142 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 5143 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 5144 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 5145 }; 5146 define_one_arm_cp_reg(cpu, &rvbar); 5147 } 5148 } else { 5149 /* If EL2 is missing but higher ELs are enabled, we need to 5150 * register the no_el2 reginfos. 5151 */ 5152 if (arm_feature(env, ARM_FEATURE_EL3)) { 5153 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 5154 * of MIDR_EL1 and MPIDR_EL1. 5155 */ 5156 ARMCPRegInfo vpidr_regs[] = { 5157 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5158 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 5159 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5160 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 5161 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 5162 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5163 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 5164 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5165 .type = ARM_CP_NO_RAW, 5166 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 5167 REGINFO_SENTINEL 5168 }; 5169 define_arm_cp_regs(cpu, vpidr_regs); 5170 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 5171 } 5172 } 5173 if (arm_feature(env, ARM_FEATURE_EL3)) { 5174 define_arm_cp_regs(cpu, el3_cp_reginfo); 5175 ARMCPRegInfo el3_regs[] = { 5176 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 5177 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 5178 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 5179 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 5180 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 5181 .access = PL3_RW, 5182 .raw_writefn = raw_write, .writefn = sctlr_write, 5183 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 5184 .resetvalue = cpu->reset_sctlr }, 5185 REGINFO_SENTINEL 5186 }; 5187 5188 define_arm_cp_regs(cpu, el3_regs); 5189 } 5190 /* The behaviour of NSACR is sufficiently various that we don't 5191 * try to describe it in a single reginfo: 5192 * if EL3 is 64 bit, then trap to EL3 from S EL1, 5193 * reads as constant 0xc00 from NS EL1 and NS EL2 5194 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 5195 * if v7 without EL3, register doesn't exist 5196 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 5197 */ 5198 if (arm_feature(env, ARM_FEATURE_EL3)) { 5199 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5200 ARMCPRegInfo nsacr = { 5201 .name = "NSACR", .type = ARM_CP_CONST, 5202 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5203 .access = PL1_RW, .accessfn = nsacr_access, 5204 .resetvalue = 0xc00 5205 }; 5206 define_one_arm_cp_reg(cpu, &nsacr); 5207 } else { 5208 ARMCPRegInfo nsacr = { 5209 .name = "NSACR", 5210 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5211 .access = PL3_RW | PL1_R, 5212 .resetvalue = 0, 5213 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 5214 }; 5215 define_one_arm_cp_reg(cpu, &nsacr); 5216 } 5217 } else { 5218 if (arm_feature(env, ARM_FEATURE_V8)) { 5219 ARMCPRegInfo nsacr = { 5220 .name = "NSACR", .type = ARM_CP_CONST, 5221 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 5222 .access = PL1_R, 5223 .resetvalue = 0xc00 5224 }; 5225 define_one_arm_cp_reg(cpu, &nsacr); 5226 } 5227 } 5228 5229 if (arm_feature(env, ARM_FEATURE_PMSA)) { 5230 if (arm_feature(env, ARM_FEATURE_V6)) { 5231 /* PMSAv6 not implemented */ 5232 
assert(arm_feature(env, ARM_FEATURE_V7)); 5233 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5234 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 5235 } else { 5236 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 5237 } 5238 } else { 5239 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 5240 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 5241 } 5242 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 5243 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 5244 } 5245 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 5246 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 5247 } 5248 if (arm_feature(env, ARM_FEATURE_VAPA)) { 5249 define_arm_cp_regs(cpu, vapa_cp_reginfo); 5250 } 5251 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 5252 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 5253 } 5254 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 5255 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 5256 } 5257 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 5258 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 5259 } 5260 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 5261 define_arm_cp_regs(cpu, omap_cp_reginfo); 5262 } 5263 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 5264 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 5265 } 5266 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5267 define_arm_cp_regs(cpu, xscale_cp_reginfo); 5268 } 5269 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 5270 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 5271 } 5272 if (arm_feature(env, ARM_FEATURE_LPAE)) { 5273 define_arm_cp_regs(cpu, lpae_cp_reginfo); 5274 } 5275 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 5276 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 5277 * be read-only (ie write causes UNDEF exception). 5278 */ 5279 { 5280 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 5281 /* Pre-v8 MIDR space. 5282 * Note that the MIDR isn't a simple constant register because 5283 * of the TI925 behaviour where writes to another register can 5284 * cause the MIDR value to change. 5285 * 5286 * Unimplemented registers in the c15 0 0 0 space default to 5287 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 5288 * and friends override accordingly. 5289 */ 5290 { .name = "MIDR", 5291 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 5292 .access = PL1_R, .resetvalue = cpu->midr, 5293 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 5294 .readfn = midr_read, 5295 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5296 .type = ARM_CP_OVERRIDE }, 5297 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
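             * The DUMMY entries that follow provide that read-as-zero
             * behaviour, one per crm value.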
*/ 5298 { .name = "DUMMY", 5299 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 5300 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5301 { .name = "DUMMY", 5302 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 5303 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5304 { .name = "DUMMY", 5305 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 5306 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5307 { .name = "DUMMY", 5308 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 5309 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5310 { .name = "DUMMY", 5311 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 5312 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5313 REGINFO_SENTINEL 5314 }; 5315 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 5316 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 5317 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 5318 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 5319 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 5320 .readfn = midr_read }, 5321 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 5322 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5323 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5324 .access = PL1_R, .resetvalue = cpu->midr }, 5325 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 5326 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 5327 .access = PL1_R, .resetvalue = cpu->midr }, 5328 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 5329 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 5330 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 5331 REGINFO_SENTINEL 5332 }; 5333 ARMCPRegInfo id_cp_reginfo[] = { 5334 /* These are common to v8 and pre-v8 */ 5335 { .name = "CTR", 5336 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 5337 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5338 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 5339 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 5340 .access = PL0_R, .accessfn = ctr_el0_access, 5341 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 5342 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 5343 { .name = "TCMTR", 5344 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 5345 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 5346 REGINFO_SENTINEL 5347 }; 5348 /* TLBTR is specific to VMSA */ 5349 ARMCPRegInfo id_tlbtr_reginfo = { 5350 .name = "TLBTR", 5351 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 5352 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 5353 }; 5354 /* MPUIR is specific to PMSA V6+ */ 5355 ARMCPRegInfo id_mpuir_reginfo = { 5356 .name = "MPUIR", 5357 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 5358 .access = PL1_R, .type = ARM_CP_CONST, 5359 .resetvalue = cpu->pmsav7_dregion << 8 5360 }; 5361 ARMCPRegInfo crn0_wi_reginfo = { 5362 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 5363 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 5364 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 5365 }; 5366 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 5367 arm_feature(env, ARM_FEATURE_STRONGARM)) { 5368 ARMCPRegInfo *r; 5369 /* Register the blanket "writes ignored" value first to cover the 5370 * whole space. Then update the specific ID registers to allow write 5371 * access, so that they ignore writes rather than causing them to 5372 * UNDEF. 
5373 */ 5374 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 5375 for (r = id_pre_v8_midr_cp_reginfo; 5376 r->type != ARM_CP_SENTINEL; r++) { 5377 r->access = PL1_RW; 5378 } 5379 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 5380 r->access = PL1_RW; 5381 } 5382 id_mpuir_reginfo.access = PL1_RW; 5383 id_tlbtr_reginfo.access = PL1_RW; 5384 } 5385 if (arm_feature(env, ARM_FEATURE_V8)) { 5386 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 5387 } else { 5388 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 5389 } 5390 define_arm_cp_regs(cpu, id_cp_reginfo); 5391 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 5392 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 5393 } else if (arm_feature(env, ARM_FEATURE_V7)) { 5394 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 5395 } 5396 } 5397 5398 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 5399 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 5400 } 5401 5402 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 5403 ARMCPRegInfo auxcr_reginfo[] = { 5404 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 5405 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 5406 .access = PL1_RW, .type = ARM_CP_CONST, 5407 .resetvalue = cpu->reset_auxcr }, 5408 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 5409 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 5410 .access = PL2_RW, .type = ARM_CP_CONST, 5411 .resetvalue = 0 }, 5412 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 5413 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 5414 .access = PL3_RW, .type = ARM_CP_CONST, 5415 .resetvalue = 0 }, 5416 REGINFO_SENTINEL 5417 }; 5418 define_arm_cp_regs(cpu, auxcr_reginfo); 5419 } 5420 5421 if (arm_feature(env, ARM_FEATURE_CBAR)) { 5422 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5423 /* 32 bit view is [31:18] 0...0 [43:32]. 
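             * That is, bits [31:18] of the 64-bit CBAR are kept in
             * place and bits [43:32] (the upper physical address bits)
             * appear in bits [11:0] of the 32-bit view. As a
             * hypothetical example, a reset_cbar of 0x0ab80040000
             * yields a cbar32 of 0x800400ab.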
*/ 5424 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 5425 | extract64(cpu->reset_cbar, 32, 12); 5426 ARMCPRegInfo cbar_reginfo[] = { 5427 { .name = "CBAR", 5428 .type = ARM_CP_CONST, 5429 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5430 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 5431 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 5432 .type = ARM_CP_CONST, 5433 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 5434 .access = PL1_R, .resetvalue = cbar32 }, 5435 REGINFO_SENTINEL 5436 }; 5437 /* We don't implement a r/w 64 bit CBAR currently */ 5438 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 5439 define_arm_cp_regs(cpu, cbar_reginfo); 5440 } else { 5441 ARMCPRegInfo cbar = { 5442 .name = "CBAR", 5443 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 5444 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 5445 .fieldoffset = offsetof(CPUARMState, 5446 cp15.c15_config_base_address) 5447 }; 5448 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 5449 cbar.access = PL1_R; 5450 cbar.fieldoffset = 0; 5451 cbar.type = ARM_CP_CONST; 5452 } 5453 define_one_arm_cp_reg(cpu, &cbar); 5454 } 5455 } 5456 5457 if (arm_feature(env, ARM_FEATURE_VBAR)) { 5458 ARMCPRegInfo vbar_cp_reginfo[] = { 5459 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 5460 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 5461 .access = PL1_RW, .writefn = vbar_write, 5462 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 5463 offsetof(CPUARMState, cp15.vbar_ns) }, 5464 .resetvalue = 0 }, 5465 REGINFO_SENTINEL 5466 }; 5467 define_arm_cp_regs(cpu, vbar_cp_reginfo); 5468 } 5469 5470 /* Generic registers whose values depend on the implementation */ 5471 { 5472 ARMCPRegInfo sctlr = { 5473 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 5474 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5475 .access = PL1_RW, 5476 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 5477 offsetof(CPUARMState, cp15.sctlr_ns) }, 5478 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 5479 .raw_writefn = raw_write, 5480 }; 5481 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 5482 /* Normally we would always end the TB on an SCTLR write, but Linux 5483 * arch/arm/mach-pxa/sleep.S expects two instructions following 5484 * an MMU enable to execute from cache. Imitate this behaviour. 
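             * (ARM_CP_SUPPRESS_TB_END, ORed in just below, stops the
             * translator from ending the TB after the SCTLR write, so
             * those two instructions are executed from the
             * already-translated block.)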
5485 */ 5486 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 5487 } 5488 define_one_arm_cp_reg(cpu, &sctlr); 5489 } 5490 5491 if (arm_feature(env, ARM_FEATURE_SVE)) { 5492 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 5493 if (arm_feature(env, ARM_FEATURE_EL2)) { 5494 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 5495 } else { 5496 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 5497 } 5498 if (arm_feature(env, ARM_FEATURE_EL3)) { 5499 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 5500 } 5501 } 5502 } 5503 5504 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 5505 { 5506 CPUState *cs = CPU(cpu); 5507 CPUARMState *env = &cpu->env; 5508 5509 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5510 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 5511 aarch64_fpu_gdb_set_reg, 5512 34, "aarch64-fpu.xml", 0); 5513 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 5514 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5515 51, "arm-neon.xml", 0); 5516 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 5517 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5518 35, "arm-vfp3.xml", 0); 5519 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 5520 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 5521 19, "arm-vfp.xml", 0); 5522 } 5523 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 5524 arm_gen_dynamic_xml(cs), 5525 "system-registers.xml", 0); 5526 } 5527 5528 /* Sort alphabetically by type name, except for "any". */ 5529 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 5530 { 5531 ObjectClass *class_a = (ObjectClass *)a; 5532 ObjectClass *class_b = (ObjectClass *)b; 5533 const char *name_a, *name_b; 5534 5535 name_a = object_class_get_name(class_a); 5536 name_b = object_class_get_name(class_b); 5537 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 5538 return 1; 5539 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 5540 return -1; 5541 } else { 5542 return strcmp(name_a, name_b); 5543 } 5544 } 5545 5546 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 5547 { 5548 ObjectClass *oc = data; 5549 CPUListState *s = user_data; 5550 const char *typename; 5551 char *name; 5552 5553 typename = object_class_get_name(oc); 5554 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5555 (*s->cpu_fprintf)(s->file, " %s\n", 5556 name); 5557 g_free(name); 5558 } 5559 5560 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) 5561 { 5562 CPUListState s = { 5563 .file = f, 5564 .cpu_fprintf = cpu_fprintf, 5565 }; 5566 GSList *list; 5567 5568 list = object_class_get_list(TYPE_ARM_CPU, false); 5569 list = g_slist_sort(list, arm_cpu_list_compare); 5570 (*cpu_fprintf)(f, "Available CPUs:\n"); 5571 g_slist_foreach(list, arm_cpu_list_entry, &s); 5572 g_slist_free(list); 5573 #ifdef CONFIG_KVM 5574 /* The 'host' CPU type is dynamically registered only if KVM is 5575 * enabled, so we have to special-case it here: 5576 */ 5577 (*cpu_fprintf)(f, " host (only available in KVM mode)\n"); 5578 #endif 5579 } 5580 5581 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 5582 { 5583 ObjectClass *oc = data; 5584 CpuDefinitionInfoList **cpu_list = user_data; 5585 CpuDefinitionInfoList *entry; 5586 CpuDefinitionInfo *info; 5587 const char *typename; 5588 5589 typename = object_class_get_name(oc); 5590 info = g_malloc0(sizeof(*info)); 5591 info->name = g_strndup(typename, 5592 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 5593 info->q_typename = g_strdup(typename); 5594 5595 entry = 
g_malloc0(sizeof(*entry)); 5596 entry->value = info; 5597 entry->next = *cpu_list; 5598 *cpu_list = entry; 5599 } 5600 5601 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 5602 { 5603 CpuDefinitionInfoList *cpu_list = NULL; 5604 GSList *list; 5605 5606 list = object_class_get_list(TYPE_ARM_CPU, false); 5607 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 5608 g_slist_free(list); 5609 5610 return cpu_list; 5611 } 5612 5613 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 5614 void *opaque, int state, int secstate, 5615 int crm, int opc1, int opc2, 5616 const char *name) 5617 { 5618 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 5619 * add a single reginfo struct to the hash table. 5620 */ 5621 uint32_t *key = g_new(uint32_t, 1); 5622 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 5623 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 5624 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 5625 5626 r2->name = g_strdup(name); 5627 /* Reset the secure state to the specific incoming state. This is 5628 * necessary as the register may have been defined with both states. 5629 */ 5630 r2->secure = secstate; 5631 5632 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5633 /* Register is banked (using both entries in array). 5634 * Overwriting fieldoffset as the array is only used to define 5635 * banked registers but later only fieldoffset is used. 5636 */ 5637 r2->fieldoffset = r->bank_fieldoffsets[ns]; 5638 } 5639 5640 if (state == ARM_CP_STATE_AA32) { 5641 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 5642 /* If the register is banked then we don't need to migrate or 5643 * reset the 32-bit instance in certain cases: 5644 * 5645 * 1) If the register has both 32-bit and 64-bit instances then we 5646 * can count on the 64-bit instance taking care of the 5647 * non-secure bank. 5648 * 2) If ARMv8 is enabled then we can count on a 64-bit version 5649 * taking care of the secure bank. This requires that separate 5650 * 32 and 64-bit definitions are provided. 5651 */ 5652 if ((r->state == ARM_CP_STATE_BOTH && ns) || 5653 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 5654 r2->type |= ARM_CP_ALIAS; 5655 } 5656 } else if ((secstate != r->secure) && !ns) { 5657 /* The register is not banked so we only want to allow migration of 5658 * the non-secure instance. 5659 */ 5660 r2->type |= ARM_CP_ALIAS; 5661 } 5662 5663 if (r->state == ARM_CP_STATE_BOTH) { 5664 /* We assume it is a cp15 register if the .cp field is left unset. 5665 */ 5666 if (r2->cp == 0) { 5667 r2->cp = 15; 5668 } 5669 5670 #ifdef HOST_WORDS_BIGENDIAN 5671 if (r2->fieldoffset) { 5672 r2->fieldoffset += sizeof(uint32_t); 5673 } 5674 #endif 5675 } 5676 } 5677 if (state == ARM_CP_STATE_AA64) { 5678 /* To allow abbreviation of ARMCPRegInfo 5679 * definitions, we treat cp == 0 as equivalent to 5680 * the value for "standard guest-visible sysreg". 5681 * STATE_BOTH definitions are also always "standard 5682 * sysreg" in their AArch64 view (the .cp value may 5683 * be non-zero for the benefit of the AArch32 view). 
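         * The substitution below therefore rewrites .cp to
         * CP_REG_ARM64_SYSREG_CP before the 64-bit key is encoded, so
         * the register is looked up as an AArch64 system register
         * rather than as a coprocessor access.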
5684 */ 5685 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 5686 r2->cp = CP_REG_ARM64_SYSREG_CP; 5687 } 5688 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 5689 r2->opc0, opc1, opc2); 5690 } else { 5691 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 5692 } 5693 if (opaque) { 5694 r2->opaque = opaque; 5695 } 5696 /* reginfo passed to helpers is correct for the actual access, 5697 * and is never ARM_CP_STATE_BOTH: 5698 */ 5699 r2->state = state; 5700 /* Make sure reginfo passed to helpers for wildcarded regs 5701 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 5702 */ 5703 r2->crm = crm; 5704 r2->opc1 = opc1; 5705 r2->opc2 = opc2; 5706 /* By convention, for wildcarded registers only the first 5707 * entry is used for migration; the others are marked as 5708 * ALIAS so we don't try to transfer the register 5709 * multiple times. Special registers (ie NOP/WFI) are 5710 * never migratable and not even raw-accessible. 5711 */ 5712 if ((r->type & ARM_CP_SPECIAL)) { 5713 r2->type |= ARM_CP_NO_RAW; 5714 } 5715 if (((r->crm == CP_ANY) && crm != 0) || 5716 ((r->opc1 == CP_ANY) && opc1 != 0) || 5717 ((r->opc2 == CP_ANY) && opc2 != 0)) { 5718 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 5719 } 5720 5721 /* Check that raw accesses are either forbidden or handled. Note that 5722 * we can't assert this earlier because the setup of fieldoffset for 5723 * banked registers has to be done first. 5724 */ 5725 if (!(r2->type & ARM_CP_NO_RAW)) { 5726 assert(!raw_accessors_invalid(r2)); 5727 } 5728 5729 /* Overriding of an existing definition must be explicitly 5730 * requested. 5731 */ 5732 if (!(r->type & ARM_CP_OVERRIDE)) { 5733 ARMCPRegInfo *oldreg; 5734 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 5735 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 5736 fprintf(stderr, "Register redefined: cp=%d %d bit " 5737 "crn=%d crm=%d opc1=%d opc2=%d, " 5738 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 5739 r2->crn, r2->crm, r2->opc1, r2->opc2, 5740 oldreg->name, r2->name); 5741 g_assert_not_reached(); 5742 } 5743 } 5744 g_hash_table_insert(cpu->cp_regs, key, r2); 5745 } 5746 5747 5748 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 5749 const ARMCPRegInfo *r, void *opaque) 5750 { 5751 /* Define implementations of coprocessor registers. 5752 * We store these in a hashtable because typically 5753 * there are less than 150 registers in a space which 5754 * is 16*16*16*8*8 = 262144 in size. 5755 * Wildcarding is supported for the crm, opc1 and opc2 fields. 5756 * If a register is defined twice then the second definition is 5757 * used, so this can be used to define some generic registers and 5758 * then override them with implementation specific variations. 5759 * At least one of the original and the second definition should 5760 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 5761 * against accidental use. 5762 * 5763 * The state field defines whether the register is to be 5764 * visible in the AArch32 or AArch64 execution state. If the 5765 * state is set to ARM_CP_STATE_BOTH then we synthesise a 5766 * reginfo structure for the AArch32 view, which sees the lower 5767 * 32 bits of the 64 bit register. 5768 * 5769 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 5770 * be wildcarded. AArch64 registers are always considered to be 64 5771 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 5772 * the register, if any. 5773 */ 5774 int crm, opc1, opc2, state; 5775 int crmmin = (r->crm == CP_ANY) ? 
0 : r->crm; 5776 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 5777 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 5778 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 5779 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 5780 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 5781 /* 64 bit registers have only CRm and Opc1 fields */ 5782 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 5783 /* op0 only exists in the AArch64 encodings */ 5784 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 5785 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 5786 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 5787 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 5788 * encodes a minimum access level for the register. We roll this 5789 * runtime check into our general permission check code, so check 5790 * here that the reginfo's specified permissions are strict enough 5791 * to encompass the generic architectural permission check. 5792 */ 5793 if (r->state != ARM_CP_STATE_AA32) { 5794 int mask = 0; 5795 switch (r->opc1) { 5796 case 0: case 1: case 2: 5797 /* min_EL EL1 */ 5798 mask = PL1_RW; 5799 break; 5800 case 3: 5801 /* min_EL EL0 */ 5802 mask = PL0_RW; 5803 break; 5804 case 4: 5805 /* min_EL EL2 */ 5806 mask = PL2_RW; 5807 break; 5808 case 5: 5809 /* unallocated encoding, so not possible */ 5810 assert(false); 5811 break; 5812 case 6: 5813 /* min_EL EL3 */ 5814 mask = PL3_RW; 5815 break; 5816 case 7: 5817 /* min_EL EL1, secure mode only (we don't check the latter) */ 5818 mask = PL1_RW; 5819 break; 5820 default: 5821 /* broken reginfo with out-of-range opc1 */ 5822 assert(false); 5823 break; 5824 } 5825 /* assert our permissions are not too lax (stricter is fine) */ 5826 assert((r->access & ~mask) == 0); 5827 } 5828 5829 /* Check that the register definition has enough info to handle 5830 * reads and writes if they are permitted. 5831 */ 5832 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 5833 if (r->access & PL3_R) { 5834 assert((r->fieldoffset || 5835 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5836 r->readfn); 5837 } 5838 if (r->access & PL3_W) { 5839 assert((r->fieldoffset || 5840 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 5841 r->writefn); 5842 } 5843 } 5844 /* Bad type field probably means missing sentinel at end of reg list */ 5845 assert(cptype_valid(r->type)); 5846 for (crm = crmmin; crm <= crmmax; crm++) { 5847 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 5848 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 5849 for (state = ARM_CP_STATE_AA32; 5850 state <= ARM_CP_STATE_AA64; state++) { 5851 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 5852 continue; 5853 } 5854 if (state == ARM_CP_STATE_AA32) { 5855 /* Under AArch32 CP registers can be common 5856 * (same for secure and non-secure world) or banked. 
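                     * For a reginfo with an explicit secure state we
                     * register a single instance; otherwise the switch
                     * below registers both an "_S"-suffixed secure
                     * instance and a non-secure instance under the
                     * original name.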
                         */
                        char *name;

                        switch (r->secure) {
                        case ARM_CP_SECSTATE_S:
                        case ARM_CP_SECSTATE_NS:
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   r->secure, crm, opc1, opc2,
                                                   r->name);
                            break;
                        default:
                            name = g_strdup_printf("%s_S", r->name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_S,
                                                   crm, opc1, opc2, name);
                            g_free(name);
                            add_cpreg_to_hashtable(cpu, r, opaque, state,
                                                   ARM_CP_SECSTATE_NS,
                                                   crm, opc1, opc2, r->name);
                            break;
                        }
                    } else {
                        /* AArch64 registers get mapped to non-secure instance
                         * of AArch32 */
                        add_cpreg_to_hashtable(cpu, r, opaque, state,
                                               ARM_CP_SECSTATE_NS,
                                               crm, opc1, opc2, r->name);
                    }
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
5951 */ 5952 if (write_type == CPSRWriteByInstr && 5953 (env->cp15.hcr_el2 & HCR_TGE) && 5954 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 5955 !arm_is_secure_below_el3(env)) { 5956 return 1; 5957 } 5958 return 0; 5959 case ARM_CPU_MODE_HYP: 5960 return !arm_feature(env, ARM_FEATURE_EL2) 5961 || arm_current_el(env) < 2 || arm_is_secure(env); 5962 case ARM_CPU_MODE_MON: 5963 return arm_current_el(env) < 3; 5964 default: 5965 return 1; 5966 } 5967 } 5968 5969 uint32_t cpsr_read(CPUARMState *env) 5970 { 5971 int ZF; 5972 ZF = (env->ZF == 0); 5973 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 5974 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 5975 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 5976 | ((env->condexec_bits & 0xfc) << 8) 5977 | (env->GE << 16) | (env->daif & CPSR_AIF); 5978 } 5979 5980 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 5981 CPSRWriteType write_type) 5982 { 5983 uint32_t changed_daif; 5984 5985 if (mask & CPSR_NZCV) { 5986 env->ZF = (~val) & CPSR_Z; 5987 env->NF = val; 5988 env->CF = (val >> 29) & 1; 5989 env->VF = (val << 3) & 0x80000000; 5990 } 5991 if (mask & CPSR_Q) 5992 env->QF = ((val & CPSR_Q) != 0); 5993 if (mask & CPSR_T) 5994 env->thumb = ((val & CPSR_T) != 0); 5995 if (mask & CPSR_IT_0_1) { 5996 env->condexec_bits &= ~3; 5997 env->condexec_bits |= (val >> 25) & 3; 5998 } 5999 if (mask & CPSR_IT_2_7) { 6000 env->condexec_bits &= 3; 6001 env->condexec_bits |= (val >> 8) & 0xfc; 6002 } 6003 if (mask & CPSR_GE) { 6004 env->GE = (val >> 16) & 0xf; 6005 } 6006 6007 /* In a V7 implementation that includes the security extensions but does 6008 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 6009 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 6010 * bits respectively. 6011 * 6012 * In a V8 implementation, it is permitted for privileged software to 6013 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 6014 */ 6015 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 6016 arm_feature(env, ARM_FEATURE_EL3) && 6017 !arm_feature(env, ARM_FEATURE_EL2) && 6018 !arm_is_secure(env)) { 6019 6020 changed_daif = (env->daif ^ val) & mask; 6021 6022 if (changed_daif & CPSR_A) { 6023 /* Check to see if we are allowed to change the masking of async 6024 * abort exceptions from a non-secure state. 6025 */ 6026 if (!(env->cp15.scr_el3 & SCR_AW)) { 6027 qemu_log_mask(LOG_GUEST_ERROR, 6028 "Ignoring attempt to switch CPSR_A flag from " 6029 "non-secure world with SCR.AW bit clear\n"); 6030 mask &= ~CPSR_A; 6031 } 6032 } 6033 6034 if (changed_daif & CPSR_F) { 6035 /* Check to see if we are allowed to change the masking of FIQ 6036 * exceptions from a non-secure state. 6037 */ 6038 if (!(env->cp15.scr_el3 & SCR_FW)) { 6039 qemu_log_mask(LOG_GUEST_ERROR, 6040 "Ignoring attempt to switch CPSR_F flag from " 6041 "non-secure world with SCR.FW bit clear\n"); 6042 mask &= ~CPSR_F; 6043 } 6044 6045 /* Check whether non-maskable FIQ (NMFI) support is enabled. 6046 * If this bit is set software is not allowed to mask 6047 * FIQs, but is allowed to set CPSR_F to 0. 
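         * In practice that means a guest attempt to set CPSR.F is
         * dropped from the write mask below, while an attempt to clear
         * it is still honoured.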
6048 */ 6049 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 6050 (val & CPSR_F)) { 6051 qemu_log_mask(LOG_GUEST_ERROR, 6052 "Ignoring attempt to enable CPSR_F flag " 6053 "(non-maskable FIQ [NMFI] support enabled)\n"); 6054 mask &= ~CPSR_F; 6055 } 6056 } 6057 } 6058 6059 env->daif &= ~(CPSR_AIF & mask); 6060 env->daif |= val & CPSR_AIF & mask; 6061 6062 if (write_type != CPSRWriteRaw && 6063 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 6064 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 6065 /* Note that we can only get here in USR mode if this is a 6066 * gdb stub write; for this case we follow the architectural 6067 * behaviour for guest writes in USR mode of ignoring an attempt 6068 * to switch mode. (Those are caught by translate.c for writes 6069 * triggered by guest instructions.) 6070 */ 6071 mask &= ~CPSR_M; 6072 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 6073 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 6074 * v7, and has defined behaviour in v8: 6075 * + leave CPSR.M untouched 6076 * + allow changes to the other CPSR fields 6077 * + set PSTATE.IL 6078 * For user changes via the GDB stub, we don't set PSTATE.IL, 6079 * as this would be unnecessarily harsh for a user error. 6080 */ 6081 mask &= ~CPSR_M; 6082 if (write_type != CPSRWriteByGDBStub && 6083 arm_feature(env, ARM_FEATURE_V8)) { 6084 mask |= CPSR_IL; 6085 val |= CPSR_IL; 6086 } 6087 } else { 6088 switch_mode(env, val & CPSR_M); 6089 } 6090 } 6091 mask &= ~CACHED_CPSR_BITS; 6092 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 6093 } 6094 6095 /* Sign/zero extend */ 6096 uint32_t HELPER(sxtb16)(uint32_t x) 6097 { 6098 uint32_t res; 6099 res = (uint16_t)(int8_t)x; 6100 res |= (uint32_t)(int8_t)(x >> 16) << 16; 6101 return res; 6102 } 6103 6104 uint32_t HELPER(uxtb16)(uint32_t x) 6105 { 6106 uint32_t res; 6107 res = (uint16_t)(uint8_t)x; 6108 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 6109 return res; 6110 } 6111 6112 int32_t HELPER(sdiv)(int32_t num, int32_t den) 6113 { 6114 if (den == 0) 6115 return 0; 6116 if (num == INT_MIN && den == -1) 6117 return INT_MIN; 6118 return num / den; 6119 } 6120 6121 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 6122 { 6123 if (den == 0) 6124 return 0; 6125 return num / den; 6126 } 6127 6128 uint32_t HELPER(rbit)(uint32_t x) 6129 { 6130 return revbit32(x); 6131 } 6132 6133 #if defined(CONFIG_USER_ONLY) 6134 6135 /* These should probably raise undefined insn exceptions. */ 6136 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 6137 { 6138 ARMCPU *cpu = arm_env_get_cpu(env); 6139 6140 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 6141 } 6142 6143 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 6144 { 6145 ARMCPU *cpu = arm_env_get_cpu(env); 6146 6147 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 6148 return 0; 6149 } 6150 6151 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6152 { 6153 /* translate.c should never generate calls here in user-only mode */ 6154 g_assert_not_reached(); 6155 } 6156 6157 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6158 { 6159 /* translate.c should never generate calls here in user-only mode */ 6160 g_assert_not_reached(); 6161 } 6162 6163 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 6164 { 6165 /* The TT instructions can be used by unprivileged code, but in 6166 * user-only emulation we don't have the MPU. 
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     * IREGION: 0 because IRVALID is 0
     * IRVALID: 0 because NS
     * S: 0 because NS
     * NSRW: 0 because NS
     * NSR: 0 because NS
     * RW: 0 because unpriv and A flag not set
     * R: 0 because unpriv and A flag not set
     * SRVALID: 0 because NS
     * MRVALID: 0 because unpriv and A flag not set
     * SREGION: 0 because SRVALID is 0
     * MREGION: 0 because MRVALID is 0
     */
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure)
{
    return 1;
}

void aarch64_sync_64_to_32(CPUARMState *env)
{
    g_assert_not_reached();
}

#else

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

/* Physical Interrupt Target EL Lookup Table
 *
 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
 *
 * The below multi-dimensional table is used for looking up the target
 * exception level given numerous condition criteria. Specifically, the
 * target EL is based on SCR and HCR routing controls as well as the
 * currently executing EL and secure state.
 *
 * Dimensions:
 * target_el_table[2][2][2][2][2][4]
 *                 |  |  |  |  |  +--- Current EL
 *                 |  |  |  |  +------ Non-secure(0)/Secure(1)
 *                 |  |  |  +--------- HCR mask override
 *                 |  |  +------------ SCR exec state control
 *                 |  +--------------- SCR mask override
 *                 +------------------ 32-bit(0)/64-bit(1) EL3
 *
 * The table values are as such:
 * 0-3 = EL0-EL3
 * -1 = Cannot occur
 *
 * The ARM ARM target EL table includes entries indicating that an "exception
 * is not taken". The two cases where this is applicable are:
 * 1) An exception is taken from EL3 but the SCR does not have the exception
 * routed to EL3.
 * 2) An exception is taken from EL2 but the HCR does not have the exception
 * routed to EL2.
 * In these two cases, the below table contains a target of EL1. This value is
 * returned as it is expected that the consumer of the table data will check
 * for "target EL >= current EL" to ensure the exception is not taken.
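 * As an example lookup (values chosen purely for illustration): a
 * physical IRQ taken from non-secure EL0 with SCR_EL3.IRQ == 0 and
 * HCR_EL2.IMO == 0 on a CPU whose EL3 is AArch64 indexes
 * target_el_table[1][0][rw][0][0][0] and yields EL1 for either value
 * of rw.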
6266 * 6267 * SCR HCR 6268 * 64 EA AMO From 6269 * BIT IRQ IMO Non-secure Secure 6270 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 6271 */ 6272 static const int8_t target_el_table[2][2][2][2][2][4] = { 6273 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6274 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 6275 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 6276 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 6277 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6278 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 6279 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 6280 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 6281 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 6282 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 6283 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 6284 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 6285 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6286 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 6287 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 6288 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 6289 }; 6290 6291 /* 6292 * Determine the target EL for physical exceptions 6293 */ 6294 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 6295 uint32_t cur_el, bool secure) 6296 { 6297 CPUARMState *env = cs->env_ptr; 6298 int rw; 6299 int scr; 6300 int hcr; 6301 int target_el; 6302 /* Is the highest EL AArch64? */ 6303 int is64 = arm_feature(env, ARM_FEATURE_AARCH64); 6304 6305 if (arm_feature(env, ARM_FEATURE_EL3)) { 6306 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 6307 } else { 6308 /* Either EL2 is the highest EL (and so the EL2 register width 6309 * is given by is64); or there is no EL2 or EL3, in which case 6310 * the value of 'rw' does not affect the table lookup anyway. 
6311 */ 6312 rw = is64; 6313 } 6314 6315 switch (excp_idx) { 6316 case EXCP_IRQ: 6317 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 6318 hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO); 6319 break; 6320 case EXCP_FIQ: 6321 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 6322 hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO); 6323 break; 6324 default: 6325 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 6326 hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO); 6327 break; 6328 }; 6329 6330 /* If HCR.TGE is set then HCR is treated as being 1 */ 6331 hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE); 6332 6333 /* Perform a table-lookup for the target EL given the current state */ 6334 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 6335 6336 assert(target_el > 0); 6337 6338 return target_el; 6339 } 6340 6341 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, 6342 ARMMMUIdx mmu_idx, bool ignfault) 6343 { 6344 CPUState *cs = CPU(cpu); 6345 CPUARMState *env = &cpu->env; 6346 MemTxAttrs attrs = {}; 6347 MemTxResult txres; 6348 target_ulong page_size; 6349 hwaddr physaddr; 6350 int prot; 6351 ARMMMUFaultInfo fi; 6352 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6353 int exc; 6354 bool exc_secure; 6355 6356 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, 6357 &attrs, &prot, &page_size, &fi, NULL)) { 6358 /* MPU/SAU lookup failed */ 6359 if (fi.type == ARMFault_QEMU_SFault) { 6360 qemu_log_mask(CPU_LOG_INT, 6361 "...SecureFault with SFSR.AUVIOL during stacking\n"); 6362 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6363 env->v7m.sfar = addr; 6364 exc = ARMV7M_EXCP_SECURE; 6365 exc_secure = false; 6366 } else { 6367 qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n"); 6368 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; 6369 exc = ARMV7M_EXCP_MEM; 6370 exc_secure = secure; 6371 } 6372 goto pend_fault; 6373 } 6374 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value, 6375 attrs, &txres); 6376 if (txres != MEMTX_OK) { 6377 /* BusFault trying to write the data */ 6378 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); 6379 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; 6380 exc = ARMV7M_EXCP_BUS; 6381 exc_secure = false; 6382 goto pend_fault; 6383 } 6384 return true; 6385 6386 pend_fault: 6387 /* By pending the exception at this point we are making 6388 * the IMPDEF choice "overridden exceptions pended" (see the 6389 * MergeExcInfo() pseudocode). The other choice would be to not 6390 * pend them now and then make a choice about which to throw away 6391 * later if we have two derived exceptions. 6392 * The only case when we must not pend the exception but instead 6393 * throw it away is if we are doing the push of the callee registers 6394 * and we've already generated a derived exception. Even in this 6395 * case we will still update the fault status registers. 
6396 */ 6397 if (!ignfault) { 6398 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); 6399 } 6400 return false; 6401 } 6402 6403 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, 6404 ARMMMUIdx mmu_idx) 6405 { 6406 CPUState *cs = CPU(cpu); 6407 CPUARMState *env = &cpu->env; 6408 MemTxAttrs attrs = {}; 6409 MemTxResult txres; 6410 target_ulong page_size; 6411 hwaddr physaddr; 6412 int prot; 6413 ARMMMUFaultInfo fi; 6414 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 6415 int exc; 6416 bool exc_secure; 6417 uint32_t value; 6418 6419 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 6420 &attrs, &prot, &page_size, &fi, NULL)) { 6421 /* MPU/SAU lookup failed */ 6422 if (fi.type == ARMFault_QEMU_SFault) { 6423 qemu_log_mask(CPU_LOG_INT, 6424 "...SecureFault with SFSR.AUVIOL during unstack\n"); 6425 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 6426 env->v7m.sfar = addr; 6427 exc = ARMV7M_EXCP_SECURE; 6428 exc_secure = false; 6429 } else { 6430 qemu_log_mask(CPU_LOG_INT, 6431 "...MemManageFault with CFSR.MUNSTKERR\n"); 6432 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; 6433 exc = ARMV7M_EXCP_MEM; 6434 exc_secure = secure; 6435 } 6436 goto pend_fault; 6437 } 6438 6439 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, 6440 attrs, &txres); 6441 if (txres != MEMTX_OK) { 6442 /* BusFault trying to read the data */ 6443 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); 6444 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; 6445 exc = ARMV7M_EXCP_BUS; 6446 exc_secure = false; 6447 goto pend_fault; 6448 } 6449 6450 *dest = value; 6451 return true; 6452 6453 pend_fault: 6454 /* By pending the exception at this point we are making 6455 * the IMPDEF choice "overridden exceptions pended" (see the 6456 * MergeExcInfo() pseudocode). The other choice would be to not 6457 * pend them now and then make a choice about which to throw away 6458 * later if we have two derived exceptions. 6459 */ 6460 armv7m_nvic_set_pending(env->nvic, exc, exc_secure); 6461 return false; 6462 } 6463 6464 /* Return true if we're using the process stack pointer (not the MSP) */ 6465 static bool v7m_using_psp(CPUARMState *env) 6466 { 6467 /* Handler mode always uses the main stack; for thread mode 6468 * the CONTROL.SPSEL bit determines the answer. 6469 * Note that in v7M it is not possible to be in Handler mode with 6470 * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both. 6471 */ 6472 return !arm_v7m_is_handler_mode(env) && 6473 env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK; 6474 } 6475 6476 /* Write to v7M CONTROL.SPSEL bit for the specified security bank. 6477 * This may change the current stack pointer between Main and Process 6478 * stack pointers if it is done for the CONTROL register for the current 6479 * security state. 6480 */ 6481 static void write_v7m_control_spsel_for_secstate(CPUARMState *env, 6482 bool new_spsel, 6483 bool secstate) 6484 { 6485 bool old_is_psp = v7m_using_psp(env); 6486 6487 env->v7m.control[secstate] = 6488 deposit32(env->v7m.control[secstate], 6489 R_V7M_CONTROL_SPSEL_SHIFT, 6490 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 6491 6492 if (secstate == env->v7m.secure) { 6493 bool new_is_psp = v7m_using_psp(env); 6494 uint32_t tmp; 6495 6496 if (old_is_psp != new_is_psp) { 6497 tmp = env->v7m.other_sp; 6498 env->v7m.other_sp = env->regs[13]; 6499 env->regs[13] = tmp; 6500 } 6501 } 6502 } 6503 6504 /* Write to v7M CONTROL.SPSEL bit. 
This may change the current 6505 * stack pointer between Main and Process stack pointers. 6506 */ 6507 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) 6508 { 6509 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); 6510 } 6511 6512 void write_v7m_exception(CPUARMState *env, uint32_t new_exc) 6513 { 6514 /* Write a new value to v7m.exception, thus transitioning into or out 6515 * of Handler mode; this may result in a change of active stack pointer. 6516 */ 6517 bool new_is_psp, old_is_psp = v7m_using_psp(env); 6518 uint32_t tmp; 6519 6520 env->v7m.exception = new_exc; 6521 6522 new_is_psp = v7m_using_psp(env); 6523 6524 if (old_is_psp != new_is_psp) { 6525 tmp = env->v7m.other_sp; 6526 env->v7m.other_sp = env->regs[13]; 6527 env->regs[13] = tmp; 6528 } 6529 } 6530 6531 /* Switch M profile security state between NS and S */ 6532 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) 6533 { 6534 uint32_t new_ss_msp, new_ss_psp; 6535 6536 if (env->v7m.secure == new_secstate) { 6537 return; 6538 } 6539 6540 /* All the banked state is accessed by looking at env->v7m.secure 6541 * except for the stack pointer; rearrange the SP appropriately. 6542 */ 6543 new_ss_msp = env->v7m.other_ss_msp; 6544 new_ss_psp = env->v7m.other_ss_psp; 6545 6546 if (v7m_using_psp(env)) { 6547 env->v7m.other_ss_psp = env->regs[13]; 6548 env->v7m.other_ss_msp = env->v7m.other_sp; 6549 } else { 6550 env->v7m.other_ss_msp = env->regs[13]; 6551 env->v7m.other_ss_psp = env->v7m.other_sp; 6552 } 6553 6554 env->v7m.secure = new_secstate; 6555 6556 if (v7m_using_psp(env)) { 6557 env->regs[13] = new_ss_psp; 6558 env->v7m.other_sp = new_ss_msp; 6559 } else { 6560 env->regs[13] = new_ss_msp; 6561 env->v7m.other_sp = new_ss_psp; 6562 } 6563 } 6564 6565 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 6566 { 6567 /* Handle v7M BXNS: 6568 * - if the return value is a magic value, do exception return (like BX) 6569 * - otherwise bit 0 of the return value is the target security state 6570 */ 6571 uint32_t min_magic; 6572 6573 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6574 /* Covers FNC_RETURN and EXC_RETURN magic */ 6575 min_magic = FNC_RETURN_MIN_MAGIC; 6576 } else { 6577 /* EXC_RETURN magic only */ 6578 min_magic = EXC_RETURN_MIN_MAGIC; 6579 } 6580 6581 if (dest >= min_magic) { 6582 /* This is an exception return magic value; put it where 6583 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 6584 * Note that if we ever add gen_ss_advance() singlestep support to 6585 * M profile this should count as an "instruction execution complete" 6586 * event (compare gen_bx_excret_final_code()). 
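 *
 * For reference: EXC_RETURN magic values occupy the very top of the
 * address space (values of the form 0xffxxxxxx, at or above
 * EXC_RETURN_MIN_MAGIC), while the v8M FNC_RETURN values sit just below
 * them at 0xfefffffe and 0xfeffffff, which is why a single
 * ">= min_magic" comparison is enough to catch both kinds here.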
6587 */ 6588 env->regs[15] = dest & ~1; 6589 env->thumb = dest & 1; 6590 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); 6591 /* notreached */ 6592 } 6593 6594 /* translate.c should have made BXNS UNDEF unless we're secure */ 6595 assert(env->v7m.secure); 6596 6597 switch_v7m_security_state(env, dest & 1); 6598 env->thumb = 1; 6599 env->regs[15] = dest & ~1; 6600 } 6601 6602 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 6603 { 6604 /* Handle v7M BLXNS: 6605 * - bit 0 of the destination address is the target security state 6606 */ 6607 6608 /* At this point regs[15] is the address just after the BLXNS */ 6609 uint32_t nextinst = env->regs[15] | 1; 6610 uint32_t sp = env->regs[13] - 8; 6611 uint32_t saved_psr; 6612 6613 /* translate.c will have made BLXNS UNDEF unless we're secure */ 6614 assert(env->v7m.secure); 6615 6616 if (dest & 1) { 6617 /* target is Secure, so this is just a normal BLX, 6618 * except that the low bit doesn't indicate Thumb/not. 6619 */ 6620 env->regs[14] = nextinst; 6621 env->thumb = 1; 6622 env->regs[15] = dest & ~1; 6623 return; 6624 } 6625 6626 /* Target is non-secure: first push a stack frame */ 6627 if (!QEMU_IS_ALIGNED(sp, 8)) { 6628 qemu_log_mask(LOG_GUEST_ERROR, 6629 "BLXNS with misaligned SP is UNPREDICTABLE\n"); 6630 } 6631 6632 saved_psr = env->v7m.exception; 6633 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 6634 saved_psr |= XPSR_SFPA; 6635 } 6636 6637 /* Note that these stores can throw exceptions on MPU faults */ 6638 cpu_stl_data(env, sp, nextinst); 6639 cpu_stl_data(env, sp + 4, saved_psr); 6640 6641 env->regs[13] = sp; 6642 env->regs[14] = 0xfeffffff; 6643 if (arm_v7m_is_handler_mode(env)) { 6644 /* Write a dummy value to IPSR, to avoid leaking the current secure 6645 * exception number to non-secure code. This is guaranteed not 6646 * to cause write_v7m_exception() to actually change stacks. 6647 */ 6648 write_v7m_exception(env, 1); 6649 } 6650 switch_v7m_security_state(env, 0); 6651 env->thumb = 1; 6652 env->regs[15] = dest; 6653 } 6654 6655 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 6656 bool spsel) 6657 { 6658 /* Return a pointer to the location where we currently store the 6659 * stack pointer for the requested security state and thread mode. 6660 * This pointer will become invalid if the CPU state is updated 6661 * such that the stack pointers are switched around (eg changing 6662 * the SPSEL control bit). 6663 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 6664 * Unlike that pseudocode, we require the caller to pass us in the 6665 * SPSEL control bit value; this is because we also use this 6666 * function in handling of pushing of the callee-saves registers 6667 * part of the v8M stack frame (pseudocode PushCalleeStack()), 6668 * and in the tailchain codepath the SPSEL bit comes from the exception 6669 * return magic LR value from the previous exception. The pseudocode 6670 * opencodes the stack-selection in PushCalleeStack(), but we prefer 6671 * to make this utility function generic enough to do the job. 
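 *
 * As a sketch of the storage this function chooses between: the SP that
 * is live for the current security state is env->regs[13], its inactive
 * partner (the MSP if we are currently on the PSP, or vice-versa) is
 * env->v7m.other_sp, and the two stack pointers of the other security
 * state live in env->v7m.other_ss_msp and env->v7m.other_ss_psp.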
6672 */ 6673 bool want_psp = threadmode && spsel; 6674 6675 if (secure == env->v7m.secure) { 6676 if (want_psp == v7m_using_psp(env)) { 6677 return &env->regs[13]; 6678 } else { 6679 return &env->v7m.other_sp; 6680 } 6681 } else { 6682 if (want_psp) { 6683 return &env->v7m.other_ss_psp; 6684 } else { 6685 return &env->v7m.other_ss_msp; 6686 } 6687 } 6688 } 6689 6690 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, 6691 uint32_t *pvec) 6692 { 6693 CPUState *cs = CPU(cpu); 6694 CPUARMState *env = &cpu->env; 6695 MemTxResult result; 6696 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; 6697 uint32_t vector_entry; 6698 MemTxAttrs attrs = {}; 6699 ARMMMUIdx mmu_idx; 6700 bool exc_secure; 6701 6702 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); 6703 6704 /* We don't do a get_phys_addr() here because the rules for vector 6705 * loads are special: they always use the default memory map, and 6706 * the default memory map permits reads from all addresses. 6707 * Since there's no easy way to pass through to pmsav8_mpu_lookup() 6708 * that we want this special case which would always say "yes", 6709 * we just do the SAU lookup here followed by a direct physical load. 6710 */ 6711 attrs.secure = targets_secure; 6712 attrs.user = false; 6713 6714 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6715 V8M_SAttributes sattrs = {}; 6716 6717 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 6718 if (sattrs.ns) { 6719 attrs.secure = false; 6720 } else if (!targets_secure) { 6721 /* NS access to S memory */ 6722 goto load_fail; 6723 } 6724 } 6725 6726 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr, 6727 attrs, &result); 6728 if (result != MEMTX_OK) { 6729 goto load_fail; 6730 } 6731 *pvec = vector_entry; 6732 return true; 6733 6734 load_fail: 6735 /* All vector table fetch fails are reported as HardFault, with 6736 * HFSR.VECTTBL and .FORCED set. (FORCED is set because 6737 * technically the underlying exception is a MemManage or BusFault 6738 * that is escalated to HardFault.) This is a terminal exception, 6739 * so we will either take the HardFault immediately or else enter 6740 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). 6741 */ 6742 exc_secure = targets_secure || 6743 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 6744 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; 6745 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); 6746 return false; 6747 } 6748 6749 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6750 bool ignore_faults) 6751 { 6752 /* For v8M, push the callee-saves register part of the stack frame. 6753 * Compare the v8M pseudocode PushCalleeStack(). 6754 * In the tailchaining case this may not be the current stack. 
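 *
 * The callee-saves frame written below occupies 0x28 bytes:
 *   [sp + 0x00] integrity signature (0xfefa125b)
 *   [sp + 0x08] r4    [sp + 0x0c] r5    [sp + 0x10] r6    [sp + 0x14] r7
 *   [sp + 0x18] r8    [sp + 0x1c] r9    [sp + 0x20] r10   [sp + 0x24] r11
 * (offset 0x04 is not written by this function).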
6755 */ 6756 CPUARMState *env = &cpu->env; 6757 uint32_t *frame_sp_p; 6758 uint32_t frameptr; 6759 ARMMMUIdx mmu_idx; 6760 bool stacked_ok; 6761 6762 if (dotailchain) { 6763 bool mode = lr & R_V7M_EXCRET_MODE_MASK; 6764 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || 6765 !mode; 6766 6767 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); 6768 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, 6769 lr & R_V7M_EXCRET_SPSEL_MASK); 6770 } else { 6771 mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6772 frame_sp_p = &env->regs[13]; 6773 } 6774 6775 frameptr = *frame_sp_p - 0x28; 6776 6777 /* Write as much of the stack frame as we can. A write failure may 6778 * cause us to pend a derived exception. 6779 */ 6780 stacked_ok = 6781 v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) && 6782 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, 6783 ignore_faults) && 6784 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, 6785 ignore_faults) && 6786 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, 6787 ignore_faults) && 6788 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, 6789 ignore_faults) && 6790 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, 6791 ignore_faults) && 6792 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, 6793 ignore_faults) && 6794 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, 6795 ignore_faults) && 6796 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, 6797 ignore_faults); 6798 6799 /* Update SP regardless of whether any of the stack accesses failed. 6800 * When we implement v8M stack limit checking then this attempt to 6801 * update SP might also fail and result in a derived exception. 6802 */ 6803 *frame_sp_p = frameptr; 6804 6805 return !stacked_ok; 6806 } 6807 6808 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, 6809 bool ignore_stackfaults) 6810 { 6811 /* Do the "take the exception" parts of exception entry, 6812 * but not the pushing of state to the stack. This is 6813 * similar to the pseudocode ExceptionTaken() function. 6814 */ 6815 CPUARMState *env = &cpu->env; 6816 uint32_t addr; 6817 bool targets_secure; 6818 int exc; 6819 bool push_failed = false; 6820 6821 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); 6822 6823 if (arm_feature(env, ARM_FEATURE_V8)) { 6824 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 6825 (lr & R_V7M_EXCRET_S_MASK)) { 6826 /* The background code (the owner of the registers in the 6827 * exception frame) is Secure. This means it may either already 6828 * have or now needs to push callee-saves registers. 6829 */ 6830 if (targets_secure) { 6831 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { 6832 /* We took an exception from Secure to NonSecure 6833 * (which means the callee-saved registers got stacked) 6834 * and are now tailchaining to a Secure exception. 6835 * Clear DCRS so eventual return from this Secure 6836 * exception unstacks the callee-saved registers. 6837 */ 6838 lr &= ~R_V7M_EXCRET_DCRS_MASK; 6839 } 6840 } else { 6841 /* We're going to a non-secure exception; push the 6842 * callee-saves registers to the stack now, if they're 6843 * not already saved. 
6844 */ 6845 if (lr & R_V7M_EXCRET_DCRS_MASK && 6846 !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) { 6847 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, 6848 ignore_stackfaults); 6849 } 6850 lr |= R_V7M_EXCRET_DCRS_MASK; 6851 } 6852 } 6853 6854 lr &= ~R_V7M_EXCRET_ES_MASK; 6855 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6856 lr |= R_V7M_EXCRET_ES_MASK; 6857 } 6858 lr &= ~R_V7M_EXCRET_SPSEL_MASK; 6859 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { 6860 lr |= R_V7M_EXCRET_SPSEL_MASK; 6861 } 6862 6863 /* Clear registers if necessary to prevent non-secure exception 6864 * code being able to see register values from secure code. 6865 * Where register values become architecturally UNKNOWN we leave 6866 * them with their previous values. 6867 */ 6868 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 6869 if (!targets_secure) { 6870 /* Always clear the caller-saved registers (they have been 6871 * pushed to the stack earlier in v7m_push_stack()). 6872 * Clear callee-saved registers if the background code is 6873 * Secure (in which case these regs were saved in 6874 * v7m_push_callee_stack()). 6875 */ 6876 int i; 6877 6878 for (i = 0; i < 13; i++) { 6879 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ 6880 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { 6881 env->regs[i] = 0; 6882 } 6883 } 6884 /* Clear EAPSR */ 6885 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); 6886 } 6887 } 6888 } 6889 6890 if (push_failed && !ignore_stackfaults) { 6891 /* Derived exception on callee-saves register stacking: 6892 * we might now want to take a different exception which 6893 * targets a different security state, so try again from the top. 6894 */ 6895 v7m_exception_taken(cpu, lr, true, true); 6896 return; 6897 } 6898 6899 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { 6900 /* Vector load failed: derived exception */ 6901 v7m_exception_taken(cpu, lr, true, true); 6902 return; 6903 } 6904 6905 /* Now we've done everything that might cause a derived exception 6906 * we can go ahead and activate whichever exception we're going to 6907 * take (which might now be the derived exception). 6908 */ 6909 armv7m_nvic_acknowledge_irq(env->nvic); 6910 6911 /* Switch to target security state -- must do this before writing SPSEL */ 6912 switch_v7m_security_state(env, targets_secure); 6913 write_v7m_control_spsel(env, 0); 6914 arm_clear_exclusive(env); 6915 /* Clear IT bits */ 6916 env->condexec_bits = 0; 6917 env->regs[14] = lr; 6918 env->regs[15] = addr & 0xfffffffe; 6919 env->thumb = addr & 1; 6920 } 6921 6922 static bool v7m_push_stack(ARMCPU *cpu) 6923 { 6924 /* Do the "set up stack frame" part of exception entry, 6925 * similar to pseudocode PushStack(). 6926 * Return true if we generate a derived exception (and so 6927 * should ignore further stack faults trying to process 6928 * that derived exception.) 6929 */ 6930 bool stacked_ok; 6931 CPUARMState *env = &cpu->env; 6932 uint32_t xpsr = xpsr_read(env); 6933 uint32_t frameptr = env->regs[13]; 6934 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 6935 6936 /* Align stack pointer if the guest wants that */ 6937 if ((frameptr & 4) && 6938 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { 6939 frameptr -= 4; 6940 xpsr |= XPSR_SPREALIGN; 6941 } 6942 6943 frameptr -= 0x20; 6944 6945 /* Write as much of the stack frame as we can. 
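 * The basic (caller-saved) frame written here occupies 0x20 bytes:
 *   [sp + 0x00] r0    [sp + 0x04] r1    [sp + 0x08] r2    [sp + 0x0c] r3
 *   [sp + 0x10] r12   [sp + 0x14] lr    [sp + 0x18] pc (return address)
 *   [sp + 0x1c] xPSR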
If we fail a stack
6946 * write this will result in a derived exception being pended
6947 * (which may be taken in preference to the one we started with
6948 * if it has higher priority).
6949 */
6950 stacked_ok =
6951 v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) &&
6952 v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) &&
6953 v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) &&
6954 v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) &&
6955 v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) &&
6956 v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) &&
6957 v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) &&
6958 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false);
6959
6960 /* Update SP regardless of whether any of the stack accesses failed.
6961 * When we implement v8M stack limit checking then this attempt to
6962 * update SP might also fail and result in a derived exception.
6963 */
6964 env->regs[13] = frameptr;
6965
6966 return !stacked_ok;
6967 }
6968
6969 static void do_v7m_exception_exit(ARMCPU *cpu)
6970 {
6971 CPUARMState *env = &cpu->env;
6972 uint32_t excret;
6973 uint32_t xpsr;
6974 bool ufault = false;
6975 bool sfault = false;
6976 bool return_to_sp_process;
6977 bool return_to_handler;
6978 bool rettobase = false;
6979 bool exc_secure = false;
6980 bool return_to_secure;
6981
6982 /* If we're not in Handler mode then jumps to magic exception-exit
6983 * addresses don't have magic behaviour. However for the v8M
6984 * security extensions the magic secure-function-return has to
6985 * work in thread mode too, so to avoid doing an extra check in
6986 * the generated code we allow exception-exit magic to also cause the
6987 * internal exception and bring us here in thread mode. Correct code
6988 * will never try to do this (the following insn fetch will always
6989 * fault) so the overhead of having taken an unnecessary exception
6990 * doesn't matter.
6991 */
6992 if (!arm_v7m_is_handler_mode(env)) {
6993 return;
6994 }
6995
6996 /* In the spec pseudocode ExceptionReturn() is called directly
6997 * from BXWritePC() and gets the full target PC value including
6998 * bit zero. In QEMU's implementation we treat it as a normal
6999 * jump-to-register (which is then caught later on), and so split
7000 * the target value up between env->regs[15] and env->thumb in
7001 * gen_bx(). Reconstitute it.
7002 */
7003 excret = env->regs[15];
7004 if (env->thumb) {
7005 excret |= 1;
7006 }
7007
7008 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
7009 " previous exception %d\n",
7010 excret, env->v7m.exception);
7011
7012 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
7013 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
7014 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
7015 excret);
7016 }
7017
7018 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
7019 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
7020 * we pick which FAULTMASK to clear.
7021 */
7022 if (!env->v7m.secure &&
7023 ((excret & R_V7M_EXCRET_ES_MASK) ||
7024 !(excret & R_V7M_EXCRET_DCRS_MASK))) {
7025 sfault = 1;
7026 /* For all other purposes, treat ES as 0 (R_HXSR) */
7027 excret &= ~R_V7M_EXCRET_ES_MASK;
7028 }
7029 }
7030
7031 if (env->v7m.exception != ARMV7M_EXCP_NMI) {
7032 /* Auto-clear FAULTMASK on return from other than NMI.
7033 * If the security extension is implemented then this only 7034 * happens if the raw execution priority is >= 0; the 7035 * value of the ES bit in the exception return value indicates 7036 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.) 7037 */ 7038 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7039 exc_secure = excret & R_V7M_EXCRET_ES_MASK; 7040 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { 7041 env->v7m.faultmask[exc_secure] = 0; 7042 } 7043 } else { 7044 env->v7m.faultmask[M_REG_NS] = 0; 7045 } 7046 } 7047 7048 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, 7049 exc_secure)) { 7050 case -1: 7051 /* attempt to exit an exception that isn't active */ 7052 ufault = true; 7053 break; 7054 case 0: 7055 /* still an irq active now */ 7056 break; 7057 case 1: 7058 /* we returned to base exception level, no nesting. 7059 * (In the pseudocode this is written using "NestedActivation != 1" 7060 * where we have 'rettobase == false'.) 7061 */ 7062 rettobase = true; 7063 break; 7064 default: 7065 g_assert_not_reached(); 7066 } 7067 7068 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); 7069 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; 7070 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && 7071 (excret & R_V7M_EXCRET_S_MASK); 7072 7073 if (arm_feature(env, ARM_FEATURE_V8)) { 7074 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7075 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); 7076 * we choose to take the UsageFault. 7077 */ 7078 if ((excret & R_V7M_EXCRET_S_MASK) || 7079 (excret & R_V7M_EXCRET_ES_MASK) || 7080 !(excret & R_V7M_EXCRET_DCRS_MASK)) { 7081 ufault = true; 7082 } 7083 } 7084 if (excret & R_V7M_EXCRET_RES0_MASK) { 7085 ufault = true; 7086 } 7087 } else { 7088 /* For v7M we only recognize certain combinations of the low bits */ 7089 switch (excret & 0xf) { 7090 case 1: /* Return to Handler */ 7091 break; 7092 case 13: /* Return to Thread using Process stack */ 7093 case 9: /* Return to Thread using Main stack */ 7094 /* We only need to check NONBASETHRDENA for v7M, because in 7095 * v8M this bit does not exist (it is RES1). 7096 */ 7097 if (!rettobase && 7098 !(env->v7m.ccr[env->v7m.secure] & 7099 R_V7M_CCR_NONBASETHRDENA_MASK)) { 7100 ufault = true; 7101 } 7102 break; 7103 default: 7104 ufault = true; 7105 } 7106 } 7107 7108 if (sfault) { 7109 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; 7110 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7111 v7m_exception_taken(cpu, excret, true, false); 7112 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 7113 "stackframe: failed EXC_RETURN.ES validity check\n"); 7114 return; 7115 } 7116 7117 if (ufault) { 7118 /* Bad exception return: instead of popping the exception 7119 * stack, directly take a usage fault on the current stack. 7120 */ 7121 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7122 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7123 v7m_exception_taken(cpu, excret, true, false); 7124 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7125 "stackframe: failed exception return integrity check\n"); 7126 return; 7127 } 7128 7129 /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in 7130 * Handler mode (and will be until we write the new XPSR.Interrupt 7131 * field) this does not switch around the current stack pointer. 
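 * (excret.SPSEL is bit 2 of the value validated above; compare the v7M
 * encodings 0x1, 0x9 and 0xd, of which only 0xd selects the process
 * stack.)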
7132 */
7133 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
7134
7135 switch_v7m_security_state(env, return_to_secure);
7136
7137 {
7138 /* The stack pointer we should be reading the exception frame from
7139 * depends on bits in the magic exception return type value (and
7140 * for v8M isn't necessarily the stack pointer we will eventually
7141 * end up resuming execution with). Get a pointer to the location
7142 * in the CPU state struct where the SP we need is currently being
7143 * stored; we will use and modify it in place.
7144 * We use this limited C variable scope so we don't accidentally
7145 * use 'frame_sp_p' after we do something that makes it invalid.
7146 */
7147 uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
7148 return_to_secure,
7149 !return_to_handler,
7150 return_to_sp_process);
7151 uint32_t frameptr = *frame_sp_p;
7152 bool pop_ok = true;
7153 ARMMMUIdx mmu_idx;
7154
7155 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
7156 !return_to_handler);
7157
7158 if (!QEMU_IS_ALIGNED(frameptr, 8) &&
7159 arm_feature(env, ARM_FEATURE_V8)) {
7160 qemu_log_mask(LOG_GUEST_ERROR,
7161 "M profile exception return with non-8-aligned SP "
7162 "for destination state is UNPREDICTABLE\n");
7163 }
7164
7165 /* Do we need to pop callee-saved registers? */
7166 if (return_to_secure &&
7167 ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
7168 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
7169 uint32_t expected_sig = 0xfefa125b;
7170 uint32_t actual_sig;
7171
7172 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
7173
7174 if (pop_ok && expected_sig != actual_sig) {
7175 /* Take a SecureFault on the current stack */
7176 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
7177 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
7178 v7m_exception_taken(cpu, excret, true, false);
7179 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
7180 "stackframe: failed exception return integrity "
7181 "signature check\n");
7182 return;
7183 }
7184
7185 pop_ok = pop_ok &&
7186 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
7187 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
7188 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
7189 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
7190 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
7191 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
7192 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
7193 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24,
7194 mmu_idx);
7195
7196 frameptr += 0x28;
7197 }
7198
7199 /* Pop registers */
7200 pop_ok = pop_ok &&
7201 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
7202 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
7203 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
7204 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
7205 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
7206 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
7207 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
7208 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
7209
7210 if (!pop_ok) {
7211 /* v7m_stack_read() pended a fault, so take it (as a tail
7212 * chained exception on the same stack frame)
7213 */
7214 v7m_exception_taken(cpu, excret, true, false);
7215 return;
7216 }
7217
7218 /* Returning from an
exception with a PC with bit 0 set is defined 7219 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified 7220 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore 7221 * the lsbit, and there are several RTOSes out there which incorrectly 7222 * assume the r15 in the stack frame should be a Thumb-style "lsbit 7223 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but 7224 * complain about the badly behaved guest. 7225 */ 7226 if (env->regs[15] & 1) { 7227 env->regs[15] &= ~1U; 7228 if (!arm_feature(env, ARM_FEATURE_V8)) { 7229 qemu_log_mask(LOG_GUEST_ERROR, 7230 "M profile return from interrupt with misaligned " 7231 "PC is UNPREDICTABLE on v7M\n"); 7232 } 7233 } 7234 7235 if (arm_feature(env, ARM_FEATURE_V8)) { 7236 /* For v8M we have to check whether the xPSR exception field 7237 * matches the EXCRET value for return to handler/thread 7238 * before we commit to changing the SP and xPSR. 7239 */ 7240 bool will_be_handler = (xpsr & XPSR_EXCP) != 0; 7241 if (return_to_handler != will_be_handler) { 7242 /* Take an INVPC UsageFault on the current stack. 7243 * By this point we will have switched to the security state 7244 * for the background state, so this UsageFault will target 7245 * that state. 7246 */ 7247 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7248 env->v7m.secure); 7249 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7250 v7m_exception_taken(cpu, excret, true, false); 7251 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 7252 "stackframe: failed exception return integrity " 7253 "check\n"); 7254 return; 7255 } 7256 } 7257 7258 /* Commit to consuming the stack frame */ 7259 frameptr += 0x20; 7260 /* Undo stack alignment (the SPREALIGN bit indicates that the original 7261 * pre-exception SP was not 8-aligned and we added a padding word to 7262 * align it, so we undo this by ORing in the bit that increases it 7263 * from the current 8-aligned value to the 8-unaligned value. (Adding 4 7264 * would work too but a logical OR is how the pseudocode specifies it.) 7265 */ 7266 if (xpsr & XPSR_SPREALIGN) { 7267 frameptr |= 4; 7268 } 7269 *frame_sp_p = frameptr; 7270 } 7271 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ 7272 xpsr_write(env, xpsr, ~XPSR_SPREALIGN); 7273 7274 /* The restored xPSR exception field will be zero if we're 7275 * resuming in Thread mode. If that doesn't match what the 7276 * exception return excret specified then this is a UsageFault. 7277 * v7M requires we make this check here; v8M did it earlier. 7278 */ 7279 if (return_to_handler != arm_v7m_is_handler_mode(env)) { 7280 /* Take an INVPC UsageFault by pushing the stack again; 7281 * we know we're v7M so this is never a Secure UsageFault. 7282 */ 7283 bool ignore_stackfaults; 7284 7285 assert(!arm_feature(env, ARM_FEATURE_V8)); 7286 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); 7287 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7288 ignore_stackfaults = v7m_push_stack(cpu); 7289 v7m_exception_taken(cpu, excret, false, ignore_stackfaults); 7290 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " 7291 "failed exception return integrity check\n"); 7292 return; 7293 } 7294 7295 /* Otherwise, we have a successful exception exit. */ 7296 arm_clear_exclusive(env); 7297 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 7298 } 7299 7300 static bool do_v7m_function_return(ARMCPU *cpu) 7301 { 7302 /* v8M security extensions magic function return. 
7303 * We may either: 7304 * (1) throw an exception (longjump) 7305 * (2) return true if we successfully handled the function return 7306 * (3) return false if we failed a consistency check and have 7307 * pended a UsageFault that needs to be taken now 7308 * 7309 * At this point the magic return value is split between env->regs[15] 7310 * and env->thumb. We don't bother to reconstitute it because we don't 7311 * need it (all values are handled the same way). 7312 */ 7313 CPUARMState *env = &cpu->env; 7314 uint32_t newpc, newpsr, newpsr_exc; 7315 7316 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 7317 7318 { 7319 bool threadmode, spsel; 7320 TCGMemOpIdx oi; 7321 ARMMMUIdx mmu_idx; 7322 uint32_t *frame_sp_p; 7323 uint32_t frameptr; 7324 7325 /* Pull the return address and IPSR from the Secure stack */ 7326 threadmode = !arm_v7m_is_handler_mode(env); 7327 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 7328 7329 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 7330 frameptr = *frame_sp_p; 7331 7332 /* These loads may throw an exception (for MPU faults). We want to 7333 * do them as secure, so work out what MMU index that is. 7334 */ 7335 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7336 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 7337 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 7338 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 7339 7340 /* Consistency checks on new IPSR */ 7341 newpsr_exc = newpsr & XPSR_EXCP; 7342 if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 7343 (env->v7m.exception == 1 && newpsr_exc != 0))) { 7344 /* Pend the fault and tell our caller to take it */ 7345 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 7346 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7347 env->v7m.secure); 7348 qemu_log_mask(CPU_LOG_INT, 7349 "...taking INVPC UsageFault: " 7350 "IPSR consistency check failed\n"); 7351 return false; 7352 } 7353 7354 *frame_sp_p = frameptr + 8; 7355 } 7356 7357 /* This invalidates frame_sp_p */ 7358 switch_v7m_security_state(env, true); 7359 env->v7m.exception = newpsr_exc; 7360 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 7361 if (newpsr & XPSR_SFPA) { 7362 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 7363 } 7364 xpsr_write(env, 0, XPSR_IT); 7365 env->thumb = newpc & 1; 7366 env->regs[15] = newpc & ~1; 7367 7368 qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 7369 return true; 7370 } 7371 7372 static void arm_log_exception(int idx) 7373 { 7374 if (qemu_loglevel_mask(CPU_LOG_INT)) { 7375 const char *exc = NULL; 7376 static const char * const excnames[] = { 7377 [EXCP_UDEF] = "Undefined Instruction", 7378 [EXCP_SWI] = "SVC", 7379 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 7380 [EXCP_DATA_ABORT] = "Data Abort", 7381 [EXCP_IRQ] = "IRQ", 7382 [EXCP_FIQ] = "FIQ", 7383 [EXCP_BKPT] = "Breakpoint", 7384 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 7385 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 7386 [EXCP_HVC] = "Hypervisor Call", 7387 [EXCP_HYP_TRAP] = "Hypervisor Trap", 7388 [EXCP_SMC] = "Secure Monitor Call", 7389 [EXCP_VIRQ] = "Virtual IRQ", 7390 [EXCP_VFIQ] = "Virtual FIQ", 7391 [EXCP_SEMIHOST] = "Semihosting call", 7392 [EXCP_NOCP] = "v7M NOCP UsageFault", 7393 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 7394 }; 7395 7396 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 7397 exc = excnames[idx]; 7398 } 7399 if (!exc) { 7400 exc = "unknown"; 7401 } 7402 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, 
exc); 7403 } 7404 } 7405 7406 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 7407 uint32_t addr, uint16_t *insn) 7408 { 7409 /* Load a 16-bit portion of a v7M instruction, returning true on success, 7410 * or false on failure (in which case we will have pended the appropriate 7411 * exception). 7412 * We need to do the instruction fetch's MPU and SAU checks 7413 * like this because there is no MMU index that would allow 7414 * doing the load with a single function call. Instead we must 7415 * first check that the security attributes permit the load 7416 * and that they don't mismatch on the two halves of the instruction, 7417 * and then we do the load as a secure load (ie using the security 7418 * attributes of the address, not the CPU, as architecturally required). 7419 */ 7420 CPUState *cs = CPU(cpu); 7421 CPUARMState *env = &cpu->env; 7422 V8M_SAttributes sattrs = {}; 7423 MemTxAttrs attrs = {}; 7424 ARMMMUFaultInfo fi = {}; 7425 MemTxResult txres; 7426 target_ulong page_size; 7427 hwaddr physaddr; 7428 int prot; 7429 7430 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 7431 if (!sattrs.nsc || sattrs.ns) { 7432 /* This must be the second half of the insn, and it straddles a 7433 * region boundary with the second half not being S&NSC. 7434 */ 7435 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7436 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7437 qemu_log_mask(CPU_LOG_INT, 7438 "...really SecureFault with SFSR.INVEP\n"); 7439 return false; 7440 } 7441 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 7442 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 7443 /* the MPU lookup failed */ 7444 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7445 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 7446 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 7447 return false; 7448 } 7449 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 7450 attrs, &txres); 7451 if (txres != MEMTX_OK) { 7452 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7453 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7454 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 7455 return false; 7456 } 7457 return true; 7458 } 7459 7460 static bool v7m_handle_execute_nsc(ARMCPU *cpu) 7461 { 7462 /* Check whether this attempt to execute code in a Secure & NS-Callable 7463 * memory region is for an SG instruction; if so, then emulate the 7464 * effect of the SG instruction and return true. Otherwise pend 7465 * the correct kind of exception and return false. 7466 */ 7467 CPUARMState *env = &cpu->env; 7468 ARMMMUIdx mmu_idx; 7469 uint16_t insn; 7470 7471 /* We should never get here unless get_phys_addr_pmsav8() caused 7472 * an exception for NS executing in S&NSC memory. 7473 */ 7474 assert(!env->v7m.secure); 7475 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7476 7477 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 7478 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 7479 7480 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 7481 return false; 7482 } 7483 7484 if (!env->thumb) { 7485 goto gen_invep; 7486 } 7487 7488 if (insn != 0xe97f) { 7489 /* Not an SG instruction first half (we choose the IMPDEF 7490 * early-SG-check option). 
7491 */ 7492 goto gen_invep; 7493 } 7494 7495 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 7496 return false; 7497 } 7498 7499 if (insn != 0xe97f) { 7500 /* Not an SG instruction second half (yes, both halves of the SG 7501 * insn have the same hex value) 7502 */ 7503 goto gen_invep; 7504 } 7505 7506 /* OK, we have confirmed that we really have an SG instruction. 7507 * We know we're NS in S memory so don't need to repeat those checks. 7508 */ 7509 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 7510 ", executing it\n", env->regs[15]); 7511 env->regs[14] &= ~1; 7512 switch_v7m_security_state(env, true); 7513 xpsr_write(env, 0, XPSR_IT); 7514 env->regs[15] += 4; 7515 return true; 7516 7517 gen_invep: 7518 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7519 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7520 qemu_log_mask(CPU_LOG_INT, 7521 "...really SecureFault with SFSR.INVEP\n"); 7522 return false; 7523 } 7524 7525 void arm_v7m_cpu_do_interrupt(CPUState *cs) 7526 { 7527 ARMCPU *cpu = ARM_CPU(cs); 7528 CPUARMState *env = &cpu->env; 7529 uint32_t lr; 7530 bool ignore_stackfaults; 7531 7532 arm_log_exception(cs->exception_index); 7533 7534 /* For exceptions we just mark as pending on the NVIC, and let that 7535 handle it. */ 7536 switch (cs->exception_index) { 7537 case EXCP_UDEF: 7538 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7539 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; 7540 break; 7541 case EXCP_NOCP: 7542 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7543 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; 7544 break; 7545 case EXCP_INVSTATE: 7546 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 7547 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; 7548 break; 7549 case EXCP_SWI: 7550 /* The PC already points to the next instruction. */ 7551 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); 7552 break; 7553 case EXCP_PREFETCH_ABORT: 7554 case EXCP_DATA_ABORT: 7555 /* Note that for M profile we don't have a guest facing FSR, but 7556 * the env->exception.fsr will be populated by the code that 7557 * raises the fault, in the A profile short-descriptor format. 7558 */ 7559 switch (env->exception.fsr & 0xf) { 7560 case M_FAKE_FSR_NSC_EXEC: 7561 /* Exception generated when we try to execute code at an address 7562 * which is marked as Secure & Non-Secure Callable and the CPU 7563 * is in the Non-Secure state. The only instruction which can 7564 * be executed like this is SG (and that only if both halves of 7565 * the SG instruction have the same security attributes.) 7566 * Everything else must generate an INVEP SecureFault, so we 7567 * emulate the SG instruction here. 7568 */ 7569 if (v7m_handle_execute_nsc(cpu)) { 7570 return; 7571 } 7572 break; 7573 case M_FAKE_FSR_SFAULT: 7574 /* Various flavours of SecureFault for attempts to execute or 7575 * access data in the wrong security state. 
7576 */ 7577 switch (cs->exception_index) { 7578 case EXCP_PREFETCH_ABORT: 7579 if (env->v7m.secure) { 7580 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; 7581 qemu_log_mask(CPU_LOG_INT, 7582 "...really SecureFault with SFSR.INVTRAN\n"); 7583 } else { 7584 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 7585 qemu_log_mask(CPU_LOG_INT, 7586 "...really SecureFault with SFSR.INVEP\n"); 7587 } 7588 break; 7589 case EXCP_DATA_ABORT: 7590 /* This must be an NS access to S memory */ 7591 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; 7592 qemu_log_mask(CPU_LOG_INT, 7593 "...really SecureFault with SFSR.AUVIOL\n"); 7594 break; 7595 } 7596 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 7597 break; 7598 case 0x8: /* External Abort */ 7599 switch (cs->exception_index) { 7600 case EXCP_PREFETCH_ABORT: 7601 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 7602 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); 7603 break; 7604 case EXCP_DATA_ABORT: 7605 env->v7m.cfsr[M_REG_NS] |= 7606 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); 7607 env->v7m.bfar = env->exception.vaddress; 7608 qemu_log_mask(CPU_LOG_INT, 7609 "...with CFSR.PRECISERR and BFAR 0x%x\n", 7610 env->v7m.bfar); 7611 break; 7612 } 7613 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 7614 break; 7615 default: 7616 /* All other FSR values are either MPU faults or "can't happen 7617 * for M profile" cases. 7618 */ 7619 switch (cs->exception_index) { 7620 case EXCP_PREFETCH_ABORT: 7621 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 7622 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); 7623 break; 7624 case EXCP_DATA_ABORT: 7625 env->v7m.cfsr[env->v7m.secure] |= 7626 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); 7627 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; 7628 qemu_log_mask(CPU_LOG_INT, 7629 "...with CFSR.DACCVIOL and MMFAR 0x%x\n", 7630 env->v7m.mmfar[env->v7m.secure]); 7631 break; 7632 } 7633 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, 7634 env->v7m.secure); 7635 break; 7636 } 7637 break; 7638 case EXCP_BKPT: 7639 if (semihosting_enabled()) { 7640 int nr; 7641 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff; 7642 if (nr == 0xab) { 7643 env->regs[15] += 2; 7644 qemu_log_mask(CPU_LOG_INT, 7645 "...handling as semihosting call 0x%x\n", 7646 env->regs[0]); 7647 env->regs[0] = do_arm_semihosting(env); 7648 return; 7649 } 7650 } 7651 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); 7652 break; 7653 case EXCP_IRQ: 7654 break; 7655 case EXCP_EXCEPTION_EXIT: 7656 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 7657 /* Must be v8M security extension function return */ 7658 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 7659 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 7660 if (do_v7m_function_return(cpu)) { 7661 return; 7662 } 7663 } else { 7664 do_v7m_exception_exit(cpu); 7665 return; 7666 } 7667 break; 7668 default: 7669 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 7670 return; /* Never happens. Keep compiler happy. */ 7671 } 7672 7673 if (arm_feature(env, ARM_FEATURE_V8)) { 7674 lr = R_V7M_EXCRET_RES1_MASK | 7675 R_V7M_EXCRET_DCRS_MASK | 7676 R_V7M_EXCRET_FTYPE_MASK; 7677 /* The S bit indicates whether we should return to Secure 7678 * or NonSecure (ie our current state). 7679 * The ES bit indicates whether we're taking this exception 7680 * to Secure or NonSecure (ie our target state). We set it 7681 * later, in v7m_exception_taken(). 7682 * The SPSEL bit is also set in v7m_exception_taken() for v8M. 
7683 * This corresponds to the ARM ARM pseudocode for v8M setting 7684 * some LR bits in PushStack() and some in ExceptionTaken(); 7685 * the distinction matters for the tailchain cases where we 7686 * can take an exception without pushing the stack. 7687 */ 7688 if (env->v7m.secure) { 7689 lr |= R_V7M_EXCRET_S_MASK; 7690 } 7691 } else { 7692 lr = R_V7M_EXCRET_RES1_MASK | 7693 R_V7M_EXCRET_S_MASK | 7694 R_V7M_EXCRET_DCRS_MASK | 7695 R_V7M_EXCRET_FTYPE_MASK | 7696 R_V7M_EXCRET_ES_MASK; 7697 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { 7698 lr |= R_V7M_EXCRET_SPSEL_MASK; 7699 } 7700 } 7701 if (!arm_v7m_is_handler_mode(env)) { 7702 lr |= R_V7M_EXCRET_MODE_MASK; 7703 } 7704 7705 ignore_stackfaults = v7m_push_stack(cpu); 7706 v7m_exception_taken(cpu, lr, false, ignore_stackfaults); 7707 qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception); 7708 } 7709 7710 /* Function used to synchronize QEMU's AArch64 register set with AArch32 7711 * register set. This is necessary when switching between AArch32 and AArch64 7712 * execution state. 7713 */ 7714 void aarch64_sync_32_to_64(CPUARMState *env) 7715 { 7716 int i; 7717 uint32_t mode = env->uncached_cpsr & CPSR_M; 7718 7719 /* We can blanket copy R[0:7] to X[0:7] */ 7720 for (i = 0; i < 8; i++) { 7721 env->xregs[i] = env->regs[i]; 7722 } 7723 7724 /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 7725 * Otherwise, they come from the banked user regs. 7726 */ 7727 if (mode == ARM_CPU_MODE_FIQ) { 7728 for (i = 8; i < 13; i++) { 7729 env->xregs[i] = env->usr_regs[i - 8]; 7730 } 7731 } else { 7732 for (i = 8; i < 13; i++) { 7733 env->xregs[i] = env->regs[i]; 7734 } 7735 } 7736 7737 /* Registers x13-x23 are the various mode SP and FP registers. Registers 7738 * r13 and r14 are only copied if we are in that mode, otherwise we copy 7739 * from the mode banked register. 
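 *
 * Concretely, the mapping used below is:
 *   x13 = r13_usr   x14 = r14_usr   x15 = SP_hyp
 *   x16 = LR_irq    x17 = SP_irq    x18 = LR_svc    x19 = SP_svc
 *   x20 = LR_abt    x21 = SP_abt    x22 = LR_und    x23 = SP_und
 * i.e. each pair is the banked link register and stack pointer of one
 * AArch32 mode (HYP has no banked LR of its own, so only its SP appears
 * here; HYP shares the user r14).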
7740 */ 7741 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7742 env->xregs[13] = env->regs[13]; 7743 env->xregs[14] = env->regs[14]; 7744 } else { 7745 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 7746 /* HYP is an exception in that it is copied from r14 */ 7747 if (mode == ARM_CPU_MODE_HYP) { 7748 env->xregs[14] = env->regs[14]; 7749 } else { 7750 env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)]; 7751 } 7752 } 7753 7754 if (mode == ARM_CPU_MODE_HYP) { 7755 env->xregs[15] = env->regs[13]; 7756 } else { 7757 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 7758 } 7759 7760 if (mode == ARM_CPU_MODE_IRQ) { 7761 env->xregs[16] = env->regs[14]; 7762 env->xregs[17] = env->regs[13]; 7763 } else { 7764 env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)]; 7765 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 7766 } 7767 7768 if (mode == ARM_CPU_MODE_SVC) { 7769 env->xregs[18] = env->regs[14]; 7770 env->xregs[19] = env->regs[13]; 7771 } else { 7772 env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)]; 7773 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 7774 } 7775 7776 if (mode == ARM_CPU_MODE_ABT) { 7777 env->xregs[20] = env->regs[14]; 7778 env->xregs[21] = env->regs[13]; 7779 } else { 7780 env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)]; 7781 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 7782 } 7783 7784 if (mode == ARM_CPU_MODE_UND) { 7785 env->xregs[22] = env->regs[14]; 7786 env->xregs[23] = env->regs[13]; 7787 } else { 7788 env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)]; 7789 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 7790 } 7791 7792 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7793 * mode, then we can copy from r8-r14. Otherwise, we copy from the 7794 * FIQ bank for r8-r14. 7795 */ 7796 if (mode == ARM_CPU_MODE_FIQ) { 7797 for (i = 24; i < 31; i++) { 7798 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 7799 } 7800 } else { 7801 for (i = 24; i < 29; i++) { 7802 env->xregs[i] = env->fiq_regs[i - 24]; 7803 } 7804 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 7805 env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)]; 7806 } 7807 7808 env->pc = env->regs[15]; 7809 } 7810 7811 /* Function used to synchronize QEMU's AArch32 register set with AArch64 7812 * register set. This is necessary when switching between AArch32 and AArch64 7813 * execution state. 7814 */ 7815 void aarch64_sync_64_to_32(CPUARMState *env) 7816 { 7817 int i; 7818 uint32_t mode = env->uncached_cpsr & CPSR_M; 7819 7820 /* We can blanket copy X[0:7] to R[0:7] */ 7821 for (i = 0; i < 8; i++) { 7822 env->regs[i] = env->xregs[i]; 7823 } 7824 7825 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 7826 * Otherwise, we copy x8-x12 into the banked user regs. 7827 */ 7828 if (mode == ARM_CPU_MODE_FIQ) { 7829 for (i = 8; i < 13; i++) { 7830 env->usr_regs[i - 8] = env->xregs[i]; 7831 } 7832 } else { 7833 for (i = 8; i < 13; i++) { 7834 env->regs[i] = env->xregs[i]; 7835 } 7836 } 7837 7838 /* Registers r13 & r14 depend on the current mode. 7839 * If we are in a given mode, we copy the corresponding x registers to r13 7840 * and r14. Otherwise, we copy the x register to the banked r13 and r14 7841 * for the mode. 
7842 */ 7843 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 7844 env->regs[13] = env->xregs[13]; 7845 env->regs[14] = env->xregs[14]; 7846 } else { 7847 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 7848 7849 /* HYP is an exception in that it does not have its own banked r14 but 7850 * shares the USR r14 7851 */ 7852 if (mode == ARM_CPU_MODE_HYP) { 7853 env->regs[14] = env->xregs[14]; 7854 } else { 7855 env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 7856 } 7857 } 7858 7859 if (mode == ARM_CPU_MODE_HYP) { 7860 env->regs[13] = env->xregs[15]; 7861 } else { 7862 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 7863 } 7864 7865 if (mode == ARM_CPU_MODE_IRQ) { 7866 env->regs[14] = env->xregs[16]; 7867 env->regs[13] = env->xregs[17]; 7868 } else { 7869 env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 7870 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 7871 } 7872 7873 if (mode == ARM_CPU_MODE_SVC) { 7874 env->regs[14] = env->xregs[18]; 7875 env->regs[13] = env->xregs[19]; 7876 } else { 7877 env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 7878 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 7879 } 7880 7881 if (mode == ARM_CPU_MODE_ABT) { 7882 env->regs[14] = env->xregs[20]; 7883 env->regs[13] = env->xregs[21]; 7884 } else { 7885 env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 7886 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 7887 } 7888 7889 if (mode == ARM_CPU_MODE_UND) { 7890 env->regs[14] = env->xregs[22]; 7891 env->regs[13] = env->xregs[23]; 7892 } else { 7893 env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 7894 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 7895 } 7896 7897 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 7898 * mode, then we can copy to r8-r14. Otherwise, we copy to the 7899 * FIQ bank for r8-r14. 7900 */ 7901 if (mode == ARM_CPU_MODE_FIQ) { 7902 for (i = 24; i < 31; i++) { 7903 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 7904 } 7905 } else { 7906 for (i = 24; i < 29; i++) { 7907 env->fiq_regs[i - 24] = env->xregs[i]; 7908 } 7909 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 7910 env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 7911 } 7912 7913 env->regs[15] = env->pc; 7914 } 7915 7916 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 7917 { 7918 ARMCPU *cpu = ARM_CPU(cs); 7919 CPUARMState *env = &cpu->env; 7920 uint32_t addr; 7921 uint32_t mask; 7922 int new_mode; 7923 uint32_t offset; 7924 uint32_t moe; 7925 7926 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 7927 switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) { 7928 case EC_BREAKPOINT: 7929 case EC_BREAKPOINT_SAME_EL: 7930 moe = 1; 7931 break; 7932 case EC_WATCHPOINT: 7933 case EC_WATCHPOINT_SAME_EL: 7934 moe = 10; 7935 break; 7936 case EC_AA32_BKPT: 7937 moe = 3; 7938 break; 7939 case EC_VECTORCATCH: 7940 moe = 5; 7941 break; 7942 default: 7943 moe = 0; 7944 break; 7945 } 7946 7947 if (moe) { 7948 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 7949 } 7950 7951 /* TODO: Vectored interrupt controller. 
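 * The offsets added to the vector base below follow the classic ARM
 * exception vector layout: 0x04 Undefined Instruction, 0x08 Supervisor
 * Call, 0x0c Prefetch Abort, 0x10 Data Abort, 0x18 IRQ, 0x1c FIQ;
 * 0x00 is the Reset vector and 0x14 is unused.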
*/ 7952 switch (cs->exception_index) { 7953 case EXCP_UDEF: 7954 new_mode = ARM_CPU_MODE_UND; 7955 addr = 0x04; 7956 mask = CPSR_I; 7957 if (env->thumb) 7958 offset = 2; 7959 else 7960 offset = 4; 7961 break; 7962 case EXCP_SWI: 7963 new_mode = ARM_CPU_MODE_SVC; 7964 addr = 0x08; 7965 mask = CPSR_I; 7966 /* The PC already points to the next instruction. */ 7967 offset = 0; 7968 break; 7969 case EXCP_BKPT: 7970 /* Fall through to prefetch abort. */ 7971 case EXCP_PREFETCH_ABORT: 7972 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 7973 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 7974 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 7975 env->exception.fsr, (uint32_t)env->exception.vaddress); 7976 new_mode = ARM_CPU_MODE_ABT; 7977 addr = 0x0c; 7978 mask = CPSR_A | CPSR_I; 7979 offset = 4; 7980 break; 7981 case EXCP_DATA_ABORT: 7982 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 7983 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 7984 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 7985 env->exception.fsr, 7986 (uint32_t)env->exception.vaddress); 7987 new_mode = ARM_CPU_MODE_ABT; 7988 addr = 0x10; 7989 mask = CPSR_A | CPSR_I; 7990 offset = 8; 7991 break; 7992 case EXCP_IRQ: 7993 new_mode = ARM_CPU_MODE_IRQ; 7994 addr = 0x18; 7995 /* Disable IRQ and imprecise data aborts. */ 7996 mask = CPSR_A | CPSR_I; 7997 offset = 4; 7998 if (env->cp15.scr_el3 & SCR_IRQ) { 7999 /* IRQ routed to monitor mode */ 8000 new_mode = ARM_CPU_MODE_MON; 8001 mask |= CPSR_F; 8002 } 8003 break; 8004 case EXCP_FIQ: 8005 new_mode = ARM_CPU_MODE_FIQ; 8006 addr = 0x1c; 8007 /* Disable FIQ, IRQ and imprecise data aborts. */ 8008 mask = CPSR_A | CPSR_I | CPSR_F; 8009 if (env->cp15.scr_el3 & SCR_FIQ) { 8010 /* FIQ routed to monitor mode */ 8011 new_mode = ARM_CPU_MODE_MON; 8012 } 8013 offset = 4; 8014 break; 8015 case EXCP_VIRQ: 8016 new_mode = ARM_CPU_MODE_IRQ; 8017 addr = 0x18; 8018 /* Disable IRQ and imprecise data aborts. */ 8019 mask = CPSR_A | CPSR_I; 8020 offset = 4; 8021 break; 8022 case EXCP_VFIQ: 8023 new_mode = ARM_CPU_MODE_FIQ; 8024 addr = 0x1c; 8025 /* Disable FIQ, IRQ and imprecise data aborts. */ 8026 mask = CPSR_A | CPSR_I | CPSR_F; 8027 offset = 4; 8028 break; 8029 case EXCP_SMC: 8030 new_mode = ARM_CPU_MODE_MON; 8031 addr = 0x08; 8032 mask = CPSR_A | CPSR_I | CPSR_F; 8033 offset = 0; 8034 break; 8035 default: 8036 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8037 return; /* Never happens. Keep compiler happy. */ 8038 } 8039 8040 if (new_mode == ARM_CPU_MODE_MON) { 8041 addr += env->cp15.mvbar; 8042 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 8043 /* High vectors. When enabled, base address cannot be remapped. */ 8044 addr += 0xffff0000; 8045 } else { 8046 /* ARM v7 architectures provide a vector base address register to remap 8047 * the interrupt vector table. 8048 * This register is only followed in non-monitor mode, and is banked. 8049 * Note: only bits 31:5 are valid. 8050 */ 8051 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 8052 } 8053 8054 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 8055 env->cp15.scr_el3 &= ~SCR_NS; 8056 } 8057 8058 switch_mode (env, new_mode); 8059 /* For exceptions taken to AArch32 we must clear the SS bit in both 8060 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8061 */ 8062 env->uncached_cpsr &= ~PSTATE_SS; 8063 env->spsr = cpsr_read(env); 8064 /* Clear IT bits. 
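 * (condexec_bits caches the Thumb IT-block state; ITSTATE is architecturally
 * reset to zero on exception entry, which is what the assignment below does.)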
*/ 8065 env->condexec_bits = 0; 8066 /* Switch to the new mode, and to the correct instruction set. */ 8067 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 8068 /* Set new mode endianness */ 8069 env->uncached_cpsr &= ~CPSR_E; 8070 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) { 8071 env->uncached_cpsr |= CPSR_E; 8072 } 8073 env->daif |= mask; 8074 /* this is a lie, as there was no c1_sys on V4T/V5, but who cares 8075 * and we should just guard the thumb mode on V4 */ 8076 if (arm_feature(env, ARM_FEATURE_V4T)) { 8077 env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 8078 } 8079 env->regs[14] = env->regs[15] + offset; 8080 env->regs[15] = addr; 8081 } 8082 8083 /* Handle exception entry to a target EL which is using AArch64 */ 8084 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 8085 { 8086 ARMCPU *cpu = ARM_CPU(cs); 8087 CPUARMState *env = &cpu->env; 8088 unsigned int new_el = env->exception.target_el; 8089 target_ulong addr = env->cp15.vbar_el[new_el]; 8090 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 8091 8092 if (arm_current_el(env) < new_el) { 8093 /* Entry vector offset depends on whether the implemented EL 8094 * immediately lower than the target level is using AArch32 or AArch64 8095 */ 8096 bool is_aa64; 8097 8098 switch (new_el) { 8099 case 3: 8100 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 8101 break; 8102 case 2: 8103 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0; 8104 break; 8105 case 1: 8106 is_aa64 = is_a64(env); 8107 break; 8108 default: 8109 g_assert_not_reached(); 8110 } 8111 8112 if (is_aa64) { 8113 addr += 0x400; 8114 } else { 8115 addr += 0x600; 8116 } 8117 } else if (pstate_read(env) & PSTATE_SP) { 8118 addr += 0x200; 8119 } 8120 8121 switch (cs->exception_index) { 8122 case EXCP_PREFETCH_ABORT: 8123 case EXCP_DATA_ABORT: 8124 env->cp15.far_el[new_el] = env->exception.vaddress; 8125 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 8126 env->cp15.far_el[new_el]); 8127 /* fall through */ 8128 case EXCP_BKPT: 8129 case EXCP_UDEF: 8130 case EXCP_SWI: 8131 case EXCP_HVC: 8132 case EXCP_HYP_TRAP: 8133 case EXCP_SMC: 8134 env->cp15.esr_el[new_el] = env->exception.syndrome; 8135 break; 8136 case EXCP_IRQ: 8137 case EXCP_VIRQ: 8138 addr += 0x80; 8139 break; 8140 case EXCP_FIQ: 8141 case EXCP_VFIQ: 8142 addr += 0x100; 8143 break; 8144 case EXCP_SEMIHOST: 8145 qemu_log_mask(CPU_LOG_INT, 8146 "...handling as semihosting call 0x%" PRIx64 "\n", 8147 env->xregs[0]); 8148 env->xregs[0] = do_arm_semihosting(env); 8149 return; 8150 default: 8151 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8152 } 8153 8154 if (is_a64(env)) { 8155 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); 8156 aarch64_save_sp(env, arm_current_el(env)); 8157 env->elr_el[new_el] = env->pc; 8158 } else { 8159 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env); 8160 env->elr_el[new_el] = env->regs[15]; 8161 8162 aarch64_sync_32_to_64(env); 8163 8164 env->condexec_bits = 0; 8165 } 8166 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 8167 env->elr_el[new_el]); 8168 8169 pstate_write(env, PSTATE_DAIF | new_mode); 8170 env->aarch64 = 1; 8171 aarch64_restore_sp(env, new_el); 8172 8173 env->pc = addr; 8174 8175 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 8176 new_el, env->pc, pstate_read(env)); 8177 } 8178 8179 static inline bool check_for_semihosting(CPUState *cs) 8180 { 8181 /* Check whether this exception is a semihosting call; if so 8182 * then
handle it and return true; otherwise return false. 8183 */ 8184 ARMCPU *cpu = ARM_CPU(cs); 8185 CPUARMState *env = &cpu->env; 8186 8187 if (is_a64(env)) { 8188 if (cs->exception_index == EXCP_SEMIHOST) { 8189 /* This is always the 64-bit semihosting exception. 8190 * The "is this usermode" and "is semihosting enabled" 8191 * checks have been done at translate time. 8192 */ 8193 qemu_log_mask(CPU_LOG_INT, 8194 "...handling as semihosting call 0x%" PRIx64 "\n", 8195 env->xregs[0]); 8196 env->xregs[0] = do_arm_semihosting(env); 8197 return true; 8198 } 8199 return false; 8200 } else { 8201 uint32_t imm; 8202 8203 /* Only intercept calls from privileged modes, to provide some 8204 * semblance of security. 8205 */ 8206 if (cs->exception_index != EXCP_SEMIHOST && 8207 (!semihosting_enabled() || 8208 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) { 8209 return false; 8210 } 8211 8212 switch (cs->exception_index) { 8213 case EXCP_SEMIHOST: 8214 /* This is always a semihosting call; the "is this usermode" 8215 * and "is semihosting enabled" checks have been done at 8216 * translate time. 8217 */ 8218 break; 8219 case EXCP_SWI: 8220 /* Check for semihosting interrupt. */ 8221 if (env->thumb) { 8222 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env)) 8223 & 0xff; 8224 if (imm == 0xab) { 8225 break; 8226 } 8227 } else { 8228 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env)) 8229 & 0xffffff; 8230 if (imm == 0x123456) { 8231 break; 8232 } 8233 } 8234 return false; 8235 case EXCP_BKPT: 8236 /* See if this is a semihosting syscall. */ 8237 if (env->thumb) { 8238 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) 8239 & 0xff; 8240 if (imm == 0xab) { 8241 env->regs[15] += 2; 8242 break; 8243 } 8244 } 8245 return false; 8246 default: 8247 return false; 8248 } 8249 8250 qemu_log_mask(CPU_LOG_INT, 8251 "...handling as semihosting call 0x%x\n", 8252 env->regs[0]); 8253 env->regs[0] = do_arm_semihosting(env); 8254 return true; 8255 } 8256 } 8257 8258 /* Handle a CPU exception for A and R profile CPUs. 8259 * Do any appropriate logging, handle PSCI calls, and then hand off 8260 * to the AArch64-entry or AArch32-entry function depending on the 8261 * target exception level's register width. 8262 */ 8263 void arm_cpu_do_interrupt(CPUState *cs) 8264 { 8265 ARMCPU *cpu = ARM_CPU(cs); 8266 CPUARMState *env = &cpu->env; 8267 unsigned int new_el = env->exception.target_el; 8268 8269 assert(!arm_feature(env, ARM_FEATURE_M)); 8270 8271 arm_log_exception(cs->exception_index); 8272 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 8273 new_el); 8274 if (qemu_loglevel_mask(CPU_LOG_INT) 8275 && !excp_is_internal(cs->exception_index)) { 8276 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 8277 env->exception.syndrome >> ARM_EL_EC_SHIFT, 8278 env->exception.syndrome); 8279 } 8280 8281 if (arm_is_psci_call(cpu, cs->exception_index)) { 8282 arm_handle_psci_call(cpu); 8283 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 8284 return; 8285 } 8286 8287 /* Semihosting semantics depend on the register width of the 8288 * code that caused the exception, not the target exception level, 8289 * so must be handled here. 8290 */ 8291 if (check_for_semihosting(cs)) { 8292 return; 8293 } 8294 8295 /* Hooks may change global state so BQL should be held, also the 8296 * BQL needs to be held for any modification of 8297 * cs->interrupt_request. 
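 * (Both the pre-EL-change and EL-change hooks invoked below may run device
 * or board code, hence the assertion that the iothread lock is already held.)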
8298 */ 8299 g_assert(qemu_mutex_iothread_locked()); 8300 8301 arm_call_pre_el_change_hook(cpu); 8302 8303 assert(!excp_is_internal(cs->exception_index)); 8304 if (arm_el_is_aa64(env, new_el)) { 8305 arm_cpu_do_interrupt_aarch64(cs); 8306 } else { 8307 arm_cpu_do_interrupt_aarch32(cs); 8308 } 8309 8310 arm_call_el_change_hook(cpu); 8311 8312 if (!kvm_enabled()) { 8313 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 8314 } 8315 } 8316 8317 /* Return the exception level which controls this address translation regime */ 8318 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 8319 { 8320 switch (mmu_idx) { 8321 case ARMMMUIdx_S2NS: 8322 case ARMMMUIdx_S1E2: 8323 return 2; 8324 case ARMMMUIdx_S1E3: 8325 return 3; 8326 case ARMMMUIdx_S1SE0: 8327 return arm_el_is_aa64(env, 3) ? 1 : 3; 8328 case ARMMMUIdx_S1SE1: 8329 case ARMMMUIdx_S1NSE0: 8330 case ARMMMUIdx_S1NSE1: 8331 case ARMMMUIdx_MPrivNegPri: 8332 case ARMMMUIdx_MUserNegPri: 8333 case ARMMMUIdx_MPriv: 8334 case ARMMMUIdx_MUser: 8335 case ARMMMUIdx_MSPrivNegPri: 8336 case ARMMMUIdx_MSUserNegPri: 8337 case ARMMMUIdx_MSPriv: 8338 case ARMMMUIdx_MSUser: 8339 return 1; 8340 default: 8341 g_assert_not_reached(); 8342 } 8343 } 8344 8345 /* Return the SCTLR value which controls this address translation regime */ 8346 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 8347 { 8348 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 8349 } 8350 8351 /* Return true if the specified stage of address translation is disabled */ 8352 static inline bool regime_translation_disabled(CPUARMState *env, 8353 ARMMMUIdx mmu_idx) 8354 { 8355 if (arm_feature(env, ARM_FEATURE_M)) { 8356 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 8357 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 8358 case R_V7M_MPU_CTRL_ENABLE_MASK: 8359 /* Enabled, but not for HardFault and NMI */ 8360 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 8361 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 8362 /* Enabled for all cases */ 8363 return false; 8364 case 0: 8365 default: 8366 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 8367 * we warned about that in armv7m_nvic.c when the guest set it. 8368 */ 8369 return true; 8370 } 8371 } 8372 8373 if (mmu_idx == ARMMMUIdx_S2NS) { 8374 return (env->cp15.hcr_el2 & HCR_VM) == 0; 8375 } 8376 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 8377 } 8378 8379 static inline bool regime_translation_big_endian(CPUARMState *env, 8380 ARMMMUIdx mmu_idx) 8381 { 8382 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 8383 } 8384 8385 /* Return the TCR controlling this translation regime */ 8386 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 8387 { 8388 if (mmu_idx == ARMMMUIdx_S2NS) { 8389 return &env->cp15.vtcr_el2; 8390 } 8391 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 8392 } 8393 8394 /* Convert a possible stage1+2 MMU index into the appropriate 8395 * stage 1 MMU index 8396 */ 8397 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 8398 { 8399 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 8400 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0); 8401 } 8402 return mmu_idx; 8403 } 8404 8405 /* Returns TBI0 value for current regime el */ 8406 uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx) 8407 { 8408 TCR *tcr; 8409 uint32_t el; 8410 8411 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8412 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 
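 * For EL2 and EL3 there is a single TBI field at TCR_ELx bit 20; for the
 * EL1&0 regime TBI0 is TCR_EL1 bit 37 (TBI1, bit 38, is handled by
 * arm_regime_tbi1() below).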
8413 */ 8414 mmu_idx = stage_1_mmu_idx(mmu_idx); 8415 8416 tcr = regime_tcr(env, mmu_idx); 8417 el = regime_el(env, mmu_idx); 8418 8419 if (el > 1) { 8420 return extract64(tcr->raw_tcr, 20, 1); 8421 } else { 8422 return extract64(tcr->raw_tcr, 37, 1); 8423 } 8424 } 8425 8426 /* Returns TBI1 value for current regime el */ 8427 uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx) 8428 { 8429 TCR *tcr; 8430 uint32_t el; 8431 8432 /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert 8433 * a stage 1+2 mmu index into the appropriate stage 1 mmu index. 8434 */ 8435 mmu_idx = stage_1_mmu_idx(mmu_idx); 8436 8437 tcr = regime_tcr(env, mmu_idx); 8438 el = regime_el(env, mmu_idx); 8439 8440 if (el > 1) { 8441 return 0; 8442 } else { 8443 return extract64(tcr->raw_tcr, 38, 1); 8444 } 8445 } 8446 8447 /* Return the TTBR associated with this translation regime */ 8448 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 8449 int ttbrn) 8450 { 8451 if (mmu_idx == ARMMMUIdx_S2NS) { 8452 return env->cp15.vttbr_el2; 8453 } 8454 if (ttbrn == 0) { 8455 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 8456 } else { 8457 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 8458 } 8459 } 8460 8461 /* Return true if the translation regime is using LPAE format page tables */ 8462 static inline bool regime_using_lpae_format(CPUARMState *env, 8463 ARMMMUIdx mmu_idx) 8464 { 8465 int el = regime_el(env, mmu_idx); 8466 if (el == 2 || arm_el_is_aa64(env, el)) { 8467 return true; 8468 } 8469 if (arm_feature(env, ARM_FEATURE_LPAE) 8470 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 8471 return true; 8472 } 8473 return false; 8474 } 8475 8476 /* Returns true if the stage 1 translation regime is using LPAE format page 8477 * tables. Used when raising alignment exceptions, whose FSR changes depending 8478 * on whether the long or short descriptor format is in use. */ 8479 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 8480 { 8481 mmu_idx = stage_1_mmu_idx(mmu_idx); 8482 8483 return regime_using_lpae_format(env, mmu_idx); 8484 } 8485 8486 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 8487 { 8488 switch (mmu_idx) { 8489 case ARMMMUIdx_S1SE0: 8490 case ARMMMUIdx_S1NSE0: 8491 case ARMMMUIdx_MUser: 8492 case ARMMMUIdx_MSUser: 8493 case ARMMMUIdx_MUserNegPri: 8494 case ARMMMUIdx_MSUserNegPri: 8495 return true; 8496 default: 8497 return false; 8498 case ARMMMUIdx_S12NSE0: 8499 case ARMMMUIdx_S12NSE1: 8500 g_assert_not_reached(); 8501 } 8502 } 8503 8504 /* Translate section/page access permissions to page 8505 * R/W protection flags 8506 * 8507 * @env: CPUARMState 8508 * @mmu_idx: MMU index indicating required translation regime 8509 * @ap: The 3-bit access permissions (AP[2:0]) 8510 * @domain_prot: The 2-bit domain access permissions 8511 */ 8512 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 8513 int ap, int domain_prot) 8514 { 8515 bool is_user = regime_is_user(env, mmu_idx); 8516 8517 if (domain_prot == 3) { 8518 return PAGE_READ | PAGE_WRITE; 8519 } 8520 8521 switch (ap) { 8522 case 0: 8523 if (arm_feature(env, ARM_FEATURE_V7)) { 8524 return 0; 8525 } 8526 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 8527 case SCTLR_S: 8528 return is_user ? 0 : PAGE_READ; 8529 case SCTLR_R: 8530 return PAGE_READ; 8531 default: 8532 return 0; 8533 } 8534 case 1: 8535 return is_user ? 
0 : PAGE_READ | PAGE_WRITE; 8536 case 2: 8537 if (is_user) { 8538 return PAGE_READ; 8539 } else { 8540 return PAGE_READ | PAGE_WRITE; 8541 } 8542 case 3: 8543 return PAGE_READ | PAGE_WRITE; 8544 case 4: /* Reserved. */ 8545 return 0; 8546 case 5: 8547 return is_user ? 0 : PAGE_READ; 8548 case 6: 8549 return PAGE_READ; 8550 case 7: 8551 if (!arm_feature(env, ARM_FEATURE_V6K)) { 8552 return 0; 8553 } 8554 return PAGE_READ; 8555 default: 8556 g_assert_not_reached(); 8557 } 8558 } 8559 8560 /* Translate section/page access permissions to page 8561 * R/W protection flags. 8562 * 8563 * @ap: The 2-bit simple AP (AP[2:1]) 8564 * @is_user: TRUE if accessing from PL0 8565 */ 8566 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 8567 { 8568 switch (ap) { 8569 case 0: 8570 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 8571 case 1: 8572 return PAGE_READ | PAGE_WRITE; 8573 case 2: 8574 return is_user ? 0 : PAGE_READ; 8575 case 3: 8576 return PAGE_READ; 8577 default: 8578 g_assert_not_reached(); 8579 } 8580 } 8581 8582 static inline int 8583 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 8584 { 8585 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 8586 } 8587 8588 /* Translate S2 section/page access permissions to protection flags 8589 * 8590 * @env: CPUARMState 8591 * @s2ap: The 2-bit stage2 access permissions (S2AP) 8592 * @xn: XN (execute-never) bit 8593 */ 8594 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 8595 { 8596 int prot = 0; 8597 8598 if (s2ap & 1) { 8599 prot |= PAGE_READ; 8600 } 8601 if (s2ap & 2) { 8602 prot |= PAGE_WRITE; 8603 } 8604 if (!xn) { 8605 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 8606 prot |= PAGE_EXEC; 8607 } 8608 } 8609 return prot; 8610 } 8611 8612 /* Translate section/page access permissions to protection flags 8613 * 8614 * @env: CPUARMState 8615 * @mmu_idx: MMU index indicating required translation regime 8616 * @is_aa64: TRUE if AArch64 8617 * @ap: The 2-bit simple AP (AP[2:1]) 8618 * @ns: NS (non-secure) bit 8619 * @xn: XN (execute-never) bit 8620 * @pxn: PXN (privileged execute-never) bit 8621 */ 8622 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 8623 int ap, int ns, int xn, int pxn) 8624 { 8625 bool is_user = regime_is_user(env, mmu_idx); 8626 int prot_rw, user_rw; 8627 bool have_wxn; 8628 int wxn = 0; 8629 8630 assert(mmu_idx != ARMMMUIdx_S2NS); 8631 8632 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 8633 if (is_user) { 8634 prot_rw = user_rw; 8635 } else { 8636 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 8637 } 8638 8639 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 8640 return prot_rw; 8641 } 8642 8643 /* TODO have_wxn should be replaced with 8644 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 8645 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 8646 * compatible processors have EL2, which is required for [U]WXN. 
8647 */ 8648 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 8649 8650 if (have_wxn) { 8651 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 8652 } 8653 8654 if (is_aa64) { 8655 switch (regime_el(env, mmu_idx)) { 8656 case 1: 8657 if (!is_user) { 8658 xn = pxn || (user_rw & PAGE_WRITE); 8659 } 8660 break; 8661 case 2: 8662 case 3: 8663 break; 8664 } 8665 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8666 switch (regime_el(env, mmu_idx)) { 8667 case 1: 8668 case 3: 8669 if (is_user) { 8670 xn = xn || !(user_rw & PAGE_READ); 8671 } else { 8672 int uwxn = 0; 8673 if (have_wxn) { 8674 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 8675 } 8676 xn = xn || !(prot_rw & PAGE_READ) || pxn || 8677 (uwxn && (user_rw & PAGE_WRITE)); 8678 } 8679 break; 8680 case 2: 8681 break; 8682 } 8683 } else { 8684 xn = wxn = 0; 8685 } 8686 8687 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 8688 return prot_rw; 8689 } 8690 return prot_rw | PAGE_EXEC; 8691 } 8692 8693 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 8694 uint32_t *table, uint32_t address) 8695 { 8696 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 8697 TCR *tcr = regime_tcr(env, mmu_idx); 8698 8699 if (address & tcr->mask) { 8700 if (tcr->raw_tcr & TTBCR_PD1) { 8701 /* Translation table walk disabled for TTBR1 */ 8702 return false; 8703 } 8704 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 8705 } else { 8706 if (tcr->raw_tcr & TTBCR_PD0) { 8707 /* Translation table walk disabled for TTBR0 */ 8708 return false; 8709 } 8710 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 8711 } 8712 *table |= (address >> 18) & 0x3ffc; 8713 return true; 8714 } 8715 8716 /* Translate a S1 pagetable walk through S2 if needed. */ 8717 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 8718 hwaddr addr, MemTxAttrs txattrs, 8719 ARMMMUFaultInfo *fi) 8720 { 8721 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && 8722 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 8723 target_ulong s2size; 8724 hwaddr s2pa; 8725 int s2prot; 8726 int ret; 8727 8728 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, 8729 &txattrs, &s2prot, &s2size, fi, NULL); 8730 if (ret) { 8731 assert(fi->type != ARMFault_None); 8732 fi->s2addr = addr; 8733 fi->stage2 = true; 8734 fi->s1ptw = true; 8735 return ~0; 8736 } 8737 addr = s2pa; 8738 } 8739 return addr; 8740 } 8741 8742 /* All loads done in the course of a page table walk go through here. 
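 * They are first run through S1_ptw_translate() in case the walk itself
 * needs a stage 2 translation, are performed with the regime's endianness
 * (SCTLR.EE), and report external aborts on the walk via fi->type and fi->ea.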
*/ 8743 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8744 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8745 { 8746 ARMCPU *cpu = ARM_CPU(cs); 8747 CPUARMState *env = &cpu->env; 8748 MemTxAttrs attrs = {}; 8749 MemTxResult result = MEMTX_OK; 8750 AddressSpace *as; 8751 uint32_t data; 8752 8753 attrs.secure = is_secure; 8754 as = arm_addressspace(cs, attrs); 8755 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8756 if (fi->s1ptw) { 8757 return 0; 8758 } 8759 if (regime_translation_big_endian(env, mmu_idx)) { 8760 data = address_space_ldl_be(as, addr, attrs, &result); 8761 } else { 8762 data = address_space_ldl_le(as, addr, attrs, &result); 8763 } 8764 if (result == MEMTX_OK) { 8765 return data; 8766 } 8767 fi->type = ARMFault_SyncExternalOnWalk; 8768 fi->ea = arm_extabort_type(result); 8769 return 0; 8770 } 8771 8772 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 8773 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 8774 { 8775 ARMCPU *cpu = ARM_CPU(cs); 8776 CPUARMState *env = &cpu->env; 8777 MemTxAttrs attrs = {}; 8778 MemTxResult result = MEMTX_OK; 8779 AddressSpace *as; 8780 uint64_t data; 8781 8782 attrs.secure = is_secure; 8783 as = arm_addressspace(cs, attrs); 8784 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 8785 if (fi->s1ptw) { 8786 return 0; 8787 } 8788 if (regime_translation_big_endian(env, mmu_idx)) { 8789 data = address_space_ldq_be(as, addr, attrs, &result); 8790 } else { 8791 data = address_space_ldq_le(as, addr, attrs, &result); 8792 } 8793 if (result == MEMTX_OK) { 8794 return data; 8795 } 8796 fi->type = ARMFault_SyncExternalOnWalk; 8797 fi->ea = arm_extabort_type(result); 8798 return 0; 8799 } 8800 8801 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 8802 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8803 hwaddr *phys_ptr, int *prot, 8804 target_ulong *page_size, 8805 ARMMMUFaultInfo *fi) 8806 { 8807 CPUState *cs = CPU(arm_env_get_cpu(env)); 8808 int level = 1; 8809 uint32_t table; 8810 uint32_t desc; 8811 int type; 8812 int ap; 8813 int domain = 0; 8814 int domain_prot; 8815 hwaddr phys_addr; 8816 uint32_t dacr; 8817 8818 /* Pagetable walk. */ 8819 /* Lookup l1 descriptor. */ 8820 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8821 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8822 fi->type = ARMFault_Translation; 8823 goto do_fault; 8824 } 8825 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8826 mmu_idx, fi); 8827 if (fi->type != ARMFault_None) { 8828 goto do_fault; 8829 } 8830 type = (desc & 3); 8831 domain = (desc >> 5) & 0x0f; 8832 if (regime_el(env, mmu_idx) == 1) { 8833 dacr = env->cp15.dacr_ns; 8834 } else { 8835 dacr = env->cp15.dacr_s; 8836 } 8837 domain_prot = (dacr >> (domain * 2)) & 3; 8838 if (type == 0) { 8839 /* Section translation fault. */ 8840 fi->type = ARMFault_Translation; 8841 goto do_fault; 8842 } 8843 if (type != 2) { 8844 level = 2; 8845 } 8846 if (domain_prot == 0 || domain_prot == 2) { 8847 fi->type = ARMFault_Domain; 8848 goto do_fault; 8849 } 8850 if (type == 2) { 8851 /* 1Mb section. */ 8852 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 8853 ap = (desc >> 10) & 3; 8854 *page_size = 1024 * 1024; 8855 } else { 8856 /* Lookup l2 entry. */ 8857 if (type == 1) { 8858 /* Coarse pagetable. */ 8859 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 8860 } else { 8861 /* Fine pagetable. 
*/ 8862 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 8863 } 8864 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8865 mmu_idx, fi); 8866 if (fi->type != ARMFault_None) { 8867 goto do_fault; 8868 } 8869 switch (desc & 3) { 8870 case 0: /* Page translation fault. */ 8871 fi->type = ARMFault_Translation; 8872 goto do_fault; 8873 case 1: /* 64k page. */ 8874 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 8875 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 8876 *page_size = 0x10000; 8877 break; 8878 case 2: /* 4k page. */ 8879 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8880 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 8881 *page_size = 0x1000; 8882 break; 8883 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 8884 if (type == 1) { 8885 /* ARMv6/XScale extended small page format */ 8886 if (arm_feature(env, ARM_FEATURE_XSCALE) 8887 || arm_feature(env, ARM_FEATURE_V6)) { 8888 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 8889 *page_size = 0x1000; 8890 } else { 8891 /* UNPREDICTABLE in ARMv5; we choose to take a 8892 * page translation fault. 8893 */ 8894 fi->type = ARMFault_Translation; 8895 goto do_fault; 8896 } 8897 } else { 8898 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 8899 *page_size = 0x400; 8900 } 8901 ap = (desc >> 4) & 3; 8902 break; 8903 default: 8904 /* Never happens, but compiler isn't smart enough to tell. */ 8905 abort(); 8906 } 8907 } 8908 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 8909 *prot |= *prot ? PAGE_EXEC : 0; 8910 if (!(*prot & (1 << access_type))) { 8911 /* Access permission fault. */ 8912 fi->type = ARMFault_Permission; 8913 goto do_fault; 8914 } 8915 *phys_ptr = phys_addr; 8916 return false; 8917 do_fault: 8918 fi->domain = domain; 8919 fi->level = level; 8920 return true; 8921 } 8922 8923 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 8924 MMUAccessType access_type, ARMMMUIdx mmu_idx, 8925 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 8926 target_ulong *page_size, ARMMMUFaultInfo *fi) 8927 { 8928 CPUState *cs = CPU(arm_env_get_cpu(env)); 8929 int level = 1; 8930 uint32_t table; 8931 uint32_t desc; 8932 uint32_t xn; 8933 uint32_t pxn = 0; 8934 int type; 8935 int ap; 8936 int domain = 0; 8937 int domain_prot; 8938 hwaddr phys_addr; 8939 uint32_t dacr; 8940 bool ns; 8941 8942 /* Pagetable walk. */ 8943 /* Lookup l1 descriptor. */ 8944 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 8945 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 8946 fi->type = ARMFault_Translation; 8947 goto do_fault; 8948 } 8949 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 8950 mmu_idx, fi); 8951 if (fi->type != ARMFault_None) { 8952 goto do_fault; 8953 } 8954 type = (desc & 3); 8955 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 8956 /* Section translation fault, or attempt to use the encoding 8957 * which is Reserved on implementations without PXN. 8958 */ 8959 fi->type = ARMFault_Translation; 8960 goto do_fault; 8961 } 8962 if ((type == 1) || !(desc & (1 << 18))) { 8963 /* Page or Section. 
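 * (Supersection descriptors reuse bits [8:5] as extended address bits, so
 * the domain field is only read for page table and ordinary section
 * descriptors here.)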
*/ 8964 domain = (desc >> 5) & 0x0f; 8965 } 8966 if (regime_el(env, mmu_idx) == 1) { 8967 dacr = env->cp15.dacr_ns; 8968 } else { 8969 dacr = env->cp15.dacr_s; 8970 } 8971 if (type == 1) { 8972 level = 2; 8973 } 8974 domain_prot = (dacr >> (domain * 2)) & 3; 8975 if (domain_prot == 0 || domain_prot == 2) { 8976 /* Section or Page domain fault */ 8977 fi->type = ARMFault_Domain; 8978 goto do_fault; 8979 } 8980 if (type != 1) { 8981 if (desc & (1 << 18)) { 8982 /* Supersection. */ 8983 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 8984 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 8985 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 8986 *page_size = 0x1000000; 8987 } else { 8988 /* Section. */ 8989 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 8990 *page_size = 0x100000; 8991 } 8992 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 8993 xn = desc & (1 << 4); 8994 pxn = desc & 1; 8995 ns = extract32(desc, 19, 1); 8996 } else { 8997 if (arm_feature(env, ARM_FEATURE_PXN)) { 8998 pxn = (desc >> 2) & 1; 8999 } 9000 ns = extract32(desc, 3, 1); 9001 /* Lookup l2 entry. */ 9002 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9003 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9004 mmu_idx, fi); 9005 if (fi->type != ARMFault_None) { 9006 goto do_fault; 9007 } 9008 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 9009 switch (desc & 3) { 9010 case 0: /* Page translation fault. */ 9011 fi->type = ARMFault_Translation; 9012 goto do_fault; 9013 case 1: /* 64k page. */ 9014 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9015 xn = desc & (1 << 15); 9016 *page_size = 0x10000; 9017 break; 9018 case 2: case 3: /* 4k page. */ 9019 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9020 xn = desc & 1; 9021 *page_size = 0x1000; 9022 break; 9023 default: 9024 /* Never happens, but compiler isn't smart enough to tell. */ 9025 abort(); 9026 } 9027 } 9028 if (domain_prot == 3) { 9029 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9030 } else { 9031 if (pxn && !regime_is_user(env, mmu_idx)) { 9032 xn = 1; 9033 } 9034 if (xn && access_type == MMU_INST_FETCH) { 9035 fi->type = ARMFault_Permission; 9036 goto do_fault; 9037 } 9038 9039 if (arm_feature(env, ARM_FEATURE_V6K) && 9040 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 9041 /* The simplified model uses AP[0] as an access control bit. */ 9042 if ((ap & 1) == 0) { 9043 /* Access flag fault. */ 9044 fi->type = ARMFault_AccessFlag; 9045 goto do_fault; 9046 } 9047 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 9048 } else { 9049 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9050 } 9051 if (*prot && !xn) { 9052 *prot |= PAGE_EXEC; 9053 } 9054 if (!(*prot & (1 << access_type))) { 9055 /* Access permission fault. */ 9056 fi->type = ARMFault_Permission; 9057 goto do_fault; 9058 } 9059 } 9060 if (ns) { 9061 /* The NS bit will (as required by the architecture) have no effect if 9062 * the CPU doesn't support TZ or this is a non-secure translation 9063 * regime, because the attribute will already be non-secure. 
9064 */ 9065 attrs->secure = false; 9066 } 9067 *phys_ptr = phys_addr; 9068 return false; 9069 do_fault: 9070 fi->domain = domain; 9071 fi->level = level; 9072 return true; 9073 } 9074 9075 /* 9076 * check_s2_mmu_setup 9077 * @cpu: ARMCPU 9078 * @is_aa64: True if the translation regime is in AArch64 state 9079 * @startlevel: Suggested starting level 9080 * @inputsize: Bitsize of IPAs 9081 * @stride: Page-table stride (See the ARM ARM) 9082 * 9083 * Returns true if the suggested S2 translation parameters are OK and 9084 * false otherwise. 9085 */ 9086 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 9087 int inputsize, int stride) 9088 { 9089 const int grainsize = stride + 3; 9090 int startsizecheck; 9091 9092 /* Negative levels are never allowed. */ 9093 if (level < 0) { 9094 return false; 9095 } 9096 9097 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 9098 if (startsizecheck < 1 || startsizecheck > stride + 4) { 9099 return false; 9100 } 9101 9102 if (is_aa64) { 9103 CPUARMState *env = &cpu->env; 9104 unsigned int pamax = arm_pamax(cpu); 9105 9106 switch (stride) { 9107 case 13: /* 64KB Pages. */ 9108 if (level == 0 || (level == 1 && pamax <= 42)) { 9109 return false; 9110 } 9111 break; 9112 case 11: /* 16KB Pages. */ 9113 if (level == 0 || (level == 1 && pamax <= 40)) { 9114 return false; 9115 } 9116 break; 9117 case 9: /* 4KB Pages. */ 9118 if (level == 0 && pamax <= 42) { 9119 return false; 9120 } 9121 break; 9122 default: 9123 g_assert_not_reached(); 9124 } 9125 9126 /* Inputsize checks. */ 9127 if (inputsize > pamax && 9128 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 9129 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 9130 return false; 9131 } 9132 } else { 9133 /* AArch32 only supports 4KB pages. Assert on that. */ 9134 assert(stride == 9); 9135 9136 if (level == 0) { 9137 return false; 9138 } 9139 } 9140 return true; 9141 } 9142 9143 /* Translate from the 4-bit stage 2 representation of 9144 * memory attributes (without cache-allocation hints) to 9145 * the 8-bit representation of the stage 1 MAIR registers 9146 * (which includes allocation hints). 9147 * 9148 * ref: shared/translation/attrs/S2AttrDecode() 9149 * .../S2ConvertAttrsHints() 9150 */ 9151 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 9152 { 9153 uint8_t hiattr = extract32(s2attrs, 2, 2); 9154 uint8_t loattr = extract32(s2attrs, 0, 2); 9155 uint8_t hihint = 0, lohint = 0; 9156 9157 if (hiattr != 0) { /* normal memory */ 9158 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 9159 hiattr = loattr = 1; /* non-cacheable */ 9160 } else { 9161 if (hiattr != 1) { /* Write-through or write-back */ 9162 hihint = 3; /* RW allocate */ 9163 } 9164 if (loattr != 1) { /* Write-through or write-back */ 9165 lohint = 3; /* RW allocate */ 9166 } 9167 } 9168 } 9169 9170 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 9171 } 9172 9173 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 9174 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9175 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 9176 target_ulong *page_size_ptr, 9177 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 9178 { 9179 ARMCPU *cpu = arm_env_get_cpu(env); 9180 CPUState *cs = CPU(cpu); 9181 /* Read an LPAE long-descriptor translation table. 
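 * In outline: pick TTBR0 or TTBR1 from the top bits of the address, work out
 * the granule (stride) and starting level from the TCR, then walk the table
 * levels accumulating the hierarchical NSTable/APTable/XNTable attributes
 * until a block or page descriptor yields the output address, permissions
 * and memory attributes.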
*/ 9182 ARMFaultType fault_type = ARMFault_Translation; 9183 uint32_t level; 9184 uint32_t epd = 0; 9185 int32_t t0sz, t1sz; 9186 uint32_t tg; 9187 uint64_t ttbr; 9188 int ttbr_select; 9189 hwaddr descaddr, indexmask, indexmask_grainsize; 9190 uint32_t tableattrs; 9191 target_ulong page_size; 9192 uint32_t attrs; 9193 int32_t stride = 9; 9194 int32_t addrsize; 9195 int inputsize; 9196 int32_t tbi = 0; 9197 TCR *tcr = regime_tcr(env, mmu_idx); 9198 int ap, ns, xn, pxn; 9199 uint32_t el = regime_el(env, mmu_idx); 9200 bool ttbr1_valid = true; 9201 uint64_t descaddrmask; 9202 bool aarch64 = arm_el_is_aa64(env, el); 9203 9204 /* TODO: 9205 * This code does not handle the different format TCR for VTCR_EL2. 9206 * This code also does not support shareability levels. 9207 * Attribute and permission bit handling should also be checked when adding 9208 * support for those page table walks. 9209 */ 9210 if (aarch64) { 9211 level = 0; 9212 addrsize = 64; 9213 if (el > 1) { 9214 if (mmu_idx != ARMMMUIdx_S2NS) { 9215 tbi = extract64(tcr->raw_tcr, 20, 1); 9216 } 9217 } else { 9218 if (extract64(address, 55, 1)) { 9219 tbi = extract64(tcr->raw_tcr, 38, 1); 9220 } else { 9221 tbi = extract64(tcr->raw_tcr, 37, 1); 9222 } 9223 } 9224 tbi *= 8; 9225 9226 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it 9227 * invalid. 9228 */ 9229 if (el > 1) { 9230 ttbr1_valid = false; 9231 } 9232 } else { 9233 level = 1; 9234 addrsize = 32; 9235 /* There is no TTBR1 for EL2 */ 9236 if (el == 2) { 9237 ttbr1_valid = false; 9238 } 9239 } 9240 9241 /* Determine whether this address is in the region controlled by 9242 * TTBR0 or TTBR1 (or if it is in neither region and should fault). 9243 * This is a Non-secure PL0/1 stage 1 translation, so controlled by 9244 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32: 9245 */ 9246 if (aarch64) { 9247 /* AArch64 translation. */ 9248 t0sz = extract32(tcr->raw_tcr, 0, 6); 9249 t0sz = MIN(t0sz, 39); 9250 t0sz = MAX(t0sz, 16); 9251 } else if (mmu_idx != ARMMMUIdx_S2NS) { 9252 /* AArch32 stage 1 translation. */ 9253 t0sz = extract32(tcr->raw_tcr, 0, 3); 9254 } else { 9255 /* AArch32 stage 2 translation. */ 9256 bool sext = extract32(tcr->raw_tcr, 4, 1); 9257 bool sign = extract32(tcr->raw_tcr, 3, 1); 9258 /* Address size is 40-bit for a stage 2 translation, 9259 * and t0sz can be negative (from -8 to 7), 9260 * so we need to adjust it to use the TTBR selecting logic below. 9261 */ 9262 addrsize = 40; 9263 t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8; 9264 9265 /* If the sign-extend bit is not the same as t0sz[3], the result 9266 * is unpredictable. Flag this as a guest error. 
*/ 9267 if (sign != sext) { 9268 qemu_log_mask(LOG_GUEST_ERROR, 9269 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 9270 } 9271 } 9272 t1sz = extract32(tcr->raw_tcr, 16, 6); 9273 if (aarch64) { 9274 t1sz = MIN(t1sz, 39); 9275 t1sz = MAX(t1sz, 16); 9276 } 9277 if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) { 9278 /* there is a ttbr0 region and we are in it (high bits all zero) */ 9279 ttbr_select = 0; 9280 } else if (ttbr1_valid && t1sz && 9281 !extract64(~address, addrsize - t1sz, t1sz - tbi)) { 9282 /* there is a ttbr1 region and we are in it (high bits all one) */ 9283 ttbr_select = 1; 9284 } else if (!t0sz) { 9285 /* ttbr0 region is "everything not in the ttbr1 region" */ 9286 ttbr_select = 0; 9287 } else if (!t1sz && ttbr1_valid) { 9288 /* ttbr1 region is "everything not in the ttbr0 region" */ 9289 ttbr_select = 1; 9290 } else { 9291 /* in the gap between the two regions, this is a Translation fault */ 9292 fault_type = ARMFault_Translation; 9293 goto do_fault; 9294 } 9295 9296 /* Note that QEMU ignores shareability and cacheability attributes, 9297 * so we don't need to do anything with the SH, ORGN, IRGN fields 9298 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 9299 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 9300 * implement any ASID-like capability so we can ignore it (instead 9301 * we will always flush the TLB any time the ASID is changed). 9302 */ 9303 if (ttbr_select == 0) { 9304 ttbr = regime_ttbr(env, mmu_idx, 0); 9305 if (el < 2) { 9306 epd = extract32(tcr->raw_tcr, 7, 1); 9307 } 9308 inputsize = addrsize - t0sz; 9309 9310 tg = extract32(tcr->raw_tcr, 14, 2); 9311 if (tg == 1) { /* 64KB pages */ 9312 stride = 13; 9313 } 9314 if (tg == 2) { /* 16KB pages */ 9315 stride = 11; 9316 } 9317 } else { 9318 /* We should only be here if TTBR1 is valid */ 9319 assert(ttbr1_valid); 9320 9321 ttbr = regime_ttbr(env, mmu_idx, 1); 9322 epd = extract32(tcr->raw_tcr, 23, 1); 9323 inputsize = addrsize - t1sz; 9324 9325 tg = extract32(tcr->raw_tcr, 30, 2); 9326 if (tg == 3) { /* 64KB pages */ 9327 stride = 13; 9328 } 9329 if (tg == 1) { /* 16KB pages */ 9330 stride = 11; 9331 } 9332 } 9333 9334 /* Here we should have set up all the parameters for the translation: 9335 * inputsize, ttbr, epd, stride, tbi 9336 */ 9337 9338 if (epd) { 9339 /* Translation table walk disabled => Translation fault on TLB miss 9340 * Note: This is always 0 on 64-bit EL2 and EL3. 9341 */ 9342 goto do_fault; 9343 } 9344 9345 if (mmu_idx != ARMMMUIdx_S2NS) { 9346 /* The starting level depends on the virtual address size (which can 9347 * be up to 48 bits) and the translation granule size. It indicates 9348 * the number of strides (stride bits at a time) needed to 9349 * consume the bits of the input address. In the pseudocode this is: 9350 * level = 4 - RoundUp((inputsize - grainsize) / stride) 9351 * where their 'inputsize' is our 'inputsize', 'grainsize' is 9352 * our 'stride + 3' and 'stride' is our 'stride'. 
9353 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 9354 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 9355 * = 4 - (inputsize - 4) / stride; 9356 */ 9357 level = 4 - (inputsize - 4) / stride; 9358 } else { 9359 /* For stage 2 translations the starting level is specified by the 9360 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 9361 */ 9362 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 9363 uint32_t startlevel; 9364 bool ok; 9365 9366 if (!aarch64 || stride == 9) { 9367 /* AArch32 or 4KB pages */ 9368 startlevel = 2 - sl0; 9369 } else { 9370 /* 16KB or 64KB pages */ 9371 startlevel = 3 - sl0; 9372 } 9373 9374 /* Check that the starting level is valid. */ 9375 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 9376 inputsize, stride); 9377 if (!ok) { 9378 fault_type = ARMFault_Translation; 9379 goto do_fault; 9380 } 9381 level = startlevel; 9382 } 9383 9384 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 9385 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 9386 9387 /* Now we can extract the actual base address from the TTBR */ 9388 descaddr = extract64(ttbr, 0, 48); 9389 descaddr &= ~indexmask; 9390 9391 /* The address field in the descriptor goes up to bit 39 for ARMv7 9392 * but up to bit 47 for ARMv8, but we use the descaddrmask 9393 * up to bit 39 for AArch32, because we don't need other bits in that case 9394 * to construct next descriptor address (anyway they should be all zeroes). 9395 */ 9396 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 9397 ~indexmask_grainsize; 9398 9399 /* Secure accesses start with the page table in secure memory and 9400 * can be downgraded to non-secure at any step. Non-secure accesses 9401 * remain non-secure. We implement this by just ORing in the NSTable/NS 9402 * bits at each step. 9403 */ 9404 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 9405 for (;;) { 9406 uint64_t descriptor; 9407 bool nstable; 9408 9409 descaddr |= (address >> (stride * (4 - level))) & indexmask; 9410 descaddr &= ~7ULL; 9411 nstable = extract32(tableattrs, 4, 1); 9412 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 9413 if (fi->type != ARMFault_None) { 9414 goto do_fault; 9415 } 9416 9417 if (!(descriptor & 1) || 9418 (!(descriptor & 2) && (level == 3))) { 9419 /* Invalid, or the Reserved level 3 encoding */ 9420 goto do_fault; 9421 } 9422 descaddr = descriptor & descaddrmask; 9423 9424 if ((descriptor & 2) && (level < 3)) { 9425 /* Table entry. The top five bits are attributes which may 9426 * propagate down through lower levels of the table (and 9427 * which are all arranged so that 0 means "no effect", so 9428 * we can gather them up by ORing in the bits at each level). 9429 */ 9430 tableattrs |= extract64(descriptor, 59, 5); 9431 level++; 9432 indexmask = indexmask_grainsize; 9433 continue; 9434 } 9435 /* Block entry at level 1 or 2, or page entry at level 3. 9436 * These are basically the same thing, although the number 9437 * of bits we pull in from the vaddr varies. 
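 * For example, with 4KB granules (stride 9) a level 3 entry maps
 * 1 << (9 * 1 + 3) = 4KB, a level 2 block maps 2MB and a level 1 block maps
 * 1GB, which is exactly what the page_size calculation below produces.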
9438 */ 9439 page_size = (1ULL << ((stride * (4 - level)) + 3)); 9440 descaddr |= (address & (page_size - 1)); 9441 /* Extract attributes from the descriptor */ 9442 attrs = extract64(descriptor, 2, 10) 9443 | (extract64(descriptor, 52, 12) << 10); 9444 9445 if (mmu_idx == ARMMMUIdx_S2NS) { 9446 /* Stage 2 table descriptors do not include any attribute fields */ 9447 break; 9448 } 9449 /* Merge in attributes from table descriptors */ 9450 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 9451 attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */ 9452 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 9453 * means "force PL1 access only", which means forcing AP[1] to 0. 9454 */ 9455 if (extract32(tableattrs, 2, 1)) { 9456 attrs &= ~(1 << 4); 9457 } 9458 attrs |= nstable << 3; /* NS */ 9459 break; 9460 } 9461 /* Here descaddr is the final physical address, and attributes 9462 * are all in attrs. 9463 */ 9464 fault_type = ARMFault_AccessFlag; 9465 if ((attrs & (1 << 8)) == 0) { 9466 /* Access flag */ 9467 goto do_fault; 9468 } 9469 9470 ap = extract32(attrs, 4, 2); 9471 xn = extract32(attrs, 12, 1); 9472 9473 if (mmu_idx == ARMMMUIdx_S2NS) { 9474 ns = true; 9475 *prot = get_S2prot(env, ap, xn); 9476 } else { 9477 ns = extract32(attrs, 3, 1); 9478 pxn = extract32(attrs, 11, 1); 9479 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 9480 } 9481 9482 fault_type = ARMFault_Permission; 9483 if (!(*prot & (1 << access_type))) { 9484 goto do_fault; 9485 } 9486 9487 if (ns) { 9488 /* The NS bit will (as required by the architecture) have no effect if 9489 * the CPU doesn't support TZ or this is a non-secure translation 9490 * regime, because the attribute will already be non-secure. 9491 */ 9492 txattrs->secure = false; 9493 } 9494 9495 if (cacheattrs != NULL) { 9496 if (mmu_idx == ARMMMUIdx_S2NS) { 9497 cacheattrs->attrs = convert_stage2_attrs(env, 9498 extract32(attrs, 0, 4)); 9499 } else { 9500 /* Index into MAIR registers for cache attributes */ 9501 uint8_t attrindx = extract32(attrs, 0, 3); 9502 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 9503 assert(attrindx <= 7); 9504 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 9505 } 9506 cacheattrs->shareability = extract32(attrs, 6, 2); 9507 } 9508 9509 *phys_ptr = descaddr; 9510 *page_size_ptr = page_size; 9511 return false; 9512 9513 do_fault: 9514 fi->type = fault_type; 9515 fi->level = level; 9516 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 9517 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); 9518 return true; 9519 } 9520 9521 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 9522 ARMMMUIdx mmu_idx, 9523 int32_t address, int *prot) 9524 { 9525 if (!arm_feature(env, ARM_FEATURE_M)) { 9526 *prot = PAGE_READ | PAGE_WRITE; 9527 switch (address) { 9528 case 0xF0000000 ... 0xFFFFFFFF: 9529 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 9530 /* hivecs execing is ok */ 9531 *prot |= PAGE_EXEC; 9532 } 9533 break; 9534 case 0x00000000 ... 0x7FFFFFFF: 9535 *prot |= PAGE_EXEC; 9536 break; 9537 } 9538 } else { 9539 /* Default system address map for M profile cores. 9540 * The architecture specifies which regions are execute-never; 9541 * at the MPU level no other checks are defined. 9542 */ 9543 switch (address) { 9544 case 0x00000000 ... 0x1fffffff: /* ROM */ 9545 case 0x20000000 ... 0x3fffffff: /* SRAM */ 9546 case 0x60000000 ... 0x7fffffff: /* RAM */ 9547 case 0x80000000 ... 
0x9fffffff: /* RAM */ 9548 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 9549 break; 9550 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 9551 case 0xa0000000 ... 0xbfffffff: /* Device */ 9552 case 0xc0000000 ... 0xdfffffff: /* Device */ 9553 case 0xe0000000 ... 0xffffffff: /* System */ 9554 *prot = PAGE_READ | PAGE_WRITE; 9555 break; 9556 default: 9557 g_assert_not_reached(); 9558 } 9559 } 9560 } 9561 9562 static bool pmsav7_use_background_region(ARMCPU *cpu, 9563 ARMMMUIdx mmu_idx, bool is_user) 9564 { 9565 /* Return true if we should use the default memory map as a 9566 * "background" region if there are no hits against any MPU regions. 9567 */ 9568 CPUARMState *env = &cpu->env; 9569 9570 if (is_user) { 9571 return false; 9572 } 9573 9574 if (arm_feature(env, ARM_FEATURE_M)) { 9575 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 9576 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 9577 } else { 9578 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 9579 } 9580 } 9581 9582 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 9583 { 9584 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 9585 return arm_feature(env, ARM_FEATURE_M) && 9586 extract32(address, 20, 12) == 0xe00; 9587 } 9588 9589 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 9590 { 9591 /* True if address is in the M profile system region 9592 * 0xe0000000 - 0xffffffff 9593 */ 9594 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 9595 } 9596 9597 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 9598 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9599 hwaddr *phys_ptr, int *prot, 9600 target_ulong *page_size, 9601 ARMMMUFaultInfo *fi) 9602 { 9603 ARMCPU *cpu = arm_env_get_cpu(env); 9604 int n; 9605 bool is_user = regime_is_user(env, mmu_idx); 9606 9607 *phys_ptr = address; 9608 *page_size = TARGET_PAGE_SIZE; 9609 *prot = 0; 9610 9611 if (regime_translation_disabled(env, mmu_idx) || 9612 m_is_ppb_region(env, address)) { 9613 /* MPU disabled or M profile PPB access: use default memory map. 9614 * The other case which uses the default memory map in the 9615 * v7M ARM ARM pseudocode is exception vector reads from the vector 9616 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 9617 * which always does a direct read using address_space_ldl(), rather 9618 * than going via this function, so we don't need to check that here. 
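 * Note that in the MPU-enabled case below the region search runs from the
 * highest numbered region downwards and stops at the first hit, so for
 * PMSAv7 overlapping regions resolve in favour of the highest numbered one
 * (unlike PMSAv8, where an overlapping hit is treated as a fault).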
9619 */ 9620 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9621 } else { /* MPU enabled */ 9622 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 9623 /* region search */ 9624 uint32_t base = env->pmsav7.drbar[n]; 9625 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 9626 uint32_t rmask; 9627 bool srdis = false; 9628 9629 if (!(env->pmsav7.drsr[n] & 0x1)) { 9630 continue; 9631 } 9632 9633 if (!rsize) { 9634 qemu_log_mask(LOG_GUEST_ERROR, 9635 "DRSR[%d]: Rsize field cannot be 0\n", n); 9636 continue; 9637 } 9638 rsize++; 9639 rmask = (1ull << rsize) - 1; 9640 9641 if (base & rmask) { 9642 qemu_log_mask(LOG_GUEST_ERROR, 9643 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 9644 "to DRSR region size, mask = 0x%" PRIx32 "\n", 9645 n, base, rmask); 9646 continue; 9647 } 9648 9649 if (address < base || address > base + rmask) { 9650 continue; 9651 } 9652 9653 /* Region matched */ 9654 9655 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 9656 int i, snd; 9657 uint32_t srdis_mask; 9658 9659 rsize -= 3; /* sub region size (power of 2) */ 9660 snd = ((address - base) >> rsize) & 0x7; 9661 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 9662 9663 srdis_mask = srdis ? 0x3 : 0x0; 9664 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 9665 /* This will check in groups of 2, 4 and then 8, whether 9666 * the subregion bits are consistent. rsize is incremented 9667 * back up to give the region size, considering consistent 9668 * adjacent subregions as one region. Stop testing if rsize 9669 * is already big enough for an entire QEMU page. 9670 */ 9671 int snd_rounded = snd & ~(i - 1); 9672 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 9673 snd_rounded + 8, i); 9674 if (srdis_mask ^ srdis_multi) { 9675 break; 9676 } 9677 srdis_mask = (srdis_mask << i) | srdis_mask; 9678 rsize++; 9679 } 9680 } 9681 if (srdis) { 9682 continue; 9683 } 9684 if (rsize < TARGET_PAGE_BITS) { 9685 *page_size = 1 << rsize; 9686 } 9687 break; 9688 } 9689 9690 if (n == -1) { /* no hits */ 9691 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 9692 /* background fault */ 9693 fi->type = ARMFault_Background; 9694 return true; 9695 } 9696 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9697 } else { /* a MPU hit! */ 9698 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 9699 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 9700 9701 if (m_is_system_region(env, address)) { 9702 /* System space is always execute never */ 9703 xn = 1; 9704 } 9705 9706 if (is_user) { /* User mode AP bit decoding */ 9707 switch (ap) { 9708 case 0: 9709 case 1: 9710 case 5: 9711 break; /* no access */ 9712 case 3: 9713 *prot |= PAGE_WRITE; 9714 /* fall through */ 9715 case 2: 9716 case 6: 9717 *prot |= PAGE_READ | PAGE_EXEC; 9718 break; 9719 case 7: 9720 /* for v7M, same as 6; for R profile a reserved value */ 9721 if (arm_feature(env, ARM_FEATURE_M)) { 9722 *prot |= PAGE_READ | PAGE_EXEC; 9723 break; 9724 } 9725 /* fall through */ 9726 default: 9727 qemu_log_mask(LOG_GUEST_ERROR, 9728 "DRACR[%d]: Bad value for AP bits: 0x%" 9729 PRIx32 "\n", n, ap); 9730 } 9731 } else { /* Priv. 
mode AP bits decoding */ 9732 switch (ap) { 9733 case 0: 9734 break; /* no access */ 9735 case 1: 9736 case 2: 9737 case 3: 9738 *prot |= PAGE_WRITE; 9739 /* fall through */ 9740 case 5: 9741 case 6: 9742 *prot |= PAGE_READ | PAGE_EXEC; 9743 break; 9744 case 7: 9745 /* for v7M, same as 6; for R profile a reserved value */ 9746 if (arm_feature(env, ARM_FEATURE_M)) { 9747 *prot |= PAGE_READ | PAGE_EXEC; 9748 break; 9749 } 9750 /* fall through */ 9751 default: 9752 qemu_log_mask(LOG_GUEST_ERROR, 9753 "DRACR[%d]: Bad value for AP bits: 0x%" 9754 PRIx32 "\n", n, ap); 9755 } 9756 } 9757 9758 /* execute never */ 9759 if (xn) { 9760 *prot &= ~PAGE_EXEC; 9761 } 9762 } 9763 } 9764 9765 fi->type = ARMFault_Permission; 9766 fi->level = 1; 9767 /* 9768 * Core QEMU code can't handle execution from small pages yet, so 9769 * don't try it. This way we'll get an MPU exception, rather than 9770 * eventually causing QEMU to exit in get_page_addr_code(). 9771 */ 9772 if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) { 9773 qemu_log_mask(LOG_UNIMP, 9774 "MPU: No support for execution from regions " 9775 "smaller than 1K\n"); 9776 *prot &= ~PAGE_EXEC; 9777 } 9778 return !(*prot & (1 << access_type)); 9779 } 9780 9781 static bool v8m_is_sau_exempt(CPUARMState *env, 9782 uint32_t address, MMUAccessType access_type) 9783 { 9784 /* The architecture specifies that certain address ranges are 9785 * exempt from v8M SAU/IDAU checks. 9786 */ 9787 return 9788 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 9789 (address >= 0xe0000000 && address <= 0xe0002fff) || 9790 (address >= 0xe000e000 && address <= 0xe000efff) || 9791 (address >= 0xe002e000 && address <= 0xe002efff) || 9792 (address >= 0xe0040000 && address <= 0xe0041fff) || 9793 (address >= 0xe00ff000 && address <= 0xe00fffff); 9794 } 9795 9796 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 9797 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9798 V8M_SAttributes *sattrs) 9799 { 9800 /* Look up the security attributes for this address. Compare the 9801 * pseudocode SecurityCheck() function. 9802 * We assume the caller has zero-initialized *sattrs. 
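 * On return, ns and nsc give the combined SAU/IDAU security disposition of
 * the address, sregion/srvalid identify the single matching SAU region (if
 * any), and iregion/irvalid report the IDAU region number.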
9803 */ 9804 ARMCPU *cpu = arm_env_get_cpu(env); 9805 int r; 9806 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 9807 int idau_region = IREGION_NOTVALID; 9808 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 9809 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 9810 9811 if (cpu->idau) { 9812 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 9813 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 9814 9815 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 9816 &idau_nsc); 9817 } 9818 9819 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 9820 /* 0xf0000000..0xffffffff is always S for insn fetches */ 9821 return; 9822 } 9823 9824 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 9825 sattrs->ns = !regime_is_secure(env, mmu_idx); 9826 return; 9827 } 9828 9829 if (idau_region != IREGION_NOTVALID) { 9830 sattrs->irvalid = true; 9831 sattrs->iregion = idau_region; 9832 } 9833 9834 switch (env->sau.ctrl & 3) { 9835 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 9836 break; 9837 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 9838 sattrs->ns = true; 9839 break; 9840 default: /* SAU.ENABLE == 1 */ 9841 for (r = 0; r < cpu->sau_sregion; r++) { 9842 if (env->sau.rlar[r] & 1) { 9843 uint32_t base = env->sau.rbar[r] & ~0x1f; 9844 uint32_t limit = env->sau.rlar[r] | 0x1f; 9845 9846 if (base <= address && limit >= address) { 9847 if (base > addr_page_base || limit < addr_page_limit) { 9848 sattrs->subpage = true; 9849 } 9850 if (sattrs->srvalid) { 9851 /* If we hit in more than one region then we must report 9852 * as Secure, not NS-Callable, with no valid region 9853 * number info. 9854 */ 9855 sattrs->ns = false; 9856 sattrs->nsc = false; 9857 sattrs->sregion = 0; 9858 sattrs->srvalid = false; 9859 break; 9860 } else { 9861 if (env->sau.rlar[r] & 2) { 9862 sattrs->nsc = true; 9863 } else { 9864 sattrs->ns = true; 9865 } 9866 sattrs->srvalid = true; 9867 sattrs->sregion = r; 9868 } 9869 } 9870 } 9871 } 9872 9873 /* The IDAU will override the SAU lookup results if it specifies 9874 * higher security than the SAU does. 9875 */ 9876 if (!idau_ns) { 9877 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 9878 sattrs->ns = false; 9879 sattrs->nsc = idau_nsc; 9880 } 9881 } 9882 break; 9883 } 9884 } 9885 9886 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 9887 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9888 hwaddr *phys_ptr, MemTxAttrs *txattrs, 9889 int *prot, bool *is_subpage, 9890 ARMMMUFaultInfo *fi, uint32_t *mregion) 9891 { 9892 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 9893 * that a full phys-to-virt translation does). 9894 * mregion is (if not NULL) set to the region number which matched, 9895 * or -1 if no region number is returned (MPU off, address did not 9896 * hit a region, address hit in multiple regions). 9897 * We set is_subpage to true if the region hit doesn't cover the 9898 * entire TARGET_PAGE the address is within. 
9899 */ 9900 ARMCPU *cpu = arm_env_get_cpu(env); 9901 bool is_user = regime_is_user(env, mmu_idx); 9902 uint32_t secure = regime_is_secure(env, mmu_idx); 9903 int n; 9904 int matchregion = -1; 9905 bool hit = false; 9906 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 9907 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 9908 9909 *is_subpage = false; 9910 *phys_ptr = address; 9911 *prot = 0; 9912 if (mregion) { 9913 *mregion = -1; 9914 } 9915 9916 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 9917 * was an exception vector read from the vector table (which is always 9918 * done using the default system address map), because those accesses 9919 * are done in arm_v7m_load_vector(), which always does a direct 9920 * read using address_space_ldl(), rather than going via this function. 9921 */ 9922 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 9923 hit = true; 9924 } else if (m_is_ppb_region(env, address)) { 9925 hit = true; 9926 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 9927 hit = true; 9928 } else { 9929 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 9930 /* region search */ 9931 /* Note that the base address is bits [31:5] from the register 9932 * with bits [4:0] all zeroes, but the limit address is bits 9933 * [31:5] from the register with bits [4:0] all ones. 9934 */ 9935 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 9936 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 9937 9938 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 9939 /* Region disabled */ 9940 continue; 9941 } 9942 9943 if (address < base || address > limit) { 9944 continue; 9945 } 9946 9947 if (base > addr_page_base || limit < addr_page_limit) { 9948 *is_subpage = true; 9949 } 9950 9951 if (hit) { 9952 /* Multiple regions match -- always a failure (unlike 9953 * PMSAv7 where highest-numbered-region wins) 9954 */ 9955 fi->type = ARMFault_Permission; 9956 fi->level = 1; 9957 return true; 9958 } 9959 9960 matchregion = n; 9961 hit = true; 9962 } 9963 } 9964 9965 if (!hit) { 9966 /* background fault */ 9967 fi->type = ARMFault_Background; 9968 return true; 9969 } 9970 9971 if (matchregion == -1) { 9972 /* hit using the background region */ 9973 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 9974 } else { 9975 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 9976 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 9977 9978 if (m_is_system_region(env, address)) { 9979 /* System space is always execute never */ 9980 xn = 1; 9981 } 9982 9983 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 9984 if (*prot && !xn) { 9985 *prot |= PAGE_EXEC; 9986 } 9987 /* We don't need to look the attribute up in the MAIR0/MAIR1 9988 * registers because that only tells us about cacheability. 9989 */ 9990 if (mregion) { 9991 *mregion = matchregion; 9992 } 9993 } 9994 9995 fi->type = ARMFault_Permission; 9996 fi->level = 1; 9997 /* 9998 * Core QEMU code can't handle execution from small pages yet, so 9999 * don't try it. This means any attempted execution will generate 10000 * an MPU exception, rather than eventually causing QEMU to exit in 10001 * get_page_addr_code(). 
10002 */ 10003 if (*is_subpage && (*prot & PAGE_EXEC)) { 10004 qemu_log_mask(LOG_UNIMP, 10005 "MPU: No support for execution from regions " 10006 "smaller than 1K\n"); 10007 *prot &= ~PAGE_EXEC; 10008 } 10009 return !(*prot & (1 << access_type)); 10010 } 10011 10012 10013 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 10014 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10015 hwaddr *phys_ptr, MemTxAttrs *txattrs, 10016 int *prot, target_ulong *page_size, 10017 ARMMMUFaultInfo *fi) 10018 { 10019 uint32_t secure = regime_is_secure(env, mmu_idx); 10020 V8M_SAttributes sattrs = {}; 10021 bool ret; 10022 bool mpu_is_subpage; 10023 10024 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10025 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 10026 if (access_type == MMU_INST_FETCH) { 10027 /* Instruction fetches always use the MMU bank and the 10028 * transaction attribute determined by the fetch address, 10029 * regardless of CPU state. This is painful for QEMU 10030 * to handle, because it would mean we need to encode 10031 * into the mmu_idx not just the (user, negpri) information 10032 * for the current security state but also that for the 10033 * other security state, which would balloon the number 10034 * of mmu_idx values needed alarmingly. 10035 * Fortunately we can avoid this because it's not actually 10036 * possible to arbitrarily execute code from memory with 10037 * the wrong security attribute: it will always generate 10038 * an exception of some kind or another, apart from the 10039 * special case of an NS CPU executing an SG instruction 10040 * in S&NSC memory. So we always just fail the translation 10041 * here and sort things out in the exception handler 10042 * (including possibly emulating an SG instruction). 10043 */ 10044 if (sattrs.ns != !secure) { 10045 if (sattrs.nsc) { 10046 fi->type = ARMFault_QEMU_NSCExec; 10047 } else { 10048 fi->type = ARMFault_QEMU_SFault; 10049 } 10050 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10051 *phys_ptr = address; 10052 *prot = 0; 10053 return true; 10054 } 10055 } else { 10056 /* For data accesses we always use the MMU bank indicated 10057 * by the current CPU state, but the security attributes 10058 * might downgrade a secure access to nonsecure. 10059 */ 10060 if (sattrs.ns) { 10061 txattrs->secure = false; 10062 } else if (!secure) { 10063 /* NS access to S memory must fault. 10064 * Architecturally we should first check whether the 10065 * MPU information for this address indicates that we 10066 * are doing an unaligned access to Device memory, which 10067 * should generate a UsageFault instead. QEMU does not 10068 * currently check for that kind of unaligned access though. 10069 * If we added it we would need to do so as a special case 10070 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 10071 */ 10072 fi->type = ARMFault_QEMU_SFault; 10073 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 10074 *phys_ptr = address; 10075 *prot = 0; 10076 return true; 10077 } 10078 } 10079 } 10080 10081 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 10082 txattrs, prot, &mpu_is_subpage, fi, NULL); 10083 /* 10084 * TODO: this is a temporary hack to ignore the fact that the SAU region 10085 * is smaller than a page if this is an executable region. We never 10086 * supported small MPU regions, but we did (accidentally) allow small 10087 * SAU regions, and if we now made small SAU regions not be executable 10088 * then this would break previously working guest code. 
We can't 10089 * remove this until/unless we implement support for execution from 10090 * small regions. 10091 */ 10092 if (*prot & PAGE_EXEC) { 10093 sattrs.subpage = false; 10094 } 10095 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 10096 return ret; 10097 } 10098 10099 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 10100 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10101 hwaddr *phys_ptr, int *prot, 10102 ARMMMUFaultInfo *fi) 10103 { 10104 int n; 10105 uint32_t mask; 10106 uint32_t base; 10107 bool is_user = regime_is_user(env, mmu_idx); 10108 10109 if (regime_translation_disabled(env, mmu_idx)) { 10110 /* MPU disabled. */ 10111 *phys_ptr = address; 10112 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10113 return false; 10114 } 10115 10116 *phys_ptr = address; 10117 for (n = 7; n >= 0; n--) { 10118 base = env->cp15.c6_region[n]; 10119 if ((base & 1) == 0) { 10120 continue; 10121 } 10122 mask = 1 << ((base >> 1) & 0x1f); 10123 /* Keep this shift separate from the above to avoid an 10124 (undefined) << 32. */ 10125 mask = (mask << 1) - 1; 10126 if (((base ^ address) & ~mask) == 0) { 10127 break; 10128 } 10129 } 10130 if (n < 0) { 10131 fi->type = ARMFault_Background; 10132 return true; 10133 } 10134 10135 if (access_type == MMU_INST_FETCH) { 10136 mask = env->cp15.pmsav5_insn_ap; 10137 } else { 10138 mask = env->cp15.pmsav5_data_ap; 10139 } 10140 mask = (mask >> (n * 4)) & 0xf; 10141 switch (mask) { 10142 case 0: 10143 fi->type = ARMFault_Permission; 10144 fi->level = 1; 10145 return true; 10146 case 1: 10147 if (is_user) { 10148 fi->type = ARMFault_Permission; 10149 fi->level = 1; 10150 return true; 10151 } 10152 *prot = PAGE_READ | PAGE_WRITE; 10153 break; 10154 case 2: 10155 *prot = PAGE_READ; 10156 if (!is_user) { 10157 *prot |= PAGE_WRITE; 10158 } 10159 break; 10160 case 3: 10161 *prot = PAGE_READ | PAGE_WRITE; 10162 break; 10163 case 5: 10164 if (is_user) { 10165 fi->type = ARMFault_Permission; 10166 fi->level = 1; 10167 return true; 10168 } 10169 *prot = PAGE_READ; 10170 break; 10171 case 6: 10172 *prot = PAGE_READ; 10173 break; 10174 default: 10175 /* Bad permission. */ 10176 fi->type = ARMFault_Permission; 10177 fi->level = 1; 10178 return true; 10179 } 10180 *prot |= PAGE_EXEC; 10181 return false; 10182 } 10183 10184 /* Combine either inner or outer cacheability attributes for normal 10185 * memory, according to table D4-42 and pseudocode procedure 10186 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 10187 * 10188 * NB: only stage 1 includes allocation hints (RW bits), leading to 10189 * some asymmetry. 
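 * As a worked example (illustrative encodings only): combining a stage 1
 * nibble of 0xf (Write-Back, read/write-allocate) with a stage 2 nibble of
 * 0xa (Write-Through) yields 0xb, i.e. the Write-Through type from stage 2
 * but keeping stage 1's allocation hints; if either nibble is 0x4
 * (Non-cacheable) the result is simply 0x4.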
10190 */ 10191 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 10192 { 10193 if (s1 == 4 || s2 == 4) { 10194 /* non-cacheable has precedence */ 10195 return 4; 10196 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 10197 /* stage 1 write-through takes precedence */ 10198 return s1; 10199 } else if (extract32(s2, 2, 2) == 2) { 10200 /* stage 2 write-through takes precedence, but the allocation hint 10201 * is still taken from stage 1 10202 */ 10203 return (2 << 2) | extract32(s1, 0, 2); 10204 } else { /* write-back */ 10205 return s1; 10206 } 10207 } 10208 10209 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 10210 * and CombineS1S2Desc() 10211 * 10212 * @s1: Attributes from stage 1 walk 10213 * @s2: Attributes from stage 2 walk 10214 */ 10215 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 10216 { 10217 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 10218 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 10219 ARMCacheAttrs ret; 10220 10221 /* Combine shareability attributes (table D4-43) */ 10222 if (s1.shareability == 2 || s2.shareability == 2) { 10223 /* if either are outer-shareable, the result is outer-shareable */ 10224 ret.shareability = 2; 10225 } else if (s1.shareability == 3 || s2.shareability == 3) { 10226 /* if either are inner-shareable, the result is inner-shareable */ 10227 ret.shareability = 3; 10228 } else { 10229 /* both non-shareable */ 10230 ret.shareability = 0; 10231 } 10232 10233 /* Combine memory type and cacheability attributes */ 10234 if (s1hi == 0 || s2hi == 0) { 10235 /* Device has precedence over normal */ 10236 if (s1lo == 0 || s2lo == 0) { 10237 /* nGnRnE has precedence over anything */ 10238 ret.attrs = 0; 10239 } else if (s1lo == 4 || s2lo == 4) { 10240 /* non-Reordering has precedence over Reordering */ 10241 ret.attrs = 4; /* nGnRE */ 10242 } else if (s1lo == 8 || s2lo == 8) { 10243 /* non-Gathering has precedence over Gathering */ 10244 ret.attrs = 8; /* nGRE */ 10245 } else { 10246 ret.attrs = 0xc; /* GRE */ 10247 } 10248 10249 /* Any location for which the resultant memory type is any 10250 * type of Device memory is always treated as Outer Shareable. 10251 */ 10252 ret.shareability = 2; 10253 } else { /* Normal memory */ 10254 /* Outer/inner cacheability combine independently */ 10255 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 10256 | combine_cacheattr_nibble(s1lo, s2lo); 10257 10258 if (ret.attrs == 0x44) { 10259 /* Any location for which the resultant memory type is Normal 10260 * Inner Non-cacheable, Outer Non-cacheable is always treated 10261 * as Outer Shareable. 10262 */ 10263 ret.shareability = 2; 10264 } 10265 } 10266 10267 return ret; 10268 } 10269 10270 10271 /* get_phys_addr - get the physical address for this virtual address 10272 * 10273 * Find the physical address corresponding to the given virtual address, 10274 * by doing a translation table walk on MMU based systems or using the 10275 * MPU state on MPU based systems. 10276 * 10277 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 10278 * prot and page_size may not be filled in, and the populated fsr value provides 10279 * information on why the translation aborted, in the format of a 10280 * DFSR/IFSR fault register, with the following caveats: 10281 * * we honour the short vs long DFSR format differences. 10282 * * the WnR bit is never set (the caller must do this). 
10283 * for PMSAv5 based systems we don't bother to return a full FSR format 10284 * value. 10285 * 10286 * @env: CPUARMState 10287 * @address: virtual address to get physical address for 10288 * @access_type: 0 for read, 1 for write, 2 for execute 10289 * @mmu_idx: MMU index indicating required translation regime 10290 * @phys_ptr: set to the physical address corresponding to the virtual address 10291 * @attrs: set to the memory transaction attributes to use 10292 * @prot: set to the permissions for the page containing phys_ptr 10293 * @page_size: set to the size of the page containing phys_ptr 10294 * @fi: set to fault info if the translation fails 10295 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 10296 */ 10297 static bool get_phys_addr(CPUARMState *env, target_ulong address, 10298 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10299 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10300 target_ulong *page_size, 10301 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10302 { 10303 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 10304 /* Call ourselves recursively to do the stage 1 and then stage 2 10305 * translations. 10306 */ 10307 if (arm_feature(env, ARM_FEATURE_EL2)) { 10308 hwaddr ipa; 10309 int s2_prot; 10310 int ret; 10311 ARMCacheAttrs cacheattrs2 = {}; 10312 10313 ret = get_phys_addr(env, address, access_type, 10314 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 10315 prot, page_size, fi, cacheattrs); 10316 10317 /* If S1 fails or S2 is disabled, return early. */ 10318 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 10319 *phys_ptr = ipa; 10320 return ret; 10321 } 10322 10323 /* S1 is done. Now do S2 translation. */ 10324 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS, 10325 phys_ptr, attrs, &s2_prot, 10326 page_size, fi, 10327 cacheattrs != NULL ? &cacheattrs2 : NULL); 10328 fi->s2addr = ipa; 10329 /* Combine the S1 and S2 perms. */ 10330 *prot &= s2_prot; 10331 10332 /* Combine the S1 and S2 cache attributes, if needed */ 10333 if (!ret && cacheattrs != NULL) { 10334 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 10335 } 10336 10337 return ret; 10338 } else { 10339 /* 10340 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 10341 */ 10342 mmu_idx = stage_1_mmu_idx(mmu_idx); 10343 } 10344 } 10345 10346 /* The page table entries may downgrade secure to non-secure, but 10347 * cannot upgrade a non-secure translation regime's attributes 10348 * to secure. 10349 */ 10350 attrs->secure = regime_is_secure(env, mmu_idx); 10351 attrs->user = regime_is_user(env, mmu_idx); 10352 10353 /* Fast Context Switch Extension. This doesn't exist at all in v8. 10354 * In v7 and earlier it affects all stage 1 translations.
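 * (Illustrative example: with the relevant FCSEIDR holding PID 3, i.e.
 * 0x06000000, a virtual address of 0x00001000 is remapped to the modified
 * VA 0x06001000 before translation; addresses of 0x02000000 and above are
 * left unchanged.)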
10355 */ 10356 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS 10357 && !arm_feature(env, ARM_FEATURE_V8)) { 10358 if (regime_el(env, mmu_idx) == 3) { 10359 address += env->cp15.fcseidr_s; 10360 } else { 10361 address += env->cp15.fcseidr_ns; 10362 } 10363 } 10364 10365 if (arm_feature(env, ARM_FEATURE_PMSA)) { 10366 bool ret; 10367 *page_size = TARGET_PAGE_SIZE; 10368 10369 if (arm_feature(env, ARM_FEATURE_V8)) { 10370 /* PMSAv8 */ 10371 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 10372 phys_ptr, attrs, prot, page_size, fi); 10373 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10374 /* PMSAv7 */ 10375 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 10376 phys_ptr, prot, page_size, fi); 10377 } else { 10378 /* Pre-v7 MPU */ 10379 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 10380 phys_ptr, prot, fi); 10381 } 10382 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 10383 " mmu_idx %u -> %s (prot %c%c%c)\n", 10384 access_type == MMU_DATA_LOAD ? "reading" : 10385 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 10386 (uint32_t)address, mmu_idx, 10387 ret ? "Miss" : "Hit", 10388 *prot & PAGE_READ ? 'r' : '-', 10389 *prot & PAGE_WRITE ? 'w' : '-', 10390 *prot & PAGE_EXEC ? 'x' : '-'); 10391 10392 return ret; 10393 } 10394 10395 /* Definitely a real MMU, not an MPU */ 10396 10397 if (regime_translation_disabled(env, mmu_idx)) { 10398 /* MMU disabled. */ 10399 *phys_ptr = address; 10400 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10401 *page_size = TARGET_PAGE_SIZE; 10402 return 0; 10403 } 10404 10405 if (regime_using_lpae_format(env, mmu_idx)) { 10406 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 10407 phys_ptr, attrs, prot, page_size, 10408 fi, cacheattrs); 10409 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 10410 return get_phys_addr_v6(env, address, access_type, mmu_idx, 10411 phys_ptr, attrs, prot, page_size, fi); 10412 } else { 10413 return get_phys_addr_v5(env, address, access_type, mmu_idx, 10414 phys_ptr, prot, page_size, fi); 10415 } 10416 } 10417 10418 /* Walk the page table and (if the mapping exists) add the page 10419 * to the TLB. Return false on success, or true on failure. Populate 10420 * fsr with ARM DFSR/IFSR fault register format value on failure. 10421 */ 10422 bool arm_tlb_fill(CPUState *cs, vaddr address, 10423 MMUAccessType access_type, int mmu_idx, 10424 ARMMMUFaultInfo *fi) 10425 { 10426 ARMCPU *cpu = ARM_CPU(cs); 10427 CPUARMState *env = &cpu->env; 10428 hwaddr phys_addr; 10429 target_ulong page_size; 10430 int prot; 10431 int ret; 10432 MemTxAttrs attrs = {}; 10433 10434 ret = get_phys_addr(env, address, access_type, 10435 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, 10436 &attrs, &prot, &page_size, fi, NULL); 10437 if (!ret) { 10438 /* 10439 * Map a single [sub]page. Regions smaller than our declared 10440 * target page size are handled specially, so for those we 10441 * pass in the exact addresses. 
10442 */ 10443 if (page_size >= TARGET_PAGE_SIZE) { 10444 phys_addr &= TARGET_PAGE_MASK; 10445 address &= TARGET_PAGE_MASK; 10446 } 10447 tlb_set_page_with_attrs(cs, address, phys_addr, attrs, 10448 prot, mmu_idx, page_size); 10449 return 0; 10450 } 10451 10452 return ret; 10453 } 10454 10455 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 10456 MemTxAttrs *attrs) 10457 { 10458 ARMCPU *cpu = ARM_CPU(cs); 10459 CPUARMState *env = &cpu->env; 10460 hwaddr phys_addr; 10461 target_ulong page_size; 10462 int prot; 10463 bool ret; 10464 ARMMMUFaultInfo fi = {}; 10465 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 10466 10467 *attrs = (MemTxAttrs) {}; 10468 10469 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 10470 attrs, &prot, &page_size, &fi, NULL); 10471 10472 if (ret) { 10473 return -1; 10474 } 10475 return phys_addr; 10476 } 10477 10478 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 10479 { 10480 uint32_t mask; 10481 unsigned el = arm_current_el(env); 10482 10483 /* First handle registers which unprivileged can read */ 10484 10485 switch (reg) { 10486 case 0 ... 7: /* xPSR sub-fields */ 10487 mask = 0; 10488 if ((reg & 1) && el) { 10489 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ 10490 } 10491 if (!(reg & 4)) { 10492 mask |= XPSR_NZCV | XPSR_Q; /* APSR */ 10493 } 10494 /* EPSR reads as zero */ 10495 return xpsr_read(env) & mask; 10496 break; 10497 case 20: /* CONTROL */ 10498 return env->v7m.control[env->v7m.secure]; 10499 case 0x94: /* CONTROL_NS */ 10500 /* We have to handle this here because unprivileged Secure code 10501 * can read the NS CONTROL register. 10502 */ 10503 if (!env->v7m.secure) { 10504 return 0; 10505 } 10506 return env->v7m.control[M_REG_NS]; 10507 } 10508 10509 if (el == 0) { 10510 return 0; /* unprivileged reads others as zero */ 10511 } 10512 10513 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10514 switch (reg) { 10515 case 0x88: /* MSP_NS */ 10516 if (!env->v7m.secure) { 10517 return 0; 10518 } 10519 return env->v7m.other_ss_msp; 10520 case 0x89: /* PSP_NS */ 10521 if (!env->v7m.secure) { 10522 return 0; 10523 } 10524 return env->v7m.other_ss_psp; 10525 case 0x8a: /* MSPLIM_NS */ 10526 if (!env->v7m.secure) { 10527 return 0; 10528 } 10529 return env->v7m.msplim[M_REG_NS]; 10530 case 0x8b: /* PSPLIM_NS */ 10531 if (!env->v7m.secure) { 10532 return 0; 10533 } 10534 return env->v7m.psplim[M_REG_NS]; 10535 case 0x90: /* PRIMASK_NS */ 10536 if (!env->v7m.secure) { 10537 return 0; 10538 } 10539 return env->v7m.primask[M_REG_NS]; 10540 case 0x91: /* BASEPRI_NS */ 10541 if (!env->v7m.secure) { 10542 return 0; 10543 } 10544 return env->v7m.basepri[M_REG_NS]; 10545 case 0x93: /* FAULTMASK_NS */ 10546 if (!env->v7m.secure) { 10547 return 0; 10548 } 10549 return env->v7m.faultmask[M_REG_NS]; 10550 case 0x98: /* SP_NS */ 10551 { 10552 /* This gives the non-secure SP selected based on whether we're 10553 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10554 */ 10555 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10556 10557 if (!env->v7m.secure) { 10558 return 0; 10559 } 10560 if (!arm_v7m_is_handler_mode(env) && spsel) { 10561 return env->v7m.other_ss_psp; 10562 } else { 10563 return env->v7m.other_ss_msp; 10564 } 10565 } 10566 default: 10567 break; 10568 } 10569 } 10570 10571 switch (reg) { 10572 case 8: /* MSP */ 10573 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13]; 10574 case 9: /* PSP */ 10575 return v7m_using_psp(env) ? 
env->regs[13] : env->v7m.other_sp; 10576 case 10: /* MSPLIM */ 10577 if (!arm_feature(env, ARM_FEATURE_V8)) { 10578 goto bad_reg; 10579 } 10580 return env->v7m.msplim[env->v7m.secure]; 10581 case 11: /* PSPLIM */ 10582 if (!arm_feature(env, ARM_FEATURE_V8)) { 10583 goto bad_reg; 10584 } 10585 return env->v7m.psplim[env->v7m.secure]; 10586 case 16: /* PRIMASK */ 10587 return env->v7m.primask[env->v7m.secure]; 10588 case 17: /* BASEPRI */ 10589 case 18: /* BASEPRI_MAX */ 10590 return env->v7m.basepri[env->v7m.secure]; 10591 case 19: /* FAULTMASK */ 10592 return env->v7m.faultmask[env->v7m.secure]; 10593 default: 10594 bad_reg: 10595 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 10596 " register %d\n", reg); 10597 return 0; 10598 } 10599 } 10600 10601 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) 10602 { 10603 /* We're passed bits [11..0] of the instruction; extract 10604 * SYSm and the mask bits. 10605 * Invalid combinations of SYSm and mask are UNPREDICTABLE; 10606 * we choose to treat them as if the mask bits were valid. 10607 * NB that the pseudocode 'mask' variable is bits [11..10], 10608 * whereas ours is [11..8]. 10609 */ 10610 uint32_t mask = extract32(maskreg, 8, 4); 10611 uint32_t reg = extract32(maskreg, 0, 8); 10612 10613 if (arm_current_el(env) == 0 && reg > 7) { 10614 /* only xPSR sub-fields may be written by unprivileged */ 10615 return; 10616 } 10617 10618 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 10619 switch (reg) { 10620 case 0x88: /* MSP_NS */ 10621 if (!env->v7m.secure) { 10622 return; 10623 } 10624 env->v7m.other_ss_msp = val; 10625 return; 10626 case 0x89: /* PSP_NS */ 10627 if (!env->v7m.secure) { 10628 return; 10629 } 10630 env->v7m.other_ss_psp = val; 10631 return; 10632 case 0x8a: /* MSPLIM_NS */ 10633 if (!env->v7m.secure) { 10634 return; 10635 } 10636 env->v7m.msplim[M_REG_NS] = val & ~7; 10637 return; 10638 case 0x8b: /* PSPLIM_NS */ 10639 if (!env->v7m.secure) { 10640 return; 10641 } 10642 env->v7m.psplim[M_REG_NS] = val & ~7; 10643 return; 10644 case 0x90: /* PRIMASK_NS */ 10645 if (!env->v7m.secure) { 10646 return; 10647 } 10648 env->v7m.primask[M_REG_NS] = val & 1; 10649 return; 10650 case 0x91: /* BASEPRI_NS */ 10651 if (!env->v7m.secure) { 10652 return; 10653 } 10654 env->v7m.basepri[M_REG_NS] = val & 0xff; 10655 return; 10656 case 0x93: /* FAULTMASK_NS */ 10657 if (!env->v7m.secure) { 10658 return; 10659 } 10660 env->v7m.faultmask[M_REG_NS] = val & 1; 10661 return; 10662 case 0x94: /* CONTROL_NS */ 10663 if (!env->v7m.secure) { 10664 return; 10665 } 10666 write_v7m_control_spsel_for_secstate(env, 10667 val & R_V7M_CONTROL_SPSEL_MASK, 10668 M_REG_NS); 10669 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; 10670 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; 10671 return; 10672 case 0x98: /* SP_NS */ 10673 { 10674 /* This gives the non-secure SP selected based on whether we're 10675 * currently in handler mode or not, using the NS CONTROL.SPSEL. 10676 */ 10677 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 10678 10679 if (!env->v7m.secure) { 10680 return; 10681 } 10682 if (!arm_v7m_is_handler_mode(env) && spsel) { 10683 env->v7m.other_ss_psp = val; 10684 } else { 10685 env->v7m.other_ss_msp = val; 10686 } 10687 return; 10688 } 10689 default: 10690 break; 10691 } 10692 } 10693 10694 switch (reg) { 10695 case 0 ... 
7: /* xPSR sub-fields */ 10696 /* only APSR is actually writable */ 10697 if (!(reg & 4)) { 10698 uint32_t apsrmask = 0; 10699 10700 if (mask & 8) { 10701 apsrmask |= XPSR_NZCV | XPSR_Q; 10702 } 10703 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { 10704 apsrmask |= XPSR_GE; 10705 } 10706 xpsr_write(env, val, apsrmask); 10707 } 10708 break; 10709 case 8: /* MSP */ 10710 if (v7m_using_psp(env)) { 10711 env->v7m.other_sp = val; 10712 } else { 10713 env->regs[13] = val; 10714 } 10715 break; 10716 case 9: /* PSP */ 10717 if (v7m_using_psp(env)) { 10718 env->regs[13] = val; 10719 } else { 10720 env->v7m.other_sp = val; 10721 } 10722 break; 10723 case 10: /* MSPLIM */ 10724 if (!arm_feature(env, ARM_FEATURE_V8)) { 10725 goto bad_reg; 10726 } 10727 env->v7m.msplim[env->v7m.secure] = val & ~7; 10728 break; 10729 case 11: /* PSPLIM */ 10730 if (!arm_feature(env, ARM_FEATURE_V8)) { 10731 goto bad_reg; 10732 } 10733 env->v7m.psplim[env->v7m.secure] = val & ~7; 10734 break; 10735 case 16: /* PRIMASK */ 10736 env->v7m.primask[env->v7m.secure] = val & 1; 10737 break; 10738 case 17: /* BASEPRI */ 10739 env->v7m.basepri[env->v7m.secure] = val & 0xff; 10740 break; 10741 case 18: /* BASEPRI_MAX */ 10742 val &= 0xff; 10743 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] 10744 || env->v7m.basepri[env->v7m.secure] == 0)) { 10745 env->v7m.basepri[env->v7m.secure] = val; 10746 } 10747 break; 10748 case 19: /* FAULTMASK */ 10749 env->v7m.faultmask[env->v7m.secure] = val & 1; 10750 break; 10751 case 20: /* CONTROL */ 10752 /* Writing to the SPSEL bit only has an effect if we are in 10753 * thread mode; other bits can be updated by any privileged code. 10754 * write_v7m_control_spsel() deals with updating the SPSEL bit in 10755 * env->v7m.control, so we only need update the others. 10756 * For v7M, we must just ignore explicit writes to SPSEL in handler 10757 * mode; for v8M the write is permitted but will have no effect. 10758 */ 10759 if (arm_feature(env, ARM_FEATURE_V8) || 10760 !arm_v7m_is_handler_mode(env)) { 10761 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 10762 } 10763 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; 10764 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; 10765 break; 10766 default: 10767 bad_reg: 10768 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 10769 " register %d\n", reg); 10770 return; 10771 } 10772 } 10773 10774 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 10775 { 10776 /* Implement the TT instruction. op is bits [7:6] of the insn. */ 10777 bool forceunpriv = op & 1; 10778 bool alt = op & 2; 10779 V8M_SAttributes sattrs = {}; 10780 uint32_t tt_resp; 10781 bool r, rw, nsr, nsrw, mrvalid; 10782 int prot; 10783 ARMMMUFaultInfo fi = {}; 10784 MemTxAttrs attrs = {}; 10785 hwaddr phys_addr; 10786 ARMMMUIdx mmu_idx; 10787 uint32_t mregion; 10788 bool targetpriv; 10789 bool targetsec = env->v7m.secure; 10790 bool is_subpage; 10791 10792 /* Work out what the security state and privilege level we're 10793 * interested in is... 
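 * (For reference, op comes from bits [7:6] of the TT encoding: 0 is TT,
 * 1 is TTT (force unprivileged), 2 is TTA (alternate security state) and
 * 3 is TTAT, matching the forceunpriv/alt flags derived above.)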
10794 */ 10795 if (alt) { 10796 targetsec = !targetsec; 10797 } 10798 10799 if (forceunpriv) { 10800 targetpriv = false; 10801 } else { 10802 targetpriv = arm_v7m_is_handler_mode(env) || 10803 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); 10804 } 10805 10806 /* ...and then figure out which MMU index this is */ 10807 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); 10808 10809 /* We know that the MPU and SAU don't care about the access type 10810 * for our purposes beyond that we don't want to claim to be 10811 * an insn fetch, so we arbitrarily call this a read. 10812 */ 10813 10814 /* MPU region info only available for privileged or if 10815 * inspecting the other MPU state. 10816 */ 10817 if (arm_current_el(env) != 0 || alt) { 10818 /* We can ignore the return value as prot is always set */ 10819 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, 10820 &phys_addr, &attrs, &prot, &is_subpage, 10821 &fi, &mregion); 10822 if (mregion == -1) { 10823 mrvalid = false; 10824 mregion = 0; 10825 } else { 10826 mrvalid = true; 10827 } 10828 r = prot & PAGE_READ; 10829 rw = prot & PAGE_WRITE; 10830 } else { 10831 r = false; 10832 rw = false; 10833 mrvalid = false; 10834 mregion = 0; 10835 } 10836 10837 if (env->v7m.secure) { 10838 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 10839 nsr = sattrs.ns && r; 10840 nsrw = sattrs.ns && rw; 10841 } else { 10842 sattrs.ns = true; 10843 nsr = false; 10844 nsrw = false; 10845 } 10846 10847 tt_resp = (sattrs.iregion << 24) | 10848 (sattrs.irvalid << 23) | 10849 ((!sattrs.ns) << 22) | 10850 (nsrw << 21) | 10851 (nsr << 20) | 10852 (rw << 19) | 10853 (r << 18) | 10854 (sattrs.srvalid << 17) | 10855 (mrvalid << 16) | 10856 (sattrs.sregion << 8) | 10857 mregion; 10858 10859 return tt_resp; 10860 } 10861 10862 #endif 10863 10864 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) 10865 { 10866 /* Implement DC ZVA, which zeroes a fixed-length block of memory. 10867 * Note that we do not implement the (architecturally mandated) 10868 * alignment fault for attempts to use this on Device memory 10869 * (which matches the usual QEMU behaviour of not implementing either 10870 * alignment faults or any memory attribute handling). 10871 */ 10872 10873 ARMCPU *cpu = arm_env_get_cpu(env); 10874 uint64_t blocklen = 4 << cpu->dcz_blocksize; 10875 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 10876 10877 #ifndef CONFIG_USER_ONLY 10878 { 10879 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than 10880 * the block size so we might have to do more than one TLB lookup. 10881 * We know that in fact for any v8 CPU the page size is at least 4K 10882 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only 10883 * 1K as an artefact of legacy v5 subpage support being present in the 10884 * same QEMU executable. 10885 */ 10886 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); 10887 void *hostaddr[maxidx]; 10888 int try, i; 10889 unsigned mmu_idx = cpu_mmu_index(env, false); 10890 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 10891 10892 for (try = 0; try < 2; try++) { 10893 10894 for (i = 0; i < maxidx; i++) { 10895 hostaddr[i] = tlb_vaddr_to_host(env, 10896 vaddr + TARGET_PAGE_SIZE * i, 10897 1, mmu_idx); 10898 if (!hostaddr[i]) { 10899 break; 10900 } 10901 } 10902 if (i == maxidx) { 10903 /* If it's all in the TLB it's fair game for just writing to; 10904 * we know we don't need to update dirty status, etc. 
10905 */ 10906 for (i = 0; i < maxidx - 1; i++) { 10907 memset(hostaddr[i], 0, TARGET_PAGE_SIZE); 10908 } 10909 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); 10910 return; 10911 } 10912 /* OK, try a store and see if we can populate the tlb. This 10913 * might cause an exception if the memory isn't writable, 10914 * in which case we will longjmp out of here. We must for 10915 * this purpose use the actual register value passed to us 10916 * so that we get the fault address right. 10917 */ 10918 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); 10919 /* Now we can populate the other TLB entries, if any */ 10920 for (i = 0; i < maxidx; i++) { 10921 uint64_t va = vaddr + TARGET_PAGE_SIZE * i; 10922 if (va != (vaddr_in & TARGET_PAGE_MASK)) { 10923 helper_ret_stb_mmu(env, va, 0, oi, GETPC()); 10924 } 10925 } 10926 } 10927 10928 /* Slow path (probably attempt to do this to an I/O device or 10929 * similar, or clearing of a block of code we have translations 10930 * cached for). Just do a series of byte writes as the architecture 10931 * demands. It's not worth trying to use a cpu_physical_memory_map(), 10932 * memset(), unmap() sequence here because: 10933 * + we'd need to account for the blocksize being larger than a page 10934 * + the direct-RAM access case is almost always going to be dealt 10935 * with in the fastpath code above, so there's no speed benefit 10936 * + we would have to deal with the map returning NULL because the 10937 * bounce buffer was in use 10938 */ 10939 for (i = 0; i < blocklen; i++) { 10940 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); 10941 } 10942 } 10943 #else 10944 memset(g2h(vaddr), 0, blocklen); 10945 #endif 10946 } 10947 10948 /* Note that signed overflow is undefined in C. The following routines are 10949 careful to use unsigned types where modulo arithmetic is required. 10950 Failure to do so _will_ break on newer gcc. */ 10951 10952 /* Signed saturating arithmetic. */ 10953 10954 /* Perform 16-bit signed saturating addition. */ 10955 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 10956 { 10957 uint16_t res; 10958 10959 res = a + b; 10960 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 10961 if (a & 0x8000) 10962 res = 0x8000; 10963 else 10964 res = 0x7fff; 10965 } 10966 return res; 10967 } 10968 10969 /* Perform 8-bit signed saturating addition. */ 10970 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 10971 { 10972 uint8_t res; 10973 10974 res = a + b; 10975 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 10976 if (a & 0x80) 10977 res = 0x80; 10978 else 10979 res = 0x7f; 10980 } 10981 return res; 10982 } 10983 10984 /* Perform 16-bit signed saturating subtraction. */ 10985 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 10986 { 10987 uint16_t res; 10988 10989 res = a - b; 10990 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 10991 if (a & 0x8000) 10992 res = 0x8000; 10993 else 10994 res = 0x7fff; 10995 } 10996 return res; 10997 } 10998 10999 /* Perform 8-bit signed saturating subtraction. 
*/ 11000 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11001 { 11002 uint8_t res; 11003 11004 res = a - b; 11005 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11006 if (a & 0x80) 11007 res = 0x80; 11008 else 11009 res = 0x7f; 11010 } 11011 return res; 11012 } 11013 11014 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11015 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11016 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11017 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11018 #define PFX q 11019 11020 #include "op_addsub.h" 11021 11022 /* Unsigned saturating arithmetic. */ 11023 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11024 { 11025 uint16_t res; 11026 res = a + b; 11027 if (res < a) 11028 res = 0xffff; 11029 return res; 11030 } 11031 11032 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11033 { 11034 if (a > b) 11035 return a - b; 11036 else 11037 return 0; 11038 } 11039 11040 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11041 { 11042 uint8_t res; 11043 res = a + b; 11044 if (res < a) 11045 res = 0xff; 11046 return res; 11047 } 11048 11049 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11050 { 11051 if (a > b) 11052 return a - b; 11053 else 11054 return 0; 11055 } 11056 11057 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11058 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11059 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11060 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11061 #define PFX uq 11062 11063 #include "op_addsub.h" 11064 11065 /* Signed modulo arithmetic. */ 11066 #define SARITH16(a, b, n, op) do { \ 11067 int32_t sum; \ 11068 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11069 RESULT(sum, n, 16); \ 11070 if (sum >= 0) \ 11071 ge |= 3 << (n * 2); \ 11072 } while(0) 11073 11074 #define SARITH8(a, b, n, op) do { \ 11075 int32_t sum; \ 11076 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11077 RESULT(sum, n, 8); \ 11078 if (sum >= 0) \ 11079 ge |= 1 << n; \ 11080 } while(0) 11081 11082 11083 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11084 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11085 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11086 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11087 #define PFX s 11088 #define ARITH_GE 11089 11090 #include "op_addsub.h" 11091 11092 /* Unsigned modulo arithmetic. */ 11093 #define ADD16(a, b, n) do { \ 11094 uint32_t sum; \ 11095 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 11096 RESULT(sum, n, 16); \ 11097 if ((sum >> 16) == 1) \ 11098 ge |= 3 << (n * 2); \ 11099 } while(0) 11100 11101 #define ADD8(a, b, n) do { \ 11102 uint32_t sum; \ 11103 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 11104 RESULT(sum, n, 8); \ 11105 if ((sum >> 8) == 1) \ 11106 ge |= 1 << n; \ 11107 } while(0) 11108 11109 #define SUB16(a, b, n) do { \ 11110 uint32_t sum; \ 11111 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 11112 RESULT(sum, n, 16); \ 11113 if ((sum >> 16) == 0) \ 11114 ge |= 3 << (n * 2); \ 11115 } while(0) 11116 11117 #define SUB8(a, b, n) do { \ 11118 uint32_t sum; \ 11119 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 11120 RESULT(sum, n, 8); \ 11121 if ((sum >> 8) == 0) \ 11122 ge |= 1 << n; \ 11123 } while(0) 11124 11125 #define PFX u 11126 #define ARITH_GE 11127 11128 #include "op_addsub.h" 11129 11130 /* Halved signed arithmetic. 
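 * As an illustrative example of why the widening matters: for SHADD16,
 * adding 0x7fff and 0x0001 in one lane gives a 32-bit intermediate sum of
 * 0x8000, and the >> 1 then produces 0x4000, so the lane result cannot
 * overflow.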
*/ 11131 #define ADD16(a, b, n) \ 11132 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 11133 #define SUB16(a, b, n) \ 11134 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 11135 #define ADD8(a, b, n) \ 11136 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 11137 #define SUB8(a, b, n) \ 11138 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 11139 #define PFX sh 11140 11141 #include "op_addsub.h" 11142 11143 /* Halved unsigned arithmetic. */ 11144 #define ADD16(a, b, n) \ 11145 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11146 #define SUB16(a, b, n) \ 11147 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11148 #define ADD8(a, b, n) \ 11149 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11150 #define SUB8(a, b, n) \ 11151 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11152 #define PFX uh 11153 11154 #include "op_addsub.h" 11155 11156 static inline uint8_t do_usad(uint8_t a, uint8_t b) 11157 { 11158 if (a > b) 11159 return a - b; 11160 else 11161 return b - a; 11162 } 11163 11164 /* Unsigned sum of absolute byte differences. */ 11165 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 11166 { 11167 uint32_t sum; 11168 sum = do_usad(a, b); 11169 sum += do_usad(a >> 8, b >> 8); 11170 sum += do_usad(a >> 16, b >>16); 11171 sum += do_usad(a >> 24, b >> 24); 11172 return sum; 11173 } 11174 11175 /* For ARMv6 SEL instruction. */ 11176 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 11177 { 11178 uint32_t mask; 11179 11180 mask = 0; 11181 if (flags & 1) 11182 mask |= 0xff; 11183 if (flags & 2) 11184 mask |= 0xff00; 11185 if (flags & 4) 11186 mask |= 0xff0000; 11187 if (flags & 8) 11188 mask |= 0xff000000; 11189 return (a & mask) | (b & ~mask); 11190 } 11191 11192 /* VFP support. We follow the convention used for VFP instructions: 11193 Single precision routines have a "s" suffix, double precision a 11194 "d" suffix. */ 11195 11196 /* Convert host exception flags to vfp form. */ 11197 static inline int vfp_exceptbits_from_host(int host_bits) 11198 { 11199 int target_bits = 0; 11200 11201 if (host_bits & float_flag_invalid) 11202 target_bits |= 1; 11203 if (host_bits & float_flag_divbyzero) 11204 target_bits |= 2; 11205 if (host_bits & float_flag_overflow) 11206 target_bits |= 4; 11207 if (host_bits & (float_flag_underflow | float_flag_output_denormal)) 11208 target_bits |= 8; 11209 if (host_bits & float_flag_inexact) 11210 target_bits |= 0x10; 11211 if (host_bits & float_flag_input_denormal) 11212 target_bits |= 0x80; 11213 return target_bits; 11214 } 11215 11216 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) 11217 { 11218 int i; 11219 uint32_t fpscr; 11220 11221 fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) 11222 | (env->vfp.vec_len << 16) 11223 | (env->vfp.vec_stride << 20); 11224 i = get_float_exception_flags(&env->vfp.fp_status); 11225 i |= get_float_exception_flags(&env->vfp.standard_fp_status); 11226 i |= get_float_exception_flags(&env->vfp.fp_status_f16); 11227 fpscr |= vfp_exceptbits_from_host(i); 11228 return fpscr; 11229 } 11230 11231 uint32_t vfp_get_fpscr(CPUARMState *env) 11232 { 11233 return HELPER(vfp_get_fpscr)(env); 11234 } 11235 11236 /* Convert vfp exception flags to target form. 
*/ 11237 static inline int vfp_exceptbits_to_host(int target_bits) 11238 { 11239 int host_bits = 0; 11240 11241 if (target_bits & 1) 11242 host_bits |= float_flag_invalid; 11243 if (target_bits & 2) 11244 host_bits |= float_flag_divbyzero; 11245 if (target_bits & 4) 11246 host_bits |= float_flag_overflow; 11247 if (target_bits & 8) 11248 host_bits |= float_flag_underflow; 11249 if (target_bits & 0x10) 11250 host_bits |= float_flag_inexact; 11251 if (target_bits & 0x80) 11252 host_bits |= float_flag_input_denormal; 11253 return host_bits; 11254 } 11255 11256 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) 11257 { 11258 int i; 11259 uint32_t changed; 11260 11261 changed = env->vfp.xregs[ARM_VFP_FPSCR]; 11262 env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff); 11263 env->vfp.vec_len = (val >> 16) & 7; 11264 env->vfp.vec_stride = (val >> 20) & 3; 11265 11266 changed ^= val; 11267 if (changed & (3 << 22)) { 11268 i = (val >> 22) & 3; 11269 switch (i) { 11270 case FPROUNDING_TIEEVEN: 11271 i = float_round_nearest_even; 11272 break; 11273 case FPROUNDING_POSINF: 11274 i = float_round_up; 11275 break; 11276 case FPROUNDING_NEGINF: 11277 i = float_round_down; 11278 break; 11279 case FPROUNDING_ZERO: 11280 i = float_round_to_zero; 11281 break; 11282 } 11283 set_float_rounding_mode(i, &env->vfp.fp_status); 11284 set_float_rounding_mode(i, &env->vfp.fp_status_f16); 11285 } 11286 if (changed & FPCR_FZ16) { 11287 bool ftz_enabled = val & FPCR_FZ16; 11288 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11289 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 11290 } 11291 if (changed & FPCR_FZ) { 11292 bool ftz_enabled = val & FPCR_FZ; 11293 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); 11294 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); 11295 } 11296 if (changed & FPCR_DN) { 11297 bool dnan_enabled = val & FPCR_DN; 11298 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); 11299 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); 11300 } 11301 11302 /* The exception flags are ORed together when we read fpscr so we 11303 * only need to preserve the current state in one of our 11304 * float_status values. 
11305 */ 11306 i = vfp_exceptbits_to_host(val); 11307 set_float_exception_flags(i, &env->vfp.fp_status); 11308 set_float_exception_flags(0, &env->vfp.fp_status_f16); 11309 set_float_exception_flags(0, &env->vfp.standard_fp_status); 11310 } 11311 11312 void vfp_set_fpscr(CPUARMState *env, uint32_t val) 11313 { 11314 HELPER(vfp_set_fpscr)(env, val); 11315 } 11316 11317 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) 11318 11319 #define VFP_BINOP(name) \ 11320 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ 11321 { \ 11322 float_status *fpst = fpstp; \ 11323 return float32_ ## name(a, b, fpst); \ 11324 } \ 11325 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ 11326 { \ 11327 float_status *fpst = fpstp; \ 11328 return float64_ ## name(a, b, fpst); \ 11329 } 11330 VFP_BINOP(add) 11331 VFP_BINOP(sub) 11332 VFP_BINOP(mul) 11333 VFP_BINOP(div) 11334 VFP_BINOP(min) 11335 VFP_BINOP(max) 11336 VFP_BINOP(minnum) 11337 VFP_BINOP(maxnum) 11338 #undef VFP_BINOP 11339 11340 float32 VFP_HELPER(neg, s)(float32 a) 11341 { 11342 return float32_chs(a); 11343 } 11344 11345 float64 VFP_HELPER(neg, d)(float64 a) 11346 { 11347 return float64_chs(a); 11348 } 11349 11350 float32 VFP_HELPER(abs, s)(float32 a) 11351 { 11352 return float32_abs(a); 11353 } 11354 11355 float64 VFP_HELPER(abs, d)(float64 a) 11356 { 11357 return float64_abs(a); 11358 } 11359 11360 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) 11361 { 11362 return float32_sqrt(a, &env->vfp.fp_status); 11363 } 11364 11365 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) 11366 { 11367 return float64_sqrt(a, &env->vfp.fp_status); 11368 } 11369 11370 /* XXX: check quiet/signaling case */ 11371 #define DO_VFP_cmp(p, type) \ 11372 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ 11373 { \ 11374 uint32_t flags; \ 11375 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \ 11376 case 0: flags = 0x6; break; \ 11377 case -1: flags = 0x8; break; \ 11378 case 1: flags = 0x2; break; \ 11379 default: case 2: flags = 0x3; break; \ 11380 } \ 11381 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11382 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11383 } \ 11384 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ 11385 { \ 11386 uint32_t flags; \ 11387 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \ 11388 case 0: flags = 0x6; break; \ 11389 case -1: flags = 0x8; break; \ 11390 case 1: flags = 0x2; break; \ 11391 default: case 2: flags = 0x3; break; \ 11392 } \ 11393 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \ 11394 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \ 11395 } 11396 DO_VFP_cmp(s, float32) 11397 DO_VFP_cmp(d, float64) 11398 #undef DO_VFP_cmp 11399 11400 /* Integer to float and float to integer conversions */ 11401 11402 #define CONV_ITOF(name, ftype, fsz, sign) \ 11403 ftype HELPER(name)(uint32_t x, void *fpstp) \ 11404 { \ 11405 float_status *fpst = fpstp; \ 11406 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ 11407 } 11408 11409 #define CONV_FTOI(name, ftype, fsz, sign, round) \ 11410 uint32_t HELPER(name)(ftype x, void *fpstp) \ 11411 { \ 11412 float_status *fpst = fpstp; \ 11413 if (float##fsz##_is_any_nan(x)) { \ 11414 float_raise(float_flag_invalid, fpst); \ 11415 return 0; \ 11416 } \ 11417 return float##fsz##_to_##sign##int32##round(x, fpst); \ 11418 } 11419 11420 #define FLOAT_CONVS(name, p, ftype, fsz, sign) \ 11421 CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ 11422 CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ 11423 
CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) 11424 11425 FLOAT_CONVS(si, h, uint32_t, 16, ) 11426 FLOAT_CONVS(si, s, float32, 32, ) 11427 FLOAT_CONVS(si, d, float64, 64, ) 11428 FLOAT_CONVS(ui, h, uint32_t, 16, u) 11429 FLOAT_CONVS(ui, s, float32, 32, u) 11430 FLOAT_CONVS(ui, d, float64, 64, u) 11431 11432 #undef CONV_ITOF 11433 #undef CONV_FTOI 11434 #undef FLOAT_CONVS 11435 11436 /* floating point conversion */ 11437 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) 11438 { 11439 return float32_to_float64(x, &env->vfp.fp_status); 11440 } 11441 11442 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) 11443 { 11444 return float64_to_float32(x, &env->vfp.fp_status); 11445 } 11446 11447 /* VFP3 fixed point conversion. */ 11448 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11449 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ 11450 void *fpstp) \ 11451 { \ 11452 float_status *fpst = fpstp; \ 11453 float##fsz tmp; \ 11454 tmp = itype##_to_##float##fsz(x, fpst); \ 11455 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \ 11456 } 11457 11458 /* Notice that we want only input-denormal exception flags from the 11459 * scalbn operation: the other possible flags (overflow+inexact if 11460 * we overflow to infinity, output-denormal) aren't correct for the 11461 * complete scale-and-convert operation. 11462 */ 11463 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \ 11464 uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \ 11465 uint32_t shift, \ 11466 void *fpstp) \ 11467 { \ 11468 float_status *fpst = fpstp; \ 11469 int old_exc_flags = get_float_exception_flags(fpst); \ 11470 float##fsz tmp; \ 11471 if (float##fsz##_is_any_nan(x)) { \ 11472 float_raise(float_flag_invalid, fpst); \ 11473 return 0; \ 11474 } \ 11475 tmp = float##fsz##_scalbn(x, shift, fpst); \ 11476 old_exc_flags |= get_float_exception_flags(fpst) \ 11477 & float_flag_input_denormal; \ 11478 set_float_exception_flags(old_exc_flags, fpst); \ 11479 return float##fsz##_to_##itype##round(tmp, fpst); \ 11480 } 11481 11482 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ 11483 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11484 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \ 11485 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11486 11487 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ 11488 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 11489 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ) 11490 11491 VFP_CONV_FIX(sh, d, 64, 64, int16) 11492 VFP_CONV_FIX(sl, d, 64, 64, int32) 11493 VFP_CONV_FIX_A64(sq, d, 64, 64, int64) 11494 VFP_CONV_FIX(uh, d, 64, 64, uint16) 11495 VFP_CONV_FIX(ul, d, 64, 64, uint32) 11496 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) 11497 VFP_CONV_FIX(sh, s, 32, 32, int16) 11498 VFP_CONV_FIX(sl, s, 32, 32, int32) 11499 VFP_CONV_FIX_A64(sq, s, 32, 64, int64) 11500 VFP_CONV_FIX(uh, s, 32, 32, uint16) 11501 VFP_CONV_FIX(ul, s, 32, 32, uint32) 11502 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) 11503 11504 #undef VFP_CONV_FIX 11505 #undef VFP_CONV_FIX_FLOAT 11506 #undef VFP_CONV_FLOAT_FIX_ROUND 11507 #undef VFP_CONV_FIX_A64 11508 11509 /* Conversion to/from f16 can overflow to infinity before/after scaling. 11510 * Therefore we convert to f64, scale, and then convert f64 to f16; or 11511 * vice versa for conversion to integer. 11512 * 11513 * For 16- and 32-bit integers, the conversion to f64 never rounds. 
11514 * For 64-bit integers, any integer that would cause rounding will also 11515 * overflow to f16 infinity, so there is no double rounding problem. 11516 */ 11517 11518 static float16 do_postscale_fp16(float64 f, int shift, float_status *fpst) 11519 { 11520 return float64_to_float16(float64_scalbn(f, -shift, fpst), true, fpst); 11521 } 11522 11523 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) 11524 { 11525 return do_postscale_fp16(int32_to_float64(x, fpst), shift, fpst); 11526 } 11527 11528 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) 11529 { 11530 return do_postscale_fp16(uint32_to_float64(x, fpst), shift, fpst); 11531 } 11532 11533 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) 11534 { 11535 return do_postscale_fp16(int64_to_float64(x, fpst), shift, fpst); 11536 } 11537 11538 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) 11539 { 11540 return do_postscale_fp16(uint64_to_float64(x, fpst), shift, fpst); 11541 } 11542 11543 static float64 do_prescale_fp16(float16 f, int shift, float_status *fpst) 11544 { 11545 if (unlikely(float16_is_any_nan(f))) { 11546 float_raise(float_flag_invalid, fpst); 11547 return 0; 11548 } else { 11549 int old_exc_flags = get_float_exception_flags(fpst); 11550 float64 ret; 11551 11552 ret = float16_to_float64(f, true, fpst); 11553 ret = float64_scalbn(ret, shift, fpst); 11554 old_exc_flags |= get_float_exception_flags(fpst) 11555 & float_flag_input_denormal; 11556 set_float_exception_flags(old_exc_flags, fpst); 11557 11558 return ret; 11559 } 11560 } 11561 11562 uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) 11563 { 11564 return float64_to_int16(do_prescale_fp16(x, shift, fpst), fpst); 11565 } 11566 11567 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) 11568 { 11569 return float64_to_uint16(do_prescale_fp16(x, shift, fpst), fpst); 11570 } 11571 11572 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) 11573 { 11574 return float64_to_int32(do_prescale_fp16(x, shift, fpst), fpst); 11575 } 11576 11577 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) 11578 { 11579 return float64_to_uint32(do_prescale_fp16(x, shift, fpst), fpst); 11580 } 11581 11582 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) 11583 { 11584 return float64_to_int64(do_prescale_fp16(x, shift, fpst), fpst); 11585 } 11586 11587 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) 11588 { 11589 return float64_to_uint64(do_prescale_fp16(x, shift, fpst), fpst); 11590 } 11591 11592 /* Set the current fp rounding mode and return the old one. 11593 * The argument is a softfloat float_round_ value. 11594 */ 11595 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) 11596 { 11597 float_status *fp_status = fpstp; 11598 11599 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11600 set_float_rounding_mode(rmode, fp_status); 11601 11602 return prev_rmode; 11603 } 11604 11605 /* Set the current fp rounding mode in the standard fp status and return 11606 * the old one. This is for NEON instructions that need to change the 11607 * rounding mode but wish to use the standard FPSCR values for everything 11608 * else. Always set the rounding mode back to the correct value after 11609 * modifying it. 11610 * The argument is a softfloat float_round_ value. 
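 * (Callers typically save the value returned here, perform the operation
 * with the temporary rounding mode, and then pass the saved value back in a
 * second call to restore the standard behaviour.)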
11611 */ 11612 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) 11613 { 11614 float_status *fp_status = &env->vfp.standard_fp_status; 11615 11616 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 11617 set_float_rounding_mode(rmode, fp_status); 11618 11619 return prev_rmode; 11620 } 11621 11622 /* Half precision conversions. */ 11623 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11624 { 11625 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11626 * it would affect flushing input denormals. 11627 */ 11628 float_status *fpst = fpstp; 11629 flag save = get_flush_inputs_to_zero(fpst); 11630 set_flush_inputs_to_zero(false, fpst); 11631 float32 r = float16_to_float32(a, !ahp_mode, fpst); 11632 set_flush_inputs_to_zero(save, fpst); 11633 return r; 11634 } 11635 11636 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) 11637 { 11638 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11639 * it would affect flushing output denormals. 11640 */ 11641 float_status *fpst = fpstp; 11642 flag save = get_flush_to_zero(fpst); 11643 set_flush_to_zero(false, fpst); 11644 float16 r = float32_to_float16(a, !ahp_mode, fpst); 11645 set_flush_to_zero(save, fpst); 11646 return r; 11647 } 11648 11649 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) 11650 { 11651 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11652 * it would affect flushing input denormals. 11653 */ 11654 float_status *fpst = fpstp; 11655 flag save = get_flush_inputs_to_zero(fpst); 11656 set_flush_inputs_to_zero(false, fpst); 11657 float64 r = float16_to_float64(a, !ahp_mode, fpst); 11658 set_flush_inputs_to_zero(save, fpst); 11659 return r; 11660 } 11661 11662 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) 11663 { 11664 /* Squash FZ16 to 0 for the duration of conversion. In this case, 11665 * it would affect flushing output denormals. 11666 */ 11667 float_status *fpst = fpstp; 11668 flag save = get_flush_to_zero(fpst); 11669 set_flush_to_zero(false, fpst); 11670 float16 r = float64_to_float16(a, !ahp_mode, fpst); 11671 set_flush_to_zero(save, fpst); 11672 return r; 11673 } 11674 11675 #define float32_two make_float32(0x40000000) 11676 #define float32_three make_float32(0x40400000) 11677 #define float32_one_point_five make_float32(0x3fc00000) 11678 11679 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) 11680 { 11681 float_status *s = &env->vfp.standard_fp_status; 11682 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11683 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11684 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11685 float_raise(float_flag_input_denormal, s); 11686 } 11687 return float32_two; 11688 } 11689 return float32_sub(float32_two, float32_mul(a, b, s), s); 11690 } 11691 11692 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) 11693 { 11694 float_status *s = &env->vfp.standard_fp_status; 11695 float32 product; 11696 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 11697 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 11698 if (!(float32_is_zero(a) || float32_is_zero(b))) { 11699 float_raise(float_flag_input_denormal, s); 11700 } 11701 return float32_one_point_five; 11702 } 11703 product = float32_mul(a, b, s); 11704 return float32_div(float32_sub(float32_three, product, s), float32_two, s); 11705 } 11706 11707 /* NEON helpers. 
 */

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)
#define float16_maxnorm make_float16(0x7bff)
#define float32_maxnorm make_float32(0x7f7fffff)
#define float64_maxnorm make_float64(0x7fefffffffffffffLL)

/* Reciprocal functions
 *
 * The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate
 */

/* See RecipEstimate()
 *
 * input is a 9 bit fixed point number
 * input range 256 .. 511 for a number from 0.5 <= x < 1.0.
 * result range 256 .. 511 for a number from 1.0 to 511/256.
 */

static int recip_estimate(int input)
{
    int a, b, r;
    assert(256 <= input && input < 512);
    a = (input * 2) + 1;
    b = (1 << 19) / a;
    r = (b + 1) >> 1;
    assert(256 <= r && r < 512);
    return r;
}

/*
 * Common wrapper to call recip_estimate
 *
 * The parameters are an exponent and a 64 bit fraction (without the
 * implicit bit) where the binary point is nominally at bit 52. The
 * estimated fraction is returned (and *exp updated); the caller then
 * packs the sign, exponent and fraction into the destination format.
 */

static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac)
{
    uint32_t scaled, estimate;
    uint64_t result_frac;
    int result_exp;

    /* Handle sub-normals */
    if (*exp == 0) {
        if (extract64(frac, 51, 1) == 0) {
            *exp = -1;
            frac <<= 2;
        } else {
            frac <<= 1;
        }
    }

    /* scaled = UInt('1':fraction<51:44>) */
    scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8));
    estimate = recip_estimate(scaled);

    result_exp = exp_off - *exp;
    result_frac = deposit64(0, 44, 8, estimate);
    if (result_exp == 0) {
        result_frac = deposit64(result_frac >> 1, 51, 1, 1);
    } else if (result_exp == -1) {
        result_frac = deposit64(result_frac >> 2, 50, 2, 1);
        result_exp = 0;
    }

    *exp = result_exp;

    return result_frac;
}

static bool round_to_inf(float_status *fpst, bool sign_bit)
{
    switch (fpst->float_rounding_mode) {
    case float_round_nearest_even: /* Round to Nearest */
        return true;
    case float_round_up: /* Round to +Inf */
        return !sign_bit;
    case float_round_down: /* Round to -Inf */
        return sign_bit;
    case float_round_to_zero: /* Round to Zero */
        return false;
    }

    g_assert_not_reached();
}

uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f16 = float16_squash_input_denormal(input, fpst);
    uint32_t f16_val = float16_val(f16);
    uint32_t f16_sign = float16_is_neg(f16);
    int f16_exp = extract32(f16_val, 10, 5);
    uint32_t f16_frac = extract32(f16_val, 0, 10);
    uint64_t f64_frac;

    if (float16_is_any_nan(f16)) {
        float16 nan = f16;
        if (float16_is_signaling_nan(f16, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(f16, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    } else if (float16_is_infinity(f16)) {
        return
float16_set_sign(float16_zero, float16_is_neg(f16)); 11822 } else if (float16_is_zero(f16)) { 11823 float_raise(float_flag_divbyzero, fpst); 11824 return float16_set_sign(float16_infinity, float16_is_neg(f16)); 11825 } else if (float16_abs(f16) < (1 << 8)) { 11826 /* Abs(value) < 2.0^-16 */ 11827 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11828 if (round_to_inf(fpst, f16_sign)) { 11829 return float16_set_sign(float16_infinity, f16_sign); 11830 } else { 11831 return float16_set_sign(float16_maxnorm, f16_sign); 11832 } 11833 } else if (f16_exp >= 29 && fpst->flush_to_zero) { 11834 float_raise(float_flag_underflow, fpst); 11835 return float16_set_sign(float16_zero, float16_is_neg(f16)); 11836 } 11837 11838 f64_frac = call_recip_estimate(&f16_exp, 29, 11839 ((uint64_t) f16_frac) << (52 - 10)); 11840 11841 /* result = sign : result_exp<4:0> : fraction<51:42> */ 11842 f16_val = deposit32(0, 15, 1, f16_sign); 11843 f16_val = deposit32(f16_val, 10, 5, f16_exp); 11844 f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); 11845 return make_float16(f16_val); 11846 } 11847 11848 float32 HELPER(recpe_f32)(float32 input, void *fpstp) 11849 { 11850 float_status *fpst = fpstp; 11851 float32 f32 = float32_squash_input_denormal(input, fpst); 11852 uint32_t f32_val = float32_val(f32); 11853 bool f32_sign = float32_is_neg(f32); 11854 int f32_exp = extract32(f32_val, 23, 8); 11855 uint32_t f32_frac = extract32(f32_val, 0, 23); 11856 uint64_t f64_frac; 11857 11858 if (float32_is_any_nan(f32)) { 11859 float32 nan = f32; 11860 if (float32_is_signaling_nan(f32, fpst)) { 11861 float_raise(float_flag_invalid, fpst); 11862 nan = float32_silence_nan(f32, fpst); 11863 } 11864 if (fpst->default_nan_mode) { 11865 nan = float32_default_nan(fpst); 11866 } 11867 return nan; 11868 } else if (float32_is_infinity(f32)) { 11869 return float32_set_sign(float32_zero, float32_is_neg(f32)); 11870 } else if (float32_is_zero(f32)) { 11871 float_raise(float_flag_divbyzero, fpst); 11872 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 11873 } else if (float32_abs(f32) < (1ULL << 21)) { 11874 /* Abs(value) < 2.0^-128 */ 11875 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11876 if (round_to_inf(fpst, f32_sign)) { 11877 return float32_set_sign(float32_infinity, f32_sign); 11878 } else { 11879 return float32_set_sign(float32_maxnorm, f32_sign); 11880 } 11881 } else if (f32_exp >= 253 && fpst->flush_to_zero) { 11882 float_raise(float_flag_underflow, fpst); 11883 return float32_set_sign(float32_zero, float32_is_neg(f32)); 11884 } 11885 11886 f64_frac = call_recip_estimate(&f32_exp, 253, 11887 ((uint64_t) f32_frac) << (52 - 23)); 11888 11889 /* result = sign : result_exp<7:0> : fraction<51:29> */ 11890 f32_val = deposit32(0, 31, 1, f32_sign); 11891 f32_val = deposit32(f32_val, 23, 8, f32_exp); 11892 f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); 11893 return make_float32(f32_val); 11894 } 11895 11896 float64 HELPER(recpe_f64)(float64 input, void *fpstp) 11897 { 11898 float_status *fpst = fpstp; 11899 float64 f64 = float64_squash_input_denormal(input, fpst); 11900 uint64_t f64_val = float64_val(f64); 11901 bool f64_sign = float64_is_neg(f64); 11902 int f64_exp = extract64(f64_val, 52, 11); 11903 uint64_t f64_frac = extract64(f64_val, 0, 52); 11904 11905 /* Deal with any special cases */ 11906 if (float64_is_any_nan(f64)) { 11907 float64 nan = f64; 11908 if (float64_is_signaling_nan(f64, fpst)) { 11909 float_raise(float_flag_invalid, fpst); 11910 nan = 
float64_silence_nan(f64, fpst); 11911 } 11912 if (fpst->default_nan_mode) { 11913 nan = float64_default_nan(fpst); 11914 } 11915 return nan; 11916 } else if (float64_is_infinity(f64)) { 11917 return float64_set_sign(float64_zero, float64_is_neg(f64)); 11918 } else if (float64_is_zero(f64)) { 11919 float_raise(float_flag_divbyzero, fpst); 11920 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 11921 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { 11922 /* Abs(value) < 2.0^-1024 */ 11923 float_raise(float_flag_overflow | float_flag_inexact, fpst); 11924 if (round_to_inf(fpst, f64_sign)) { 11925 return float64_set_sign(float64_infinity, f64_sign); 11926 } else { 11927 return float64_set_sign(float64_maxnorm, f64_sign); 11928 } 11929 } else if (f64_exp >= 2045 && fpst->flush_to_zero) { 11930 float_raise(float_flag_underflow, fpst); 11931 return float64_set_sign(float64_zero, float64_is_neg(f64)); 11932 } 11933 11934 f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); 11935 11936 /* result = sign : result_exp<10:0> : fraction<51:0>; */ 11937 f64_val = deposit64(0, 63, 1, f64_sign); 11938 f64_val = deposit64(f64_val, 52, 11, f64_exp); 11939 f64_val = deposit64(f64_val, 0, 52, f64_frac); 11940 return make_float64(f64_val); 11941 } 11942 11943 /* The algorithm that must be used to calculate the estimate 11944 * is specified by the ARM ARM. 11945 */ 11946 11947 static int do_recip_sqrt_estimate(int a) 11948 { 11949 int b, estimate; 11950 11951 assert(128 <= a && a < 512); 11952 if (a < 256) { 11953 a = a * 2 + 1; 11954 } else { 11955 a = (a >> 1) << 1; 11956 a = (a + 1) * 2; 11957 } 11958 b = 512; 11959 while (a * (b + 1) * (b + 1) < (1 << 28)) { 11960 b += 1; 11961 } 11962 estimate = (b + 1) / 2; 11963 assert(256 <= estimate && estimate < 512); 11964 11965 return estimate; 11966 } 11967 11968 11969 static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) 11970 { 11971 int estimate; 11972 uint32_t scaled; 11973 11974 if (*exp == 0) { 11975 while (extract64(frac, 51, 1) == 0) { 11976 frac = frac << 1; 11977 *exp -= 1; 11978 } 11979 frac = extract64(frac, 0, 51) << 1; 11980 } 11981 11982 if (*exp & 1) { 11983 /* scaled = UInt('01':fraction<51:45>) */ 11984 scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); 11985 } else { 11986 /* scaled = UInt('1':fraction<51:44>) */ 11987 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 11988 } 11989 estimate = do_recip_sqrt_estimate(scaled); 11990 11991 *exp = (exp_off - *exp) / 2; 11992 return extract64(estimate, 0, 8) << 44; 11993 } 11994 11995 uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) 11996 { 11997 float_status *s = fpstp; 11998 float16 f16 = float16_squash_input_denormal(input, s); 11999 uint16_t val = float16_val(f16); 12000 bool f16_sign = float16_is_neg(f16); 12001 int f16_exp = extract32(val, 10, 5); 12002 uint16_t f16_frac = extract32(val, 0, 10); 12003 uint64_t f64_frac; 12004 12005 if (float16_is_any_nan(f16)) { 12006 float16 nan = f16; 12007 if (float16_is_signaling_nan(f16, s)) { 12008 float_raise(float_flag_invalid, s); 12009 nan = float16_silence_nan(f16, s); 12010 } 12011 if (s->default_nan_mode) { 12012 nan = float16_default_nan(s); 12013 } 12014 return nan; 12015 } else if (float16_is_zero(f16)) { 12016 float_raise(float_flag_divbyzero, s); 12017 return float16_set_sign(float16_infinity, f16_sign); 12018 } else if (f16_sign) { 12019 float_raise(float_flag_invalid, s); 12020 return float16_default_nan(s); 12021 } else if (float16_is_infinity(f16)) { 12022 return 
float16_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f16_frac) << (52 - 10);

    f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac);

    /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */
    val = deposit32(0, 15, 1, f16_sign);
    val = deposit32(val, 10, 5, f16_exp);
    val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float16(val);
}

float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
{
    float_status *s = fpstp;
    float32 f32 = float32_squash_input_denormal(input, s);
    uint32_t val = float32_val(f32);
    uint32_t f32_sign = float32_is_neg(f32);
    int f32_exp = extract32(val, 23, 8);
    uint32_t f32_frac = extract32(val, 0, 23);
    uint64_t f64_frac;

    if (float32_is_any_nan(f32)) {
        float32 nan = f32;
        if (float32_is_signaling_nan(f32, s)) {
            float_raise(float_flag_invalid, s);
            nan = float32_silence_nan(f32, s);
        }
        if (s->default_nan_mode) {
            nan = float32_default_nan(s);
        }
        return nan;
    } else if (float32_is_zero(f32)) {
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(f32));
    } else if (float32_is_neg(f32)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan(s);
    } else if (float32_is_infinity(f32)) {
        return float32_zero;
    }

    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */

    f64_frac = ((uint64_t) f32_frac) << 29;

    f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac);

    /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */
    val = deposit32(0, 31, 1, f32_sign);
    val = deposit32(val, 23, 8, f32_exp);
    val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float32(val);
}

float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
{
    float_status *s = fpstp;
    float64 f64 = float64_squash_input_denormal(input, s);
    uint64_t val = float64_val(f64);
    bool f64_sign = float64_is_neg(f64);
    int f64_exp = extract64(val, 52, 11);
    uint64_t f64_frac = extract64(val, 0, 52);

    if (float64_is_any_nan(f64)) {
        float64 nan = f64;
        if (float64_is_signaling_nan(f64, s)) {
            float_raise(float_flag_invalid, s);
            nan = float64_silence_nan(f64, s);
        }
        if (s->default_nan_mode) {
            nan = float64_default_nan(s);
        }
        return nan;
    } else if (float64_is_zero(f64)) {
        float_raise(float_flag_divbyzero, s);
        return float64_set_sign(float64_infinity, float64_is_neg(f64));
    } else if (float64_is_neg(f64)) {
        float_raise(float_flag_invalid, s);
        return float64_default_nan(s);
    } else if (float64_is_infinity(f64)) {
        return float64_zero;
    }

    f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac);

    /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */
    val = deposit64(0, 63, 1, f64_sign);
    val = deposit64(val, 52, 11, f64_exp);
    val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8));
    return make_float64(val);
}

uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
{
    /* float_status *s = fpstp; */
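    /*
     * The operand is treated as an unsigned 0.32 fixed-point fraction:
     * values with bit 31 clear (i.e. less than 0.5) saturate to all-ones.
     * Otherwise the 9-bit estimate input comes from bits <31:23> (so it is
     * in the range 256..511) and the 9-bit result is returned in bits
     * <31:23> with the low bits zero, as per the ARM ARM
     * UnsignedRecipEstimate() pseudocode.
     */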
    int input, estimate;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    input = extract32(a, 23, 9);
    estimate = recip_estimate(input);

    return deposit32(0, (32 - 9), 9, estimate);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
{
    int estimate;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    estimate = do_recip_sqrt_estimate(extract32(a, 23, 9));

    return deposit32(0, 23, 9, estimate);
}

/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.
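     * The ARM CRC32 instructions take the accumulator and produce the
     * result without that inversion, so we XOR with 0xffffffff on the way
     * in and on the way out to cancel zlib's pre- and post-inversion.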
*/ 12246 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 12247 } 12248 12249 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 12250 { 12251 uint8_t buf[4]; 12252 12253 stl_le_p(buf, val); 12254 12255 /* Linux crc32c converts the output to one's complement. */ 12256 return crc32c(acc, buf, bytes) ^ 0xffffffff; 12257 } 12258 12259 /* Return the exception level to which FP-disabled exceptions should 12260 * be taken, or 0 if FP is enabled. 12261 */ 12262 static inline int fp_exception_el(CPUARMState *env) 12263 { 12264 #ifndef CONFIG_USER_ONLY 12265 int fpen; 12266 int cur_el = arm_current_el(env); 12267 12268 /* CPACR and the CPTR registers don't exist before v6, so FP is 12269 * always accessible 12270 */ 12271 if (!arm_feature(env, ARM_FEATURE_V6)) { 12272 return 0; 12273 } 12274 12275 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 12276 * 0, 2 : trap EL0 and EL1/PL1 accesses 12277 * 1 : trap only EL0 accesses 12278 * 3 : trap no accesses 12279 */ 12280 fpen = extract32(env->cp15.cpacr_el1, 20, 2); 12281 switch (fpen) { 12282 case 0: 12283 case 2: 12284 if (cur_el == 0 || cur_el == 1) { 12285 /* Trap to PL1, which might be EL1 or EL3 */ 12286 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 12287 return 3; 12288 } 12289 return 1; 12290 } 12291 if (cur_el == 3 && !is_a64(env)) { 12292 /* Secure PL1 running at EL3 */ 12293 return 3; 12294 } 12295 break; 12296 case 1: 12297 if (cur_el == 0) { 12298 return 1; 12299 } 12300 break; 12301 case 3: 12302 break; 12303 } 12304 12305 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 12306 * check because zero bits in the registers mean "don't trap". 12307 */ 12308 12309 /* CPTR_EL2 : present in v7VE or v8 */ 12310 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 12311 && !arm_is_secure_below_el3(env)) { 12312 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 12313 return 2; 12314 } 12315 12316 /* CPTR_EL3 : present in v8 */ 12317 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 12318 /* Trap all FP ops to EL3 */ 12319 return 3; 12320 } 12321 #endif 12322 return 0; 12323 } 12324 12325 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 12326 target_ulong *cs_base, uint32_t *pflags) 12327 { 12328 ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false)); 12329 int fp_el = fp_exception_el(env); 12330 uint32_t flags; 12331 12332 if (is_a64(env)) { 12333 int sve_el = sve_exception_el(env); 12334 uint32_t zcr_len; 12335 12336 *pc = env->pc; 12337 flags = ARM_TBFLAG_AARCH64_STATE_MASK; 12338 /* Get control bits for tagged addresses */ 12339 flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT); 12340 flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT); 12341 flags |= sve_el << ARM_TBFLAG_SVEEXC_EL_SHIFT; 12342 12343 /* If SVE is disabled, but FP is enabled, 12344 then the effective len is 0. */ 12345 if (sve_el != 0 && fp_el == 0) { 12346 zcr_len = 0; 12347 } else { 12348 int current_el = arm_current_el(env); 12349 12350 zcr_len = env->vfp.zcr_el[current_el <= 1 ? 
1 : current_el]; 12351 zcr_len &= 0xf; 12352 if (current_el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { 12353 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 12354 } 12355 if (current_el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { 12356 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 12357 } 12358 } 12359 flags |= zcr_len << ARM_TBFLAG_ZCR_LEN_SHIFT; 12360 } else { 12361 *pc = env->regs[15]; 12362 flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT) 12363 | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT) 12364 | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT) 12365 | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT) 12366 | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT); 12367 if (!(access_secure_reg(env))) { 12368 flags |= ARM_TBFLAG_NS_MASK; 12369 } 12370 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30) 12371 || arm_el_is_aa64(env, 1)) { 12372 flags |= ARM_TBFLAG_VFPEN_MASK; 12373 } 12374 flags |= (extract32(env->cp15.c15_cpar, 0, 2) 12375 << ARM_TBFLAG_XSCALE_CPAR_SHIFT); 12376 } 12377 12378 flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT); 12379 12380 /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 12381 * states defined in the ARM ARM for software singlestep: 12382 * SS_ACTIVE PSTATE.SS State 12383 * 0 x Inactive (the TB flag for SS is always 0) 12384 * 1 0 Active-pending 12385 * 1 1 Active-not-pending 12386 */ 12387 if (arm_singlestep_active(env)) { 12388 flags |= ARM_TBFLAG_SS_ACTIVE_MASK; 12389 if (is_a64(env)) { 12390 if (env->pstate & PSTATE_SS) { 12391 flags |= ARM_TBFLAG_PSTATE_SS_MASK; 12392 } 12393 } else { 12394 if (env->uncached_cpsr & PSTATE_SS) { 12395 flags |= ARM_TBFLAG_PSTATE_SS_MASK; 12396 } 12397 } 12398 } 12399 if (arm_cpu_data_is_big_endian(env)) { 12400 flags |= ARM_TBFLAG_BE_DATA_MASK; 12401 } 12402 flags |= fp_el << ARM_TBFLAG_FPEXC_EL_SHIFT; 12403 12404 if (arm_v7m_is_handler_mode(env)) { 12405 flags |= ARM_TBFLAG_HANDLER_MASK; 12406 } 12407 12408 *pflags = flags; 12409 *cs_base = 0; 12410 } 12411