1 /* 2 * ARM generic helpers. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/units.h" 11 #include "target/arm/idau.h" 12 #include "trace.h" 13 #include "cpu.h" 14 #include "internals.h" 15 #include "exec/gdbstub.h" 16 #include "exec/helper-proto.h" 17 #include "qemu/host-utils.h" 18 #include "qemu/main-loop.h" 19 #include "qemu/bitops.h" 20 #include "qemu/crc32c.h" 21 #include "qemu/qemu-print.h" 22 #include "exec/exec-all.h" 23 #include <zlib.h> /* For crc32 */ 24 #include "hw/irq.h" 25 #include "hw/semihosting/semihost.h" 26 #include "sysemu/cpus.h" 27 #include "sysemu/kvm.h" 28 #include "sysemu/tcg.h" 29 #include "qemu/range.h" 30 #include "qapi/qapi-commands-machine-target.h" 31 #include "qapi/error.h" 32 #include "qemu/guest-random.h" 33 #ifdef CONFIG_TCG 34 #include "arm_ldst.h" 35 #include "exec/cpu_ldst.h" 36 #endif 37 38 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 39 40 #ifndef CONFIG_USER_ONLY 41 42 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 43 MMUAccessType access_type, ARMMMUIdx mmu_idx, 44 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 45 target_ulong *page_size_ptr, 46 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 47 #endif 48 49 static void switch_mode(CPUARMState *env, int mode); 50 51 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 52 { 53 int nregs; 54 55 /* VFP data registers are always little-endian. */ 56 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 57 if (reg < nregs) { 58 stq_le_p(buf, *aa32_vfp_dreg(env, reg)); 59 return 8; 60 } 61 if (arm_feature(env, ARM_FEATURE_NEON)) { 62 /* Aliases for Q regs. */ 63 nregs += 16; 64 if (reg < nregs) { 65 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 66 stq_le_p(buf, q[0]); 67 stq_le_p(buf + 8, q[1]); 68 return 16; 69 } 70 } 71 switch (reg - nregs) { 72 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4; 73 case 1: stl_p(buf, vfp_get_fpscr(env)); return 4; 74 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4; 75 } 76 return 0; 77 } 78 79 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 80 { 81 int nregs; 82 83 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 84 if (reg < nregs) { 85 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); 86 return 8; 87 } 88 if (arm_feature(env, ARM_FEATURE_NEON)) { 89 nregs += 16; 90 if (reg < nregs) { 91 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 92 q[0] = ldq_le_p(buf); 93 q[1] = ldq_le_p(buf + 8); 94 return 16; 95 } 96 } 97 switch (reg - nregs) { 98 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; 99 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4; 100 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; 101 } 102 return 0; 103 } 104 105 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 106 { 107 switch (reg) { 108 case 0 ... 31: 109 /* 128 bit FP register */ 110 { 111 uint64_t *q = aa64_vfp_qreg(env, reg); 112 stq_le_p(buf, q[0]); 113 stq_le_p(buf + 8, q[1]); 114 return 16; 115 } 116 case 32: 117 /* FPSR */ 118 stl_p(buf, vfp_get_fpsr(env)); 119 return 4; 120 case 33: 121 /* FPCR */ 122 stl_p(buf, vfp_get_fpcr(env)); 123 return 4; 124 default: 125 return 0; 126 } 127 } 128 129 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 130 { 131 switch (reg) { 132 case 0 ... 
31: 133 /* 128 bit FP register */ 134 { 135 uint64_t *q = aa64_vfp_qreg(env, reg); 136 q[0] = ldq_le_p(buf); 137 q[1] = ldq_le_p(buf + 8); 138 return 16; 139 } 140 case 32: 141 /* FPSR */ 142 vfp_set_fpsr(env, ldl_p(buf)); 143 return 4; 144 case 33: 145 /* FPCR */ 146 vfp_set_fpcr(env, ldl_p(buf)); 147 return 4; 148 default: 149 return 0; 150 } 151 } 152 153 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 154 { 155 assert(ri->fieldoffset); 156 if (cpreg_field_is_64bit(ri)) { 157 return CPREG_FIELD64(env, ri); 158 } else { 159 return CPREG_FIELD32(env, ri); 160 } 161 } 162 163 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 164 uint64_t value) 165 { 166 assert(ri->fieldoffset); 167 if (cpreg_field_is_64bit(ri)) { 168 CPREG_FIELD64(env, ri) = value; 169 } else { 170 CPREG_FIELD32(env, ri) = value; 171 } 172 } 173 174 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 175 { 176 return (char *)env + ri->fieldoffset; 177 } 178 179 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 180 { 181 /* Raw read of a coprocessor register (as needed for migration, etc). */ 182 if (ri->type & ARM_CP_CONST) { 183 return ri->resetvalue; 184 } else if (ri->raw_readfn) { 185 return ri->raw_readfn(env, ri); 186 } else if (ri->readfn) { 187 return ri->readfn(env, ri); 188 } else { 189 return raw_read(env, ri); 190 } 191 } 192 193 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 194 uint64_t v) 195 { 196 /* Raw write of a coprocessor register (as needed for migration, etc). 197 * Note that constant registers are treated as write-ignored; the 198 * caller should check for success by whether a readback gives the 199 * value written. 200 */ 201 if (ri->type & ARM_CP_CONST) { 202 return; 203 } else if (ri->raw_writefn) { 204 ri->raw_writefn(env, ri, v); 205 } else if (ri->writefn) { 206 ri->writefn(env, ri, v); 207 } else { 208 raw_write(env, ri, v); 209 } 210 } 211 212 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg) 213 { 214 ARMCPU *cpu = env_archcpu(env); 215 const ARMCPRegInfo *ri; 216 uint32_t key; 217 218 key = cpu->dyn_xml.cpregs_keys[reg]; 219 ri = get_arm_cp_reginfo(cpu->cp_regs, key); 220 if (ri) { 221 if (cpreg_field_is_64bit(ri)) { 222 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); 223 } else { 224 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); 225 } 226 } 227 return 0; 228 } 229 230 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) 231 { 232 return 0; 233 } 234 235 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 236 { 237 /* Return true if the regdef would cause an assertion if you called 238 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 239 * program bug for it not to have the NO_RAW flag). 240 * NB that returning false here doesn't necessarily mean that calling 241 * read/write_raw_cp_reg() is safe, because we can't distinguish "has 242 * read/write access functions which are safe for raw use" from "has 243 * read/write access functions which have side effects but has forgotten 244 * to provide raw access functions". 245 * The tests here line up with the conditions in read/write_raw_cp_reg() 246 * and assertions in raw_read()/raw_write(). 
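     * Concretely, a regdef only trips those assertions if it is not
     * ARM_CP_CONST, has no fieldoffset, and is missing either a usable
     * read accessor or a usable write accessor.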
247 */ 248 if ((ri->type & ARM_CP_CONST) || 249 ri->fieldoffset || 250 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { 251 return false; 252 } 253 return true; 254 } 255 256 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) 257 { 258 /* Write the coprocessor state from cpu->env to the (index,value) list. */ 259 int i; 260 bool ok = true; 261 262 for (i = 0; i < cpu->cpreg_array_len; i++) { 263 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 264 const ARMCPRegInfo *ri; 265 uint64_t newval; 266 267 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 268 if (!ri) { 269 ok = false; 270 continue; 271 } 272 if (ri->type & ARM_CP_NO_RAW) { 273 continue; 274 } 275 276 newval = read_raw_cp_reg(&cpu->env, ri); 277 if (kvm_sync) { 278 /* 279 * Only sync if the previous list->cpustate sync succeeded. 280 * Rather than tracking the success/failure state for every 281 * item in the list, we just recheck "does the raw write we must 282 * have made in write_list_to_cpustate() read back OK" here. 283 */ 284 uint64_t oldval = cpu->cpreg_values[i]; 285 286 if (oldval == newval) { 287 continue; 288 } 289 290 write_raw_cp_reg(&cpu->env, ri, oldval); 291 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { 292 continue; 293 } 294 295 write_raw_cp_reg(&cpu->env, ri, newval); 296 } 297 cpu->cpreg_values[i] = newval; 298 } 299 return ok; 300 } 301 302 bool write_list_to_cpustate(ARMCPU *cpu) 303 { 304 int i; 305 bool ok = true; 306 307 for (i = 0; i < cpu->cpreg_array_len; i++) { 308 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 309 uint64_t v = cpu->cpreg_values[i]; 310 const ARMCPRegInfo *ri; 311 312 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 313 if (!ri) { 314 ok = false; 315 continue; 316 } 317 if (ri->type & ARM_CP_NO_RAW) { 318 continue; 319 } 320 /* Write value and confirm it reads back as written 321 * (to catch read-only registers and partially read-only 322 * registers where the incoming migration value doesn't match) 323 */ 324 write_raw_cp_reg(&cpu->env, ri, v); 325 if (read_raw_cp_reg(&cpu->env, ri) != v) { 326 ok = false; 327 } 328 } 329 return ok; 330 } 331 332 static void add_cpreg_to_list(gpointer key, gpointer opaque) 333 { 334 ARMCPU *cpu = opaque; 335 uint64_t regidx; 336 const ARMCPRegInfo *ri; 337 338 regidx = *(uint32_t *)key; 339 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 340 341 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 342 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); 343 /* The value array need not be initialized at this point */ 344 cpu->cpreg_array_len++; 345 } 346 } 347 348 static void count_cpreg(gpointer key, gpointer opaque) 349 { 350 ARMCPU *cpu = opaque; 351 uint64_t regidx; 352 const ARMCPRegInfo *ri; 353 354 regidx = *(uint32_t *)key; 355 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 356 357 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 358 cpu->cpreg_array_len++; 359 } 360 } 361 362 static gint cpreg_key_compare(gconstpointer a, gconstpointer b) 363 { 364 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); 365 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); 366 367 if (aidx > bidx) { 368 return 1; 369 } 370 if (aidx < bidx) { 371 return -1; 372 } 373 return 0; 374 } 375 376 void init_cpreg_list(ARMCPU *cpu) 377 { 378 /* Initialise the cpreg_tuples[] array based on the cp_regs hash. 379 * Note that we require cpreg_tuples[] to be sorted by key ID. 
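     * The list is built in two passes over the sorted keys: count_cpreg()
     * sizes the arrays, then add_cpreg_to_list() fills in the indexes;
     * registers marked ARM_CP_NO_RAW or ARM_CP_ALIAS are skipped by both.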
380 */ 381 GList *keys; 382 int arraylen; 383 384 keys = g_hash_table_get_keys(cpu->cp_regs); 385 keys = g_list_sort(keys, cpreg_key_compare); 386 387 cpu->cpreg_array_len = 0; 388 389 g_list_foreach(keys, count_cpreg, cpu); 390 391 arraylen = cpu->cpreg_array_len; 392 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 393 cpu->cpreg_values = g_new(uint64_t, arraylen); 394 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 395 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 396 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 397 cpu->cpreg_array_len = 0; 398 399 g_list_foreach(keys, add_cpreg_to_list, cpu); 400 401 assert(cpu->cpreg_array_len == arraylen); 402 403 g_list_free(keys); 404 } 405 406 /* 407 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but 408 * they are accessible when EL3 is using AArch64 regardless of EL3.NS. 409 * 410 * access_el3_aa32ns: Used to check AArch32 register views. 411 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views. 412 */ 413 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 414 const ARMCPRegInfo *ri, 415 bool isread) 416 { 417 bool secure = arm_is_secure_below_el3(env); 418 419 assert(!arm_el_is_aa64(env, 3)); 420 if (secure) { 421 return CP_ACCESS_TRAP_UNCATEGORIZED; 422 } 423 return CP_ACCESS_OK; 424 } 425 426 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env, 427 const ARMCPRegInfo *ri, 428 bool isread) 429 { 430 if (!arm_el_is_aa64(env, 3)) { 431 return access_el3_aa32ns(env, ri, isread); 432 } 433 return CP_ACCESS_OK; 434 } 435 436 /* Some secure-only AArch32 registers trap to EL3 if used from 437 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 438 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 439 * We assume that the .access field is set to PL1_RW. 440 */ 441 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 442 const ARMCPRegInfo *ri, 443 bool isread) 444 { 445 if (arm_current_el(env) == 3) { 446 return CP_ACCESS_OK; 447 } 448 if (arm_is_secure_below_el3(env)) { 449 return CP_ACCESS_TRAP_EL3; 450 } 451 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 452 return CP_ACCESS_TRAP_UNCATEGORIZED; 453 } 454 455 /* Check for traps to "powerdown debug" registers, which are controlled 456 * by MDCR.TDOSA 457 */ 458 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 459 bool isread) 460 { 461 int el = arm_current_el(env); 462 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) || 463 (env->cp15.mdcr_el2 & MDCR_TDE) || 464 (arm_hcr_el2_eff(env) & HCR_TGE); 465 466 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) { 467 return CP_ACCESS_TRAP_EL2; 468 } 469 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 470 return CP_ACCESS_TRAP_EL3; 471 } 472 return CP_ACCESS_OK; 473 } 474 475 /* Check for traps to "debug ROM" registers, which are controlled 476 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 
477 */ 478 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 479 bool isread) 480 { 481 int el = arm_current_el(env); 482 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) || 483 (env->cp15.mdcr_el2 & MDCR_TDE) || 484 (arm_hcr_el2_eff(env) & HCR_TGE); 485 486 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) { 487 return CP_ACCESS_TRAP_EL2; 488 } 489 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 490 return CP_ACCESS_TRAP_EL3; 491 } 492 return CP_ACCESS_OK; 493 } 494 495 /* Check for traps to general debug registers, which are controlled 496 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 497 */ 498 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 499 bool isread) 500 { 501 int el = arm_current_el(env); 502 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) || 503 (env->cp15.mdcr_el2 & MDCR_TDE) || 504 (arm_hcr_el2_eff(env) & HCR_TGE); 505 506 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) { 507 return CP_ACCESS_TRAP_EL2; 508 } 509 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 510 return CP_ACCESS_TRAP_EL3; 511 } 512 return CP_ACCESS_OK; 513 } 514 515 /* Check for traps to performance monitor registers, which are controlled 516 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 517 */ 518 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 519 bool isread) 520 { 521 int el = arm_current_el(env); 522 523 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 524 && !arm_is_secure_below_el3(env)) { 525 return CP_ACCESS_TRAP_EL2; 526 } 527 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 528 return CP_ACCESS_TRAP_EL3; 529 } 530 return CP_ACCESS_OK; 531 } 532 533 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 534 { 535 ARMCPU *cpu = env_archcpu(env); 536 537 raw_write(env, ri, value); 538 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 539 } 540 541 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 542 { 543 ARMCPU *cpu = env_archcpu(env); 544 545 if (raw_read(env, ri) != value) { 546 /* Unlike real hardware the qemu TLB uses virtual addresses, 547 * not modified virtual addresses, so this causes a TLB flush. 548 */ 549 tlb_flush(CPU(cpu)); 550 raw_write(env, ri, value); 551 } 552 } 553 554 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 555 uint64_t value) 556 { 557 ARMCPU *cpu = env_archcpu(env); 558 559 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 560 && !extended_addresses_enabled(env)) { 561 /* For VMSA (when not using the LPAE long descriptor page table 562 * format) this register includes the ASID, so do a TLB flush. 563 * For PMSA it is purely a process ID and no action is needed. 
564 */ 565 tlb_flush(CPU(cpu)); 566 } 567 raw_write(env, ri, value); 568 } 569 570 /* IS variants of TLB operations must affect all cores */ 571 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 572 uint64_t value) 573 { 574 CPUState *cs = env_cpu(env); 575 576 tlb_flush_all_cpus_synced(cs); 577 } 578 579 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 580 uint64_t value) 581 { 582 CPUState *cs = env_cpu(env); 583 584 tlb_flush_all_cpus_synced(cs); 585 } 586 587 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 588 uint64_t value) 589 { 590 CPUState *cs = env_cpu(env); 591 592 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 593 } 594 595 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 596 uint64_t value) 597 { 598 CPUState *cs = env_cpu(env); 599 600 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 601 } 602 603 /* 604 * Non-IS variants of TLB operations are upgraded to 605 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to 606 * force broadcast of these operations. 607 */ 608 static bool tlb_force_broadcast(CPUARMState *env) 609 { 610 return (env->cp15.hcr_el2 & HCR_FB) && 611 arm_current_el(env) == 1 && arm_is_secure_below_el3(env); 612 } 613 614 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, 615 uint64_t value) 616 { 617 /* Invalidate all (TLBIALL) */ 618 CPUState *cs = env_cpu(env); 619 620 if (tlb_force_broadcast(env)) { 621 tlb_flush_all_cpus_synced(cs); 622 } else { 623 tlb_flush(cs); 624 } 625 } 626 627 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, 628 uint64_t value) 629 { 630 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 631 CPUState *cs = env_cpu(env); 632 633 value &= TARGET_PAGE_MASK; 634 if (tlb_force_broadcast(env)) { 635 tlb_flush_page_all_cpus_synced(cs, value); 636 } else { 637 tlb_flush_page(cs, value); 638 } 639 } 640 641 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, 642 uint64_t value) 643 { 644 /* Invalidate by ASID (TLBIASID) */ 645 CPUState *cs = env_cpu(env); 646 647 if (tlb_force_broadcast(env)) { 648 tlb_flush_all_cpus_synced(cs); 649 } else { 650 tlb_flush(cs); 651 } 652 } 653 654 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, 655 uint64_t value) 656 { 657 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 658 CPUState *cs = env_cpu(env); 659 660 value &= TARGET_PAGE_MASK; 661 if (tlb_force_broadcast(env)) { 662 tlb_flush_page_all_cpus_synced(cs, value); 663 } else { 664 tlb_flush_page(cs, value); 665 } 666 } 667 668 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 669 uint64_t value) 670 { 671 CPUState *cs = env_cpu(env); 672 673 tlb_flush_by_mmuidx(cs, 674 ARMMMUIdxBit_E10_1 | 675 ARMMMUIdxBit_E10_1_PAN | 676 ARMMMUIdxBit_E10_0 | 677 ARMMMUIdxBit_Stage2); 678 } 679 680 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 681 uint64_t value) 682 { 683 CPUState *cs = env_cpu(env); 684 685 tlb_flush_by_mmuidx_all_cpus_synced(cs, 686 ARMMMUIdxBit_E10_1 | 687 ARMMMUIdxBit_E10_1_PAN | 688 ARMMMUIdxBit_E10_0 | 689 ARMMMUIdxBit_Stage2); 690 } 691 692 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, 693 uint64_t value) 694 { 695 /* Invalidate by IPA. This has to invalidate any structures that 696 * contain only stage 2 translation information, but does not need 697 * to apply to structures that contain combined stage 1 and stage 2 698 * translation information. 
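     * The value written holds IPA[39:12] in its low bits; the
     * sextract64(value << 12, 0, 40) below rebuilds the 40-bit intermediate
     * physical address that is to be invalidated.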
699 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 700 */ 701 CPUState *cs = env_cpu(env); 702 uint64_t pageaddr; 703 704 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 705 return; 706 } 707 708 pageaddr = sextract64(value << 12, 0, 40); 709 710 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); 711 } 712 713 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 714 uint64_t value) 715 { 716 CPUState *cs = env_cpu(env); 717 uint64_t pageaddr; 718 719 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 720 return; 721 } 722 723 pageaddr = sextract64(value << 12, 0, 40); 724 725 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 726 ARMMMUIdxBit_Stage2); 727 } 728 729 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 730 uint64_t value) 731 { 732 CPUState *cs = env_cpu(env); 733 734 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); 735 } 736 737 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 738 uint64_t value) 739 { 740 CPUState *cs = env_cpu(env); 741 742 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); 743 } 744 745 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 746 uint64_t value) 747 { 748 CPUState *cs = env_cpu(env); 749 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 750 751 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); 752 } 753 754 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 755 uint64_t value) 756 { 757 CPUState *cs = env_cpu(env); 758 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 759 760 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 761 ARMMMUIdxBit_E2); 762 } 763 764 static const ARMCPRegInfo cp_reginfo[] = { 765 /* Define the secure and non-secure FCSE identifier CP registers 766 * separately because there is no secure bank in V8 (no _EL3). This allows 767 * the secure register to be properly reset and migrated. There is also no 768 * v8 EL1 version of the register so the non-secure instance stands alone. 769 */ 770 { .name = "FCSEIDR", 771 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 772 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 773 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), 774 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 775 { .name = "FCSEIDR_S", 776 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 777 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 778 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), 779 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 780 /* Define the secure and non-secure context identifier CP registers 781 * separately because there is no secure bank in V8 (no _EL3). This allows 782 * the secure register to be properly reset and migrated. In the 783 * non-secure case, the 32-bit register will have reset and migration 784 * disabled during registration as it is handled by the 64-bit instance. 
785 */ 786 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 787 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 788 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 789 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 790 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 791 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, 792 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 793 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 794 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 795 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 796 REGINFO_SENTINEL 797 }; 798 799 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 800 /* NB: Some of these registers exist in v8 but with more precise 801 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 802 */ 803 /* MMU Domain access control / MPU write buffer control */ 804 { .name = "DACR", 805 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 806 .access = PL1_RW, .resetvalue = 0, 807 .writefn = dacr_write, .raw_writefn = raw_write, 808 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 809 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 810 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 811 * For v6 and v5, these mappings are overly broad. 812 */ 813 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 814 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 815 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 816 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 817 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 818 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 819 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 820 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 821 /* Cache maintenance ops; some of this space may be overridden later. */ 822 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 823 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 824 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 825 REGINFO_SENTINEL 826 }; 827 828 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 829 /* Not all pre-v6 cores implemented this WFI, so this is slightly 830 * over-broad. 831 */ 832 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 833 .access = PL1_W, .type = ARM_CP_WFI }, 834 REGINFO_SENTINEL 835 }; 836 837 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 838 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 839 * is UNPREDICTABLE; we choose to NOP as most implementations do). 840 */ 841 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 842 .access = PL1_W, .type = ARM_CP_WFI }, 843 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 844 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 845 * OMAPCP will override this space. 
846 */ 847 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 848 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 849 .resetvalue = 0 }, 850 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 851 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 852 .resetvalue = 0 }, 853 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 854 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 855 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 856 .resetvalue = 0 }, 857 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 858 * implementing it as RAZ means the "debug architecture version" bits 859 * will read as a reserved value, which should cause Linux to not try 860 * to use the debug hardware. 861 */ 862 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 863 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 864 /* MMU TLB control. Note that the wildcarding means we cover not just 865 * the unified TLB ops but also the dside/iside/inner-shareable variants. 866 */ 867 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 868 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 869 .type = ARM_CP_NO_RAW }, 870 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 871 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 872 .type = ARM_CP_NO_RAW }, 873 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 874 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 875 .type = ARM_CP_NO_RAW }, 876 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 877 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 878 .type = ARM_CP_NO_RAW }, 879 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 880 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 881 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 882 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 883 REGINFO_SENTINEL 884 }; 885 886 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 887 uint64_t value) 888 { 889 uint32_t mask = 0; 890 891 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 892 if (!arm_feature(env, ARM_FEATURE_V8)) { 893 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 894 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 895 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 896 */ 897 if (arm_feature(env, ARM_FEATURE_VFP)) { 898 /* VFP coprocessor: cp10 & cp11 [23:20] */ 899 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 900 901 if (!arm_feature(env, ARM_FEATURE_NEON)) { 902 /* ASEDIS [31] bit is RAO/WI */ 903 value |= (1 << 31); 904 } 905 906 /* VFPv3 and upwards with NEON implement 32 double precision 907 * registers (D0-D31). 908 */ 909 if (!arm_feature(env, ARM_FEATURE_NEON) || 910 !arm_feature(env, ARM_FEATURE_VFP3)) { 911 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 912 value |= (1 << 30); 913 } 914 } 915 value &= mask; 916 } 917 918 /* 919 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 920 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 
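     * Handled here by keeping the previously stored CPACR bits [23:20] on
     * such writes, so the guest-visible cp10/cp11 field is left unchanged.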
921 */ 922 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 923 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 924 value &= ~(0xf << 20); 925 value |= env->cp15.cpacr_el1 & (0xf << 20); 926 } 927 928 env->cp15.cpacr_el1 = value; 929 } 930 931 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) 932 { 933 /* 934 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 935 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 936 */ 937 uint64_t value = env->cp15.cpacr_el1; 938 939 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 940 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 941 value &= ~(0xf << 20); 942 } 943 return value; 944 } 945 946 947 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 948 { 949 /* Call cpacr_write() so that we reset with the correct RAO bits set 950 * for our CPU features. 951 */ 952 cpacr_write(env, ri, 0); 953 } 954 955 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 956 bool isread) 957 { 958 if (arm_feature(env, ARM_FEATURE_V8)) { 959 /* Check if CPACR accesses are to be trapped to EL2 */ 960 if (arm_current_el(env) == 1 && 961 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { 962 return CP_ACCESS_TRAP_EL2; 963 /* Check if CPACR accesses are to be trapped to EL3 */ 964 } else if (arm_current_el(env) < 3 && 965 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 966 return CP_ACCESS_TRAP_EL3; 967 } 968 } 969 970 return CP_ACCESS_OK; 971 } 972 973 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 974 bool isread) 975 { 976 /* Check if CPTR accesses are set to trap to EL3 */ 977 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 978 return CP_ACCESS_TRAP_EL3; 979 } 980 981 return CP_ACCESS_OK; 982 } 983 984 static const ARMCPRegInfo v6_cp_reginfo[] = { 985 /* prefetch by MVA in v6, NOP in v7 */ 986 { .name = "MVA_prefetch", 987 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 988 .access = PL1_W, .type = ARM_CP_NOP }, 989 /* We need to break the TB after ISB to execute self-modifying code 990 * correctly and also to take any pending interrupts immediately. 991 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 992 */ 993 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 994 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 995 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 996 .access = PL0_W, .type = ARM_CP_NOP }, 997 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 998 .access = PL0_W, .type = ARM_CP_NOP }, 999 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 1000 .access = PL1_RW, 1001 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 1002 offsetof(CPUARMState, cp15.ifar_ns) }, 1003 .resetvalue = 0, }, 1004 /* Watchpoint Fault Address Register : should actually only be present 1005 * for 1136, 1176, 11MPCore. 
1006 */ 1007 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 1008 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 1009 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 1010 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 1011 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 1012 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, 1013 REGINFO_SENTINEL 1014 }; 1015 1016 /* Definitions for the PMU registers */ 1017 #define PMCRN_MASK 0xf800 1018 #define PMCRN_SHIFT 11 1019 #define PMCRLC 0x40 1020 #define PMCRDP 0x20 1021 #define PMCRX 0x10 1022 #define PMCRD 0x8 1023 #define PMCRC 0x4 1024 #define PMCRP 0x2 1025 #define PMCRE 0x1 1026 /* 1027 * Mask of PMCR bits writeable by guest (not including WO bits like C, P, 1028 * which can be written as 1 to trigger behaviour but which stay RAZ). 1029 */ 1030 #define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) 1031 1032 #define PMXEVTYPER_P 0x80000000 1033 #define PMXEVTYPER_U 0x40000000 1034 #define PMXEVTYPER_NSK 0x20000000 1035 #define PMXEVTYPER_NSU 0x10000000 1036 #define PMXEVTYPER_NSH 0x08000000 1037 #define PMXEVTYPER_M 0x04000000 1038 #define PMXEVTYPER_MT 0x02000000 1039 #define PMXEVTYPER_EVTCOUNT 0x0000ffff 1040 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ 1041 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ 1042 PMXEVTYPER_M | PMXEVTYPER_MT | \ 1043 PMXEVTYPER_EVTCOUNT) 1044 1045 #define PMCCFILTR 0xf8000000 1046 #define PMCCFILTR_M PMXEVTYPER_M 1047 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) 1048 1049 static inline uint32_t pmu_num_counters(CPUARMState *env) 1050 { 1051 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; 1052 } 1053 1054 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ 1055 static inline uint64_t pmu_counter_mask(CPUARMState *env) 1056 { 1057 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); 1058 } 1059 1060 typedef struct pm_event { 1061 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ 1062 /* If the event is supported on this CPU (used to generate PMCEID[01]) */ 1063 bool (*supported)(CPUARMState *); 1064 /* 1065 * Retrieve the current count of the underlying event. The programmed 1066 * counters hold a difference from the return value from this function 1067 */ 1068 uint64_t (*get_count)(CPUARMState *); 1069 /* 1070 * Return how many nanoseconds it will take (at a minimum) for count events 1071 * to occur. A negative value indicates the counter will never overflow, or 1072 * that the counter has otherwise arranged for the overflow bit to be set 1073 * and the PMU interrupt to be raised on overflow. 1074 */ 1075 int64_t (*ns_per_count)(uint64_t); 1076 } pm_event; 1077 1078 static bool event_always_supported(CPUARMState *env) 1079 { 1080 return true; 1081 } 1082 1083 static uint64_t swinc_get_count(CPUARMState *env) 1084 { 1085 /* 1086 * SW_INCR events are written directly to the pmevcntr's by writes to 1087 * PMSWINC, so there is no underlying count maintained by the PMU itself 1088 */ 1089 return 0; 1090 } 1091 1092 static int64_t swinc_ns_per(uint64_t ignored) 1093 { 1094 return -1; 1095 } 1096 1097 /* 1098 * Return the underlying cycle count for the PMU cycle counters. If we're in 1099 * usermode, simply return 0. 
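 * (In system emulation the count is derived by scaling QEMU_CLOCK_VIRTUAL
 * by the fixed ARM_CPU_FREQ, so with the 1 GHz default one nanosecond of
 * virtual time corresponds to one CPU cycle.)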
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
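 *
 * For example, with the pm_events[] table above: SW_INCR (0x000) sets PMCEID0
 * bit 0 and CPU_CYCLES (0x011) sets PMCEID0 bit 17, while, when the v8.1/v8.4
 * PMU events are supported, STALL_FRONTEND (0x023) sets PMCEID1 bit 3 and
 * STALL (0x03c) sets PMCEID1 bit 28; events 0x20-0x3f map to PMCEID1 and the
 * bit position is (event number & 0x1f).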
1207 */ 1208 void pmu_init(ARMCPU *cpu) 1209 { 1210 unsigned int i; 1211 1212 /* 1213 * Empty supported_event_map and cpu->pmceid[01] before adding supported 1214 * events to them 1215 */ 1216 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { 1217 supported_event_map[i] = UNSUPPORTED_EVENT; 1218 } 1219 cpu->pmceid0 = 0; 1220 cpu->pmceid1 = 0; 1221 1222 for (i = 0; i < ARRAY_SIZE(pm_events); i++) { 1223 const pm_event *cnt = &pm_events[i]; 1224 assert(cnt->number <= MAX_EVENT_ID); 1225 /* We do not currently support events in the 0x40xx range */ 1226 assert(cnt->number <= 0x3f); 1227 1228 if (cnt->supported(&cpu->env)) { 1229 supported_event_map[cnt->number] = i; 1230 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); 1231 if (cnt->number & 0x20) { 1232 cpu->pmceid1 |= event_mask; 1233 } else { 1234 cpu->pmceid0 |= event_mask; 1235 } 1236 } 1237 } 1238 } 1239 1240 /* 1241 * Check at runtime whether a PMU event is supported for the current machine 1242 */ 1243 static bool event_supported(uint16_t number) 1244 { 1245 if (number > MAX_EVENT_ID) { 1246 return false; 1247 } 1248 return supported_event_map[number] != UNSUPPORTED_EVENT; 1249 } 1250 1251 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 1252 bool isread) 1253 { 1254 /* Performance monitor registers user accessibility is controlled 1255 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 1256 * trapping to EL2 or EL3 for other accesses. 1257 */ 1258 int el = arm_current_el(env); 1259 1260 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 1261 return CP_ACCESS_TRAP; 1262 } 1263 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 1264 && !arm_is_secure_below_el3(env)) { 1265 return CP_ACCESS_TRAP_EL2; 1266 } 1267 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 1268 return CP_ACCESS_TRAP_EL3; 1269 } 1270 1271 return CP_ACCESS_OK; 1272 } 1273 1274 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 1275 const ARMCPRegInfo *ri, 1276 bool isread) 1277 { 1278 /* ER: event counter read trap control */ 1279 if (arm_feature(env, ARM_FEATURE_V8) 1280 && arm_current_el(env) == 0 1281 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 1282 && isread) { 1283 return CP_ACCESS_OK; 1284 } 1285 1286 return pmreg_access(env, ri, isread); 1287 } 1288 1289 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 1290 const ARMCPRegInfo *ri, 1291 bool isread) 1292 { 1293 /* SW: software increment write trap control */ 1294 if (arm_feature(env, ARM_FEATURE_V8) 1295 && arm_current_el(env) == 0 1296 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 1297 && !isread) { 1298 return CP_ACCESS_OK; 1299 } 1300 1301 return pmreg_access(env, ri, isread); 1302 } 1303 1304 static CPAccessResult pmreg_access_selr(CPUARMState *env, 1305 const ARMCPRegInfo *ri, 1306 bool isread) 1307 { 1308 /* ER: event counter read trap control */ 1309 if (arm_feature(env, ARM_FEATURE_V8) 1310 && arm_current_el(env) == 0 1311 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 1312 return CP_ACCESS_OK; 1313 } 1314 1315 return pmreg_access(env, ri, isread); 1316 } 1317 1318 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 1319 const ARMCPRegInfo *ri, 1320 bool isread) 1321 { 1322 /* CR: cycle counter read trap control */ 1323 if (arm_feature(env, ARM_FEATURE_V8) 1324 && arm_current_el(env) == 0 1325 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 1326 && isread) { 1327 return CP_ACCESS_OK; 1328 } 1329 1330 return pmreg_access(env, ri, isread); 1331 } 1332 1333 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using 1334 * the 
current EL, security state, and register configuration. 1335 */ 1336 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) 1337 { 1338 uint64_t filter; 1339 bool e, p, u, nsk, nsu, nsh, m; 1340 bool enabled, prohibited, filtered; 1341 bool secure = arm_is_secure(env); 1342 int el = arm_current_el(env); 1343 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; 1344 1345 if (!arm_feature(env, ARM_FEATURE_PMU)) { 1346 return false; 1347 } 1348 1349 if (!arm_feature(env, ARM_FEATURE_EL2) || 1350 (counter < hpmn || counter == 31)) { 1351 e = env->cp15.c9_pmcr & PMCRE; 1352 } else { 1353 e = env->cp15.mdcr_el2 & MDCR_HPME; 1354 } 1355 enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); 1356 1357 if (!secure) { 1358 if (el == 2 && (counter < hpmn || counter == 31)) { 1359 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD; 1360 } else { 1361 prohibited = false; 1362 } 1363 } else { 1364 prohibited = arm_feature(env, ARM_FEATURE_EL3) && 1365 (env->cp15.mdcr_el3 & MDCR_SPME); 1366 } 1367 1368 if (prohibited && counter == 31) { 1369 prohibited = env->cp15.c9_pmcr & PMCRDP; 1370 } 1371 1372 if (counter == 31) { 1373 filter = env->cp15.pmccfiltr_el0; 1374 } else { 1375 filter = env->cp15.c14_pmevtyper[counter]; 1376 } 1377 1378 p = filter & PMXEVTYPER_P; 1379 u = filter & PMXEVTYPER_U; 1380 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); 1381 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); 1382 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); 1383 m = arm_el_is_aa64(env, 1) && 1384 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); 1385 1386 if (el == 0) { 1387 filtered = secure ? u : u != nsu; 1388 } else if (el == 1) { 1389 filtered = secure ? p : p != nsk; 1390 } else if (el == 2) { 1391 filtered = !nsh; 1392 } else { /* EL3 */ 1393 filtered = m != p; 1394 } 1395 1396 if (counter != 31) { 1397 /* 1398 * If not checking PMCCNTR, ensure the counter is setup to an event we 1399 * support 1400 */ 1401 uint16_t event = filter & PMXEVTYPER_EVTCOUNT; 1402 if (!event_supported(event)) { 1403 return false; 1404 } 1405 } 1406 1407 return enabled && !prohibited && !filtered; 1408 } 1409 1410 static void pmu_update_irq(CPUARMState *env) 1411 { 1412 ARMCPU *cpu = env_archcpu(env); 1413 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && 1414 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); 1415 } 1416 1417 /* 1418 * Ensure c15_ccnt is the guest-visible count so that operations such as 1419 * enabling/disabling the counter or filtering, modifying the count itself, 1420 * etc. can be done logically. This is essentially a no-op if the counter is 1421 * not enabled at the time of the call. 1422 */ 1423 static void pmccntr_op_start(CPUARMState *env) 1424 { 1425 uint64_t cycles = cycles_get_count(env); 1426 1427 if (pmu_counter_enabled(env, 31)) { 1428 uint64_t eff_cycles = cycles; 1429 if (env->cp15.c9_pmcr & PMCRD) { 1430 /* Increment once every 64 processor clock cycles */ 1431 eff_cycles /= 64; 1432 } 1433 1434 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; 1435 1436 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ 1437 1ull << 63 : 1ull << 31; 1438 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { 1439 env->cp15.c9_pmovsr |= (1 << 31); 1440 pmu_update_irq(env); 1441 } 1442 1443 env->cp15.c15_ccnt = new_pmccntr; 1444 } 1445 env->cp15.c15_ccnt_delta = cycles; 1446 } 1447 1448 /* 1449 * If PMCCNTR is enabled, recalculate the delta between the clock and the 1450 * guest-visible count. 
A call to pmccntr_op_finish should follow every call to 1451 * pmccntr_op_start. 1452 */ 1453 static void pmccntr_op_finish(CPUARMState *env) 1454 { 1455 if (pmu_counter_enabled(env, 31)) { 1456 #ifndef CONFIG_USER_ONLY 1457 /* Calculate when the counter will next overflow */ 1458 uint64_t remaining_cycles = -env->cp15.c15_ccnt; 1459 if (!(env->cp15.c9_pmcr & PMCRLC)) { 1460 remaining_cycles = (uint32_t)remaining_cycles; 1461 } 1462 int64_t overflow_in = cycles_ns_per(remaining_cycles); 1463 1464 if (overflow_in > 0) { 1465 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1466 overflow_in; 1467 ARMCPU *cpu = env_archcpu(env); 1468 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1469 } 1470 #endif 1471 1472 uint64_t prev_cycles = env->cp15.c15_ccnt_delta; 1473 if (env->cp15.c9_pmcr & PMCRD) { 1474 /* Increment once every 64 processor clock cycles */ 1475 prev_cycles /= 64; 1476 } 1477 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; 1478 } 1479 } 1480 1481 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) 1482 { 1483 1484 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1485 uint64_t count = 0; 1486 if (event_supported(event)) { 1487 uint16_t event_idx = supported_event_map[event]; 1488 count = pm_events[event_idx].get_count(env); 1489 } 1490 1491 if (pmu_counter_enabled(env, counter)) { 1492 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; 1493 1494 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { 1495 env->cp15.c9_pmovsr |= (1 << counter); 1496 pmu_update_irq(env); 1497 } 1498 env->cp15.c14_pmevcntr[counter] = new_pmevcntr; 1499 } 1500 env->cp15.c14_pmevcntr_delta[counter] = count; 1501 } 1502 1503 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) 1504 { 1505 if (pmu_counter_enabled(env, counter)) { 1506 #ifndef CONFIG_USER_ONLY 1507 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1508 uint16_t event_idx = supported_event_map[event]; 1509 uint64_t delta = UINT32_MAX - 1510 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; 1511 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); 1512 1513 if (overflow_in > 0) { 1514 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1515 overflow_in; 1516 ARMCPU *cpu = env_archcpu(env); 1517 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1518 } 1519 #endif 1520 1521 env->cp15.c14_pmevcntr_delta[counter] -= 1522 env->cp15.c14_pmevcntr[counter]; 1523 } 1524 } 1525 1526 void pmu_op_start(CPUARMState *env) 1527 { 1528 unsigned int i; 1529 pmccntr_op_start(env); 1530 for (i = 0; i < pmu_num_counters(env); i++) { 1531 pmevcntr_op_start(env, i); 1532 } 1533 } 1534 1535 void pmu_op_finish(CPUARMState *env) 1536 { 1537 unsigned int i; 1538 pmccntr_op_finish(env); 1539 for (i = 0; i < pmu_num_counters(env); i++) { 1540 pmevcntr_op_finish(env, i); 1541 } 1542 } 1543 1544 void pmu_pre_el_change(ARMCPU *cpu, void *ignored) 1545 { 1546 pmu_op_start(&cpu->env); 1547 } 1548 1549 void pmu_post_el_change(ARMCPU *cpu, void *ignored) 1550 { 1551 pmu_op_finish(&cpu->env); 1552 } 1553 1554 void arm_pmu_timer_cb(void *opaque) 1555 { 1556 ARMCPU *cpu = opaque; 1557 1558 /* 1559 * Update all the counter values based on the current underlying counts, 1560 * triggering interrupts to be raised, if necessary. pmu_op_finish() also 1561 * has the effect of setting the cpu->pmu_timer to the next earliest time a 1562 * counter may expire. 
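     * Running the start/finish pair back to back with no register changes in
     * between just folds the elapsed counts into the guest-visible values and
     * re-arms cpu->pmu_timer for the next anticipated overflow.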
1563 */ 1564 pmu_op_start(&cpu->env); 1565 pmu_op_finish(&cpu->env); 1566 } 1567 1568 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1569 uint64_t value) 1570 { 1571 pmu_op_start(env); 1572 1573 if (value & PMCRC) { 1574 /* The counter has been reset */ 1575 env->cp15.c15_ccnt = 0; 1576 } 1577 1578 if (value & PMCRP) { 1579 unsigned int i; 1580 for (i = 0; i < pmu_num_counters(env); i++) { 1581 env->cp15.c14_pmevcntr[i] = 0; 1582 } 1583 } 1584 1585 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; 1586 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK); 1587 1588 pmu_op_finish(env); 1589 } 1590 1591 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, 1592 uint64_t value) 1593 { 1594 unsigned int i; 1595 for (i = 0; i < pmu_num_counters(env); i++) { 1596 /* Increment a counter's count iff: */ 1597 if ((value & (1 << i)) && /* counter's bit is set */ 1598 /* counter is enabled and not filtered */ 1599 pmu_counter_enabled(env, i) && 1600 /* counter is SW_INCR */ 1601 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { 1602 pmevcntr_op_start(env, i); 1603 1604 /* 1605 * Detect if this write causes an overflow since we can't predict 1606 * PMSWINC overflows like we can for other events 1607 */ 1608 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; 1609 1610 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { 1611 env->cp15.c9_pmovsr |= (1 << i); 1612 pmu_update_irq(env); 1613 } 1614 1615 env->cp15.c14_pmevcntr[i] = new_pmswinc; 1616 1617 pmevcntr_op_finish(env, i); 1618 } 1619 } 1620 } 1621 1622 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1623 { 1624 uint64_t ret; 1625 pmccntr_op_start(env); 1626 ret = env->cp15.c15_ccnt; 1627 pmccntr_op_finish(env); 1628 return ret; 1629 } 1630 1631 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1632 uint64_t value) 1633 { 1634 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and 1635 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the 1636 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are 1637 * accessed. 
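     * SEL is a five-bit field (hence the & 0x1f below); the value 31 selects
     * the cycle counter, and any other value at or above the number of
     * implemented counters is treated as RAZ/WI by the PMXEV* accessors.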
1638 */ 1639 env->cp15.c9_pmselr = value & 0x1f; 1640 } 1641 1642 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1643 uint64_t value) 1644 { 1645 pmccntr_op_start(env); 1646 env->cp15.c15_ccnt = value; 1647 pmccntr_op_finish(env); 1648 } 1649 1650 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1651 uint64_t value) 1652 { 1653 uint64_t cur_val = pmccntr_read(env, NULL); 1654 1655 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1656 } 1657 1658 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1659 uint64_t value) 1660 { 1661 pmccntr_op_start(env); 1662 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; 1663 pmccntr_op_finish(env); 1664 } 1665 1666 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, 1667 uint64_t value) 1668 { 1669 pmccntr_op_start(env); 1670 /* M is not accessible from AArch32 */ 1671 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | 1672 (value & PMCCFILTR); 1673 pmccntr_op_finish(env); 1674 } 1675 1676 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) 1677 { 1678 /* M is not visible in AArch32 */ 1679 return env->cp15.pmccfiltr_el0 & PMCCFILTR; 1680 } 1681 1682 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1683 uint64_t value) 1684 { 1685 value &= pmu_counter_mask(env); 1686 env->cp15.c9_pmcnten |= value; 1687 } 1688 1689 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1690 uint64_t value) 1691 { 1692 value &= pmu_counter_mask(env); 1693 env->cp15.c9_pmcnten &= ~value; 1694 } 1695 1696 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1697 uint64_t value) 1698 { 1699 value &= pmu_counter_mask(env); 1700 env->cp15.c9_pmovsr &= ~value; 1701 pmu_update_irq(env); 1702 } 1703 1704 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1705 uint64_t value) 1706 { 1707 value &= pmu_counter_mask(env); 1708 env->cp15.c9_pmovsr |= value; 1709 pmu_update_irq(env); 1710 } 1711 1712 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1713 uint64_t value, const uint8_t counter) 1714 { 1715 if (counter == 31) { 1716 pmccfiltr_write(env, ri, value); 1717 } else if (counter < pmu_num_counters(env)) { 1718 pmevcntr_op_start(env, counter); 1719 1720 /* 1721 * If this counter's event type is changing, store the current 1722 * underlying count for the new type in c14_pmevcntr_delta[counter] so 1723 * pmevcntr_op_finish has the correct baseline when it converts back to 1724 * a delta. 1725 */ 1726 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & 1727 PMXEVTYPER_EVTCOUNT; 1728 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; 1729 if (old_event != new_event) { 1730 uint64_t count = 0; 1731 if (event_supported(new_event)) { 1732 uint16_t event_idx = supported_event_map[new_event]; 1733 count = pm_events[event_idx].get_count(env); 1734 } 1735 env->cp15.c14_pmevcntr_delta[counter] = count; 1736 } 1737 1738 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; 1739 pmevcntr_op_finish(env, counter); 1740 } 1741 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1742 * PMSELR value is equal to or greater than the number of implemented 1743 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 
 */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
*/ 1836 return 0; 1837 } 1838 } 1839 1840 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1841 uint64_t value) 1842 { 1843 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1844 pmevcntr_write(env, ri, value, counter); 1845 } 1846 1847 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1848 { 1849 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1850 return pmevcntr_read(env, ri, counter); 1851 } 1852 1853 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1854 uint64_t value) 1855 { 1856 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1857 assert(counter < pmu_num_counters(env)); 1858 env->cp15.c14_pmevcntr[counter] = value; 1859 pmevcntr_write(env, ri, value, counter); 1860 } 1861 1862 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) 1863 { 1864 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1865 assert(counter < pmu_num_counters(env)); 1866 return env->cp15.c14_pmevcntr[counter]; 1867 } 1868 1869 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1870 uint64_t value) 1871 { 1872 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); 1873 } 1874 1875 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1876 { 1877 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); 1878 } 1879 1880 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1881 uint64_t value) 1882 { 1883 if (arm_feature(env, ARM_FEATURE_V8)) { 1884 env->cp15.c9_pmuserenr = value & 0xf; 1885 } else { 1886 env->cp15.c9_pmuserenr = value & 1; 1887 } 1888 } 1889 1890 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1891 uint64_t value) 1892 { 1893 /* We have no event counters so only the C bit can be changed */ 1894 value &= pmu_counter_mask(env); 1895 env->cp15.c9_pminten |= value; 1896 pmu_update_irq(env); 1897 } 1898 1899 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1900 uint64_t value) 1901 { 1902 value &= pmu_counter_mask(env); 1903 env->cp15.c9_pminten &= ~value; 1904 pmu_update_irq(env); 1905 } 1906 1907 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 1908 uint64_t value) 1909 { 1910 /* Note that even though the AArch64 view of this register has bits 1911 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 1912 * architectural requirements for bits which are RES0 only in some 1913 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 1914 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 1915 */ 1916 raw_write(env, ri, value & ~0x1FULL); 1917 } 1918 1919 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 1920 { 1921 /* Begin with base v8.0 state. */ 1922 uint32_t valid_mask = 0x3fff; 1923 ARMCPU *cpu = env_archcpu(env); 1924 1925 if (arm_el_is_aa64(env, 3)) { 1926 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ 1927 valid_mask &= ~SCR_NET; 1928 } else { 1929 valid_mask &= ~(SCR_RW | SCR_ST); 1930 } 1931 1932 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1933 valid_mask &= ~SCR_HCE; 1934 1935 /* On ARMv7, SMD (or SCD as it is called in v7) is only 1936 * supported if EL2 exists. The bit is UNK/SBZP when 1937 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1938 * when EL2 is unavailable. 1939 * On ARMv8, this bit is always available. 
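     * (For reference, the 0x3fff baseline at the top of this function covers
     * SCR bits [13:0], NS through TWE; any bit cleared from valid_mask, such
     * as SMD here, is forced to zero by the mask applied at the end of the
     * function, i.e. it behaves as RAZ/WI.)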
1940 */ 1941 if (arm_feature(env, ARM_FEATURE_V7) && 1942 !arm_feature(env, ARM_FEATURE_V8)) { 1943 valid_mask &= ~SCR_SMD; 1944 } 1945 } 1946 if (cpu_isar_feature(aa64_lor, cpu)) { 1947 valid_mask |= SCR_TLOR; 1948 } 1949 if (cpu_isar_feature(aa64_pauth, cpu)) { 1950 valid_mask |= SCR_API | SCR_APK; 1951 } 1952 1953 /* Clear all-context RES0 bits. */ 1954 value &= valid_mask; 1955 raw_write(env, ri, value); 1956 } 1957 1958 static CPAccessResult access_aa64_tid2(CPUARMState *env, 1959 const ARMCPRegInfo *ri, 1960 bool isread) 1961 { 1962 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { 1963 return CP_ACCESS_TRAP_EL2; 1964 } 1965 1966 return CP_ACCESS_OK; 1967 } 1968 1969 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1970 { 1971 ARMCPU *cpu = env_archcpu(env); 1972 1973 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1974 * bank 1975 */ 1976 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1977 ri->secure & ARM_CP_SECSTATE_S); 1978 1979 return cpu->ccsidr[index]; 1980 } 1981 1982 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1983 uint64_t value) 1984 { 1985 raw_write(env, ri, value & 0xf); 1986 } 1987 1988 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1989 { 1990 CPUState *cs = env_cpu(env); 1991 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 1992 uint64_t ret = 0; 1993 bool allow_virt = (arm_current_el(env) == 1 && 1994 (!arm_is_secure_below_el3(env) || 1995 (env->cp15.scr_el3 & SCR_EEL2))); 1996 1997 if (allow_virt && (hcr_el2 & HCR_IMO)) { 1998 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 1999 ret |= CPSR_I; 2000 } 2001 } else { 2002 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 2003 ret |= CPSR_I; 2004 } 2005 } 2006 2007 if (allow_virt && (hcr_el2 & HCR_FMO)) { 2008 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 2009 ret |= CPSR_F; 2010 } 2011 } else { 2012 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 2013 ret |= CPSR_F; 2014 } 2015 } 2016 2017 /* External aborts are not possible in QEMU so A bit is always clear */ 2018 return ret; 2019 } 2020 2021 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2022 bool isread) 2023 { 2024 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { 2025 return CP_ACCESS_TRAP_EL2; 2026 } 2027 2028 return CP_ACCESS_OK; 2029 } 2030 2031 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2032 bool isread) 2033 { 2034 if (arm_feature(env, ARM_FEATURE_V8)) { 2035 return access_aa64_tid1(env, ri, isread); 2036 } 2037 2038 return CP_ACCESS_OK; 2039 } 2040 2041 static const ARMCPRegInfo v7_cp_reginfo[] = { 2042 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 2043 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 2044 .access = PL1_W, .type = ARM_CP_NOP }, 2045 /* Performance monitors are implementation defined in v7, 2046 * but with an ARM recommended set of registers, which we 2047 * follow. 2048 * 2049 * Performance registers fall into three categories: 2050 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 2051 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 2052 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 2053 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 2054 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
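     * (In this file that PMUSERENR check is done by the pmreg_access* helpers
     * used as the .accessfn of the register entries below.)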
2055 */ 2056 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 2057 .access = PL0_RW, .type = ARM_CP_ALIAS, 2058 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2059 .writefn = pmcntenset_write, 2060 .accessfn = pmreg_access, 2061 .raw_writefn = raw_write }, 2062 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 2063 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 2064 .access = PL0_RW, .accessfn = pmreg_access, 2065 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 2066 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 2067 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 2068 .access = PL0_RW, 2069 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2070 .accessfn = pmreg_access, 2071 .writefn = pmcntenclr_write, 2072 .type = ARM_CP_ALIAS }, 2073 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 2074 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 2075 .access = PL0_RW, .accessfn = pmreg_access, 2076 .type = ARM_CP_ALIAS, 2077 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 2078 .writefn = pmcntenclr_write }, 2079 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 2080 .access = PL0_RW, .type = ARM_CP_IO, 2081 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2082 .accessfn = pmreg_access, 2083 .writefn = pmovsr_write, 2084 .raw_writefn = raw_write }, 2085 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 2086 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 2087 .access = PL0_RW, .accessfn = pmreg_access, 2088 .type = ARM_CP_ALIAS | ARM_CP_IO, 2089 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2090 .writefn = pmovsr_write, 2091 .raw_writefn = raw_write }, 2092 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 2093 .access = PL0_W, .accessfn = pmreg_access_swinc, 2094 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2095 .writefn = pmswinc_write }, 2096 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 2097 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2098 .access = PL0_W, .accessfn = pmreg_access_swinc, 2099 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2100 .writefn = pmswinc_write }, 2101 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2102 .access = PL0_RW, .type = ARM_CP_ALIAS, 2103 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2104 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2105 .raw_writefn = raw_write}, 2106 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2107 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2108 .access = PL0_RW, .accessfn = pmreg_access_selr, 2109 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2110 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2111 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2112 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2113 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2114 .accessfn = pmreg_access_ccntr }, 2115 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2116 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2117 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2118 .type = ARM_CP_IO, 2119 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2120 .readfn = pmccntr_read, .writefn = pmccntr_write, 2121 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2122 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2123 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2124 .access = PL0_RW, .accessfn = pmreg_access, 2125 .type = ARM_CP_ALIAS | ARM_CP_IO, 2126 .resetvalue = 0, }, 2127 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2128 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2129 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2130 .access = PL0_RW, .accessfn = pmreg_access, 2131 .type = ARM_CP_IO, 2132 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2133 .resetvalue = 0, }, 2134 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2135 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2136 .accessfn = pmreg_access, 2137 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2138 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2139 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2140 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2141 .accessfn = pmreg_access, 2142 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2143 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2144 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2145 .accessfn = pmreg_access_xevcntr, 2146 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2147 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2148 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2149 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2150 .accessfn = pmreg_access_xevcntr, 2151 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2152 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2153 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2154 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2155 .resetvalue = 0, 2156 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2157 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2158 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2159 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2160 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2161 .resetvalue = 0, 2162 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2163 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2164 .access = PL1_RW, .accessfn = access_tpm, 2165 .type = ARM_CP_ALIAS | ARM_CP_IO, 2166 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2167 .resetvalue = 0, 2168 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2169 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2170 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2171 .access = PL1_RW, .accessfn = access_tpm, 2172 .type = ARM_CP_IO, 2173 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2174 .writefn = pmintenset_write, .raw_writefn = raw_write, 2175 .resetvalue = 0x0 }, 2176 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2177 .access = PL1_RW, .accessfn = access_tpm, 2178 .type = ARM_CP_ALIAS | ARM_CP_IO, 2179 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2180 .writefn = pmintenclr_write, }, 2181 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2182 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2183 .access = PL1_RW, .accessfn = access_tpm, 2184 .type = ARM_CP_ALIAS | ARM_CP_IO, 2185 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2186 .writefn = pmintenclr_write }, 2187 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2188 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2189 .access = PL1_R, 2190 .accessfn = access_aa64_tid2, 2191 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2192 { 
.name = "CSSELR", .state = ARM_CP_STATE_BOTH, 2193 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2194 .access = PL1_RW, 2195 .accessfn = access_aa64_tid2, 2196 .writefn = csselr_write, .resetvalue = 0, 2197 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2198 offsetof(CPUARMState, cp15.csselr_ns) } }, 2199 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2200 * just RAZ for all cores: 2201 */ 2202 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2203 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2204 .access = PL1_R, .type = ARM_CP_CONST, 2205 .accessfn = access_aa64_tid1, 2206 .resetvalue = 0 }, 2207 /* Auxiliary fault status registers: these also are IMPDEF, and we 2208 * choose to RAZ/WI for all cores. 2209 */ 2210 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2211 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2212 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2213 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2214 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2215 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2216 /* MAIR can just read-as-written because we don't implement caches 2217 * and so don't need to care about memory attributes. 2218 */ 2219 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2220 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2221 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2222 .resetvalue = 0 }, 2223 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2224 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2225 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2226 .resetvalue = 0 }, 2227 /* For non-long-descriptor page tables these are PRRR and NMRR; 2228 * regardless they still act as reads-as-written for QEMU. 2229 */ 2230 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2231 * allows them to assign the correct fieldoffset based on the endianness 2232 * handled in the field definitions. 
2233 */ 2234 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2235 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, 2236 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2237 offsetof(CPUARMState, cp15.mair0_ns) }, 2238 .resetfn = arm_cp_reset_ignore }, 2239 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2240 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, 2241 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2242 offsetof(CPUARMState, cp15.mair1_ns) }, 2243 .resetfn = arm_cp_reset_ignore }, 2244 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2245 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2246 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2247 /* 32 bit ITLB invalidates */ 2248 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2249 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2250 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2251 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2252 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2253 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2254 /* 32 bit DTLB invalidates */ 2255 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2256 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2257 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2258 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2259 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2260 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2261 /* 32 bit TLB invalidates */ 2262 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2263 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2264 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2265 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2266 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2267 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2268 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2269 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 2270 REGINFO_SENTINEL 2271 }; 2272 2273 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2274 /* 32 bit TLB invalidates, Inner Shareable */ 2275 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2276 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write }, 2277 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2278 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 2279 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2280 .type = ARM_CP_NO_RAW, .access = PL1_W, 2281 .writefn = tlbiasid_is_write }, 2282 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2283 .type = ARM_CP_NO_RAW, .access = PL1_W, 2284 .writefn = tlbimvaa_is_write }, 2285 REGINFO_SENTINEL 2286 }; 2287 2288 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2289 /* PMOVSSET is not implemented in v7 before v7ve */ 2290 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2291 .access = PL0_RW, .accessfn = pmreg_access, 2292 .type = ARM_CP_ALIAS | ARM_CP_IO, 2293 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2294 .writefn = 
pmovsset_write, 2295 .raw_writefn = raw_write }, 2296 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2297 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2298 .access = PL0_RW, .accessfn = pmreg_access, 2299 .type = ARM_CP_ALIAS | ARM_CP_IO, 2300 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2301 .writefn = pmovsset_write, 2302 .raw_writefn = raw_write }, 2303 REGINFO_SENTINEL 2304 }; 2305 2306 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2307 uint64_t value) 2308 { 2309 value &= 1; 2310 env->teecr = value; 2311 } 2312 2313 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2314 bool isread) 2315 { 2316 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2317 return CP_ACCESS_TRAP; 2318 } 2319 return CP_ACCESS_OK; 2320 } 2321 2322 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2323 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2324 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2325 .resetvalue = 0, 2326 .writefn = teecr_write }, 2327 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2328 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2329 .accessfn = teehbr_access, .resetvalue = 0 }, 2330 REGINFO_SENTINEL 2331 }; 2332 2333 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2334 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2335 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2336 .access = PL0_RW, 2337 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2338 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2339 .access = PL0_RW, 2340 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2341 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2342 .resetfn = arm_cp_reset_ignore }, 2343 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2344 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2345 .access = PL0_R|PL1_W, 2346 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2347 .resetvalue = 0}, 2348 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2349 .access = PL0_R|PL1_W, 2350 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2351 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2352 .resetfn = arm_cp_reset_ignore }, 2353 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2354 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2355 .access = PL1_RW, 2356 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2357 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2358 .access = PL1_RW, 2359 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2360 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2361 .resetvalue = 0 }, 2362 REGINFO_SENTINEL 2363 }; 2364 2365 #ifndef CONFIG_USER_ONLY 2366 2367 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2368 bool isread) 2369 { 2370 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2371 * Writable only at the highest implemented exception level. 
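     * (PL0PCTEN and PL0VCTEN are CNTKCTL bits [0] and [1], which is what the
     * extract32(cntkctl, 0, 2) test below checks; when HCR_EL2.{E2H,TGE} is
     * '11' the equivalent EL0 controls are taken from CNTHCTL_EL2 instead.)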
2372 */ 2373 int el = arm_current_el(env); 2374 uint64_t hcr; 2375 uint32_t cntkctl; 2376 2377 switch (el) { 2378 case 0: 2379 hcr = arm_hcr_el2_eff(env); 2380 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2381 cntkctl = env->cp15.cnthctl_el2; 2382 } else { 2383 cntkctl = env->cp15.c14_cntkctl; 2384 } 2385 if (!extract32(cntkctl, 0, 2)) { 2386 return CP_ACCESS_TRAP; 2387 } 2388 break; 2389 case 1: 2390 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2391 arm_is_secure_below_el3(env)) { 2392 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2393 return CP_ACCESS_TRAP_UNCATEGORIZED; 2394 } 2395 break; 2396 case 2: 2397 case 3: 2398 break; 2399 } 2400 2401 if (!isread && el < arm_highest_el(env)) { 2402 return CP_ACCESS_TRAP_UNCATEGORIZED; 2403 } 2404 2405 return CP_ACCESS_OK; 2406 } 2407 2408 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2409 bool isread) 2410 { 2411 unsigned int cur_el = arm_current_el(env); 2412 bool secure = arm_is_secure(env); 2413 uint64_t hcr = arm_hcr_el2_eff(env); 2414 2415 switch (cur_el) { 2416 case 0: 2417 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2418 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2419 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2420 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2421 } 2422 2423 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2424 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2425 return CP_ACCESS_TRAP; 2426 } 2427 2428 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ 2429 if (hcr & HCR_E2H) { 2430 if (timeridx == GTIMER_PHYS && 2431 !extract32(env->cp15.cnthctl_el2, 10, 1)) { 2432 return CP_ACCESS_TRAP_EL2; 2433 } 2434 } else { 2435 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2436 if (arm_feature(env, ARM_FEATURE_EL2) && 2437 timeridx == GTIMER_PHYS && !secure && 2438 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2439 return CP_ACCESS_TRAP_EL2; 2440 } 2441 } 2442 break; 2443 2444 case 1: 2445 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2446 if (arm_feature(env, ARM_FEATURE_EL2) && 2447 timeridx == GTIMER_PHYS && !secure && 2448 (hcr & HCR_E2H 2449 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2450 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2451 return CP_ACCESS_TRAP_EL2; 2452 } 2453 break; 2454 } 2455 return CP_ACCESS_OK; 2456 } 2457 2458 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2459 bool isread) 2460 { 2461 unsigned int cur_el = arm_current_el(env); 2462 bool secure = arm_is_secure(env); 2463 uint64_t hcr = arm_hcr_el2_eff(env); 2464 2465 switch (cur_el) { 2466 case 0: 2467 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2468 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2469 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2470 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2471 } 2472 2473 /* 2474 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2475 * EL0 if EL0[PV]TEN is zero. 2476 */ 2477 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2478 return CP_ACCESS_TRAP; 2479 } 2480 /* fall through */ 2481 2482 case 1: 2483 if (arm_feature(env, ARM_FEATURE_EL2) && 2484 timeridx == GTIMER_PHYS && !secure) { 2485 if (hcr & HCR_E2H) { 2486 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ 2487 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2488 return CP_ACCESS_TRAP_EL2; 2489 } 2490 } else { 2491 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. 
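             * (With E2H clear, EL1PCEN is CNTHCTL_EL2 bit 1; when E2H is set
             * the corresponding control is EL1PTEN, bit 11, which is what the
             * branch above tests.)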
*/ 2492 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2493 return CP_ACCESS_TRAP_EL2; 2494 } 2495 } 2496 } 2497 break; 2498 } 2499 return CP_ACCESS_OK; 2500 } 2501 2502 static CPAccessResult gt_pct_access(CPUARMState *env, 2503 const ARMCPRegInfo *ri, 2504 bool isread) 2505 { 2506 return gt_counter_access(env, GTIMER_PHYS, isread); 2507 } 2508 2509 static CPAccessResult gt_vct_access(CPUARMState *env, 2510 const ARMCPRegInfo *ri, 2511 bool isread) 2512 { 2513 return gt_counter_access(env, GTIMER_VIRT, isread); 2514 } 2515 2516 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2517 bool isread) 2518 { 2519 return gt_timer_access(env, GTIMER_PHYS, isread); 2520 } 2521 2522 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2523 bool isread) 2524 { 2525 return gt_timer_access(env, GTIMER_VIRT, isread); 2526 } 2527 2528 static CPAccessResult gt_stimer_access(CPUARMState *env, 2529 const ARMCPRegInfo *ri, 2530 bool isread) 2531 { 2532 /* The AArch64 register view of the secure physical timer is 2533 * always accessible from EL3, and configurably accessible from 2534 * Secure EL1. 2535 */ 2536 switch (arm_current_el(env)) { 2537 case 1: 2538 if (!arm_is_secure(env)) { 2539 return CP_ACCESS_TRAP; 2540 } 2541 if (!(env->cp15.scr_el3 & SCR_ST)) { 2542 return CP_ACCESS_TRAP_EL3; 2543 } 2544 return CP_ACCESS_OK; 2545 case 0: 2546 case 2: 2547 return CP_ACCESS_TRAP; 2548 case 3: 2549 return CP_ACCESS_OK; 2550 default: 2551 g_assert_not_reached(); 2552 } 2553 } 2554 2555 static uint64_t gt_get_countervalue(CPUARMState *env) 2556 { 2557 ARMCPU *cpu = env_archcpu(env); 2558 2559 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2560 } 2561 2562 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2563 { 2564 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2565 2566 if (gt->ctl & 1) { 2567 /* Timer enabled: calculate and set current ISTATUS, irq, and 2568 * reset timer to when ISTATUS next has to change 2569 */ 2570 uint64_t offset = timeridx == GTIMER_VIRT ? 2571 cpu->env.cp15.cntvoff_el2 : 0; 2572 uint64_t count = gt_get_countervalue(&cpu->env); 2573 /* Note that this must be unsigned 64 bit arithmetic: */ 2574 int istatus = count - offset >= gt->cval; 2575 uint64_t nexttick; 2576 int irqstate; 2577 2578 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2579 2580 irqstate = (istatus && !(gt->ctl & 2)); 2581 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2582 2583 if (istatus) { 2584 /* Next transition is when count rolls back over to zero */ 2585 nexttick = UINT64_MAX; 2586 } else { 2587 /* Next transition is when we hit cval */ 2588 nexttick = gt->cval + offset; 2589 } 2590 /* Note that the desired next expiry time might be beyond the 2591 * signed-64-bit range of a QEMUTimer -- in this case we just 2592 * set the timer for as far in the future as possible. When the 2593 * timer expires we will reset the timer for any remaining period. 
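         * (That re-arming happens because the QEMUTimer callbacks,
         * arm_gt_*timer_cb() below, simply call gt_recalc_timer() again.)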
2594 */ 2595 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2596 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2597 } else { 2598 timer_mod(cpu->gt_timer[timeridx], nexttick); 2599 } 2600 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2601 } else { 2602 /* Timer disabled: ISTATUS and timer output always clear */ 2603 gt->ctl &= ~4; 2604 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2605 timer_del(cpu->gt_timer[timeridx]); 2606 trace_arm_gt_recalc_disabled(timeridx); 2607 } 2608 } 2609 2610 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2611 int timeridx) 2612 { 2613 ARMCPU *cpu = env_archcpu(env); 2614 2615 timer_del(cpu->gt_timer[timeridx]); 2616 } 2617 2618 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2619 { 2620 return gt_get_countervalue(env); 2621 } 2622 2623 static uint64_t gt_virt_cnt_offset(CPUARMState *env) 2624 { 2625 uint64_t hcr; 2626 2627 switch (arm_current_el(env)) { 2628 case 2: 2629 hcr = arm_hcr_el2_eff(env); 2630 if (hcr & HCR_E2H) { 2631 return 0; 2632 } 2633 break; 2634 case 0: 2635 hcr = arm_hcr_el2_eff(env); 2636 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2637 return 0; 2638 } 2639 break; 2640 } 2641 2642 return env->cp15.cntvoff_el2; 2643 } 2644 2645 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2646 { 2647 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2648 } 2649 2650 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2651 int timeridx, 2652 uint64_t value) 2653 { 2654 trace_arm_gt_cval_write(timeridx, value); 2655 env->cp15.c14_timer[timeridx].cval = value; 2656 gt_recalc_timer(env_archcpu(env), timeridx); 2657 } 2658 2659 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2660 int timeridx) 2661 { 2662 uint64_t offset = 0; 2663 2664 switch (timeridx) { 2665 case GTIMER_VIRT: 2666 case GTIMER_HYPVIRT: 2667 offset = gt_virt_cnt_offset(env); 2668 break; 2669 } 2670 2671 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2672 (gt_get_countervalue(env) - offset)); 2673 } 2674 2675 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2676 int timeridx, 2677 uint64_t value) 2678 { 2679 uint64_t offset = 0; 2680 2681 switch (timeridx) { 2682 case GTIMER_VIRT: 2683 case GTIMER_HYPVIRT: 2684 offset = gt_virt_cnt_offset(env); 2685 break; 2686 } 2687 2688 trace_arm_gt_tval_write(timeridx, value); 2689 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2690 sextract64(value, 0, 32); 2691 gt_recalc_timer(env_archcpu(env), timeridx); 2692 } 2693 2694 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2695 int timeridx, 2696 uint64_t value) 2697 { 2698 ARMCPU *cpu = env_archcpu(env); 2699 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2700 2701 trace_arm_gt_ctl_write(timeridx, value); 2702 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2703 if ((oldval ^ value) & 1) { 2704 /* Enable toggled */ 2705 gt_recalc_timer(cpu, timeridx); 2706 } else if ((oldval ^ value) & 2) { 2707 /* IMASK toggled: don't need to recalculate, 2708 * just set the interrupt line based on ISTATUS 2709 */ 2710 int irqstate = (oldval & 4) && !(value & 2); 2711 2712 trace_arm_gt_imask_toggle(timeridx, irqstate); 2713 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2714 } 2715 } 2716 2717 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2718 { 2719 gt_timer_reset(env, ri, GTIMER_PHYS); 2720 } 2721 2722 static void 
gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2723 uint64_t value) 2724 { 2725 gt_cval_write(env, ri, GTIMER_PHYS, value); 2726 } 2727 2728 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2729 { 2730 return gt_tval_read(env, ri, GTIMER_PHYS); 2731 } 2732 2733 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2734 uint64_t value) 2735 { 2736 gt_tval_write(env, ri, GTIMER_PHYS, value); 2737 } 2738 2739 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2740 uint64_t value) 2741 { 2742 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2743 } 2744 2745 static int gt_phys_redir_timeridx(CPUARMState *env) 2746 { 2747 switch (arm_mmu_idx(env)) { 2748 case ARMMMUIdx_E20_0: 2749 case ARMMMUIdx_E20_2: 2750 case ARMMMUIdx_E20_2_PAN: 2751 return GTIMER_HYP; 2752 default: 2753 return GTIMER_PHYS; 2754 } 2755 } 2756 2757 static int gt_virt_redir_timeridx(CPUARMState *env) 2758 { 2759 switch (arm_mmu_idx(env)) { 2760 case ARMMMUIdx_E20_0: 2761 case ARMMMUIdx_E20_2: 2762 case ARMMMUIdx_E20_2_PAN: 2763 return GTIMER_HYPVIRT; 2764 default: 2765 return GTIMER_VIRT; 2766 } 2767 } 2768 2769 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2770 const ARMCPRegInfo *ri) 2771 { 2772 int timeridx = gt_phys_redir_timeridx(env); 2773 return env->cp15.c14_timer[timeridx].cval; 2774 } 2775 2776 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2777 uint64_t value) 2778 { 2779 int timeridx = gt_phys_redir_timeridx(env); 2780 gt_cval_write(env, ri, timeridx, value); 2781 } 2782 2783 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2784 const ARMCPRegInfo *ri) 2785 { 2786 int timeridx = gt_phys_redir_timeridx(env); 2787 return gt_tval_read(env, ri, timeridx); 2788 } 2789 2790 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2791 uint64_t value) 2792 { 2793 int timeridx = gt_phys_redir_timeridx(env); 2794 gt_tval_write(env, ri, timeridx, value); 2795 } 2796 2797 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2798 const ARMCPRegInfo *ri) 2799 { 2800 int timeridx = gt_phys_redir_timeridx(env); 2801 return env->cp15.c14_timer[timeridx].ctl; 2802 } 2803 2804 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2805 uint64_t value) 2806 { 2807 int timeridx = gt_phys_redir_timeridx(env); 2808 gt_ctl_write(env, ri, timeridx, value); 2809 } 2810 2811 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2812 { 2813 gt_timer_reset(env, ri, GTIMER_VIRT); 2814 } 2815 2816 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2817 uint64_t value) 2818 { 2819 gt_cval_write(env, ri, GTIMER_VIRT, value); 2820 } 2821 2822 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2823 { 2824 return gt_tval_read(env, ri, GTIMER_VIRT); 2825 } 2826 2827 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2828 uint64_t value) 2829 { 2830 gt_tval_write(env, ri, GTIMER_VIRT, value); 2831 } 2832 2833 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2834 uint64_t value) 2835 { 2836 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2837 } 2838 2839 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2840 uint64_t value) 2841 { 2842 ARMCPU *cpu = env_archcpu(env); 2843 2844 trace_arm_gt_cntvoff_write(value); 2845 raw_write(env, ri, value); 2846 gt_recalc_timer(cpu, GTIMER_VIRT); 2847 } 2848 2849 static uint64_t gt_virt_redir_cval_read(CPUARMState 
*env, 2850 const ARMCPRegInfo *ri) 2851 { 2852 int timeridx = gt_virt_redir_timeridx(env); 2853 return env->cp15.c14_timer[timeridx].cval; 2854 } 2855 2856 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2857 uint64_t value) 2858 { 2859 int timeridx = gt_virt_redir_timeridx(env); 2860 gt_cval_write(env, ri, timeridx, value); 2861 } 2862 2863 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 2864 const ARMCPRegInfo *ri) 2865 { 2866 int timeridx = gt_virt_redir_timeridx(env); 2867 return gt_tval_read(env, ri, timeridx); 2868 } 2869 2870 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2871 uint64_t value) 2872 { 2873 int timeridx = gt_virt_redir_timeridx(env); 2874 gt_tval_write(env, ri, timeridx, value); 2875 } 2876 2877 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 2878 const ARMCPRegInfo *ri) 2879 { 2880 int timeridx = gt_virt_redir_timeridx(env); 2881 return env->cp15.c14_timer[timeridx].ctl; 2882 } 2883 2884 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2885 uint64_t value) 2886 { 2887 int timeridx = gt_virt_redir_timeridx(env); 2888 gt_ctl_write(env, ri, timeridx, value); 2889 } 2890 2891 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2892 { 2893 gt_timer_reset(env, ri, GTIMER_HYP); 2894 } 2895 2896 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2897 uint64_t value) 2898 { 2899 gt_cval_write(env, ri, GTIMER_HYP, value); 2900 } 2901 2902 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2903 { 2904 return gt_tval_read(env, ri, GTIMER_HYP); 2905 } 2906 2907 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2908 uint64_t value) 2909 { 2910 gt_tval_write(env, ri, GTIMER_HYP, value); 2911 } 2912 2913 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2914 uint64_t value) 2915 { 2916 gt_ctl_write(env, ri, GTIMER_HYP, value); 2917 } 2918 2919 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2920 { 2921 gt_timer_reset(env, ri, GTIMER_SEC); 2922 } 2923 2924 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2925 uint64_t value) 2926 { 2927 gt_cval_write(env, ri, GTIMER_SEC, value); 2928 } 2929 2930 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2931 { 2932 return gt_tval_read(env, ri, GTIMER_SEC); 2933 } 2934 2935 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2936 uint64_t value) 2937 { 2938 gt_tval_write(env, ri, GTIMER_SEC, value); 2939 } 2940 2941 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2942 uint64_t value) 2943 { 2944 gt_ctl_write(env, ri, GTIMER_SEC, value); 2945 } 2946 2947 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2948 { 2949 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 2950 } 2951 2952 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2953 uint64_t value) 2954 { 2955 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 2956 } 2957 2958 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2959 { 2960 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 2961 } 2962 2963 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2964 uint64_t value) 2965 { 2966 gt_tval_write(env, ri, GTIMER_HYPVIRT, value); 2967 } 2968 2969 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2970 uint64_t value) 2971 { 2972 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 
2973 } 2974 2975 void arm_gt_ptimer_cb(void *opaque) 2976 { 2977 ARMCPU *cpu = opaque; 2978 2979 gt_recalc_timer(cpu, GTIMER_PHYS); 2980 } 2981 2982 void arm_gt_vtimer_cb(void *opaque) 2983 { 2984 ARMCPU *cpu = opaque; 2985 2986 gt_recalc_timer(cpu, GTIMER_VIRT); 2987 } 2988 2989 void arm_gt_htimer_cb(void *opaque) 2990 { 2991 ARMCPU *cpu = opaque; 2992 2993 gt_recalc_timer(cpu, GTIMER_HYP); 2994 } 2995 2996 void arm_gt_stimer_cb(void *opaque) 2997 { 2998 ARMCPU *cpu = opaque; 2999 3000 gt_recalc_timer(cpu, GTIMER_SEC); 3001 } 3002 3003 void arm_gt_hvtimer_cb(void *opaque) 3004 { 3005 ARMCPU *cpu = opaque; 3006 3007 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 3008 } 3009 3010 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 3011 { 3012 ARMCPU *cpu = env_archcpu(env); 3013 3014 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 3015 } 3016 3017 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3018 /* Note that CNTFRQ is purely reads-as-written for the benefit 3019 * of software; writing it doesn't actually change the timer frequency. 3020 * Our reset value matches the fixed frequency we implement the timer at. 3021 */ 3022 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 3023 .type = ARM_CP_ALIAS, 3024 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3025 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 3026 }, 3027 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3028 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3029 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3030 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3031 .resetfn = arm_gt_cntfrq_reset, 3032 }, 3033 /* overall control: mostly access permissions */ 3034 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 3035 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 3036 .access = PL1_RW, 3037 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 3038 .resetvalue = 0, 3039 }, 3040 /* per-timer control */ 3041 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3042 .secure = ARM_CP_SECSTATE_NS, 3043 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3044 .accessfn = gt_ptimer_access, 3045 .fieldoffset = offsetoflow32(CPUARMState, 3046 cp15.c14_timer[GTIMER_PHYS].ctl), 3047 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3048 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3049 }, 3050 { .name = "CNTP_CTL_S", 3051 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3052 .secure = ARM_CP_SECSTATE_S, 3053 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3054 .accessfn = gt_ptimer_access, 3055 .fieldoffset = offsetoflow32(CPUARMState, 3056 cp15.c14_timer[GTIMER_SEC].ctl), 3057 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3058 }, 3059 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 3060 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 3061 .type = ARM_CP_IO, .access = PL0_RW, 3062 .accessfn = gt_ptimer_access, 3063 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 3064 .resetvalue = 0, 3065 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3066 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3067 }, 3068 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 3069 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3070 .accessfn = gt_vtimer_access, 3071 .fieldoffset = offsetoflow32(CPUARMState, 3072 cp15.c14_timer[GTIMER_VIRT].ctl), 3073 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3074 .writefn 
= gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3075 }, 3076 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 3077 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 3078 .type = ARM_CP_IO, .access = PL0_RW, 3079 .accessfn = gt_vtimer_access, 3080 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 3081 .resetvalue = 0, 3082 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3083 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3084 }, 3085 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 3086 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3087 .secure = ARM_CP_SECSTATE_NS, 3088 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3089 .accessfn = gt_ptimer_access, 3090 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3091 }, 3092 { .name = "CNTP_TVAL_S", 3093 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3094 .secure = ARM_CP_SECSTATE_S, 3095 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3096 .accessfn = gt_ptimer_access, 3097 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 3098 }, 3099 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3100 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 3101 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3102 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 3103 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3104 }, 3105 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 3106 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3107 .accessfn = gt_vtimer_access, 3108 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3109 }, 3110 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3111 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 3112 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3113 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 3114 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3115 }, 3116 /* The counter itself */ 3117 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 3118 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3119 .accessfn = gt_pct_access, 3120 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3121 }, 3122 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 3123 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 3124 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3125 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3126 }, 3127 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3128 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3129 .accessfn = gt_vct_access, 3130 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3131 }, 3132 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3133 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3134 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3135 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3136 }, 3137 /* Comparison value, indicating when the timer goes off */ 3138 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3139 .secure = ARM_CP_SECSTATE_NS, 3140 .access = PL0_RW, 3141 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3142 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3143 .accessfn = gt_ptimer_access, 3144 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3145 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3146 }, 3147 { .name = 
"CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3148 .secure = ARM_CP_SECSTATE_S, 3149 .access = PL0_RW, 3150 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3151 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3152 .accessfn = gt_ptimer_access, 3153 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3154 }, 3155 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3156 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3157 .access = PL0_RW, 3158 .type = ARM_CP_IO, 3159 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3160 .resetvalue = 0, .accessfn = gt_ptimer_access, 3161 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3162 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3163 }, 3164 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3165 .access = PL0_RW, 3166 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3167 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3168 .accessfn = gt_vtimer_access, 3169 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3170 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3171 }, 3172 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3173 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3174 .access = PL0_RW, 3175 .type = ARM_CP_IO, 3176 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3177 .resetvalue = 0, .accessfn = gt_vtimer_access, 3178 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3179 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3180 }, 3181 /* Secure timer -- this is actually restricted to only EL3 3182 * and configurably Secure-EL1 via the accessfn. 3183 */ 3184 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3185 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3186 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3187 .accessfn = gt_stimer_access, 3188 .readfn = gt_sec_tval_read, 3189 .writefn = gt_sec_tval_write, 3190 .resetfn = gt_sec_timer_reset, 3191 }, 3192 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3193 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3194 .type = ARM_CP_IO, .access = PL1_RW, 3195 .accessfn = gt_stimer_access, 3196 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3197 .resetvalue = 0, 3198 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3199 }, 3200 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3201 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3202 .type = ARM_CP_IO, .access = PL1_RW, 3203 .accessfn = gt_stimer_access, 3204 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3205 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3206 }, 3207 REGINFO_SENTINEL 3208 }; 3209 3210 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 3211 bool isread) 3212 { 3213 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 3214 return CP_ACCESS_TRAP; 3215 } 3216 return CP_ACCESS_OK; 3217 } 3218 3219 #else 3220 3221 /* In user-mode most of the generic timer registers are inaccessible 3222 * however modern kernels (4.12+) allow access to cntvct_el0 3223 */ 3224 3225 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3226 { 3227 ARMCPU *cpu = env_archcpu(env); 3228 3229 /* Currently we have no support for QEMUTimer in linux-user so we 3230 * can't call gt_get_countervalue(env), instead we directly 3231 * call the lower level functions. 
3232 */ 3233 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3234 } 3235 3236 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3237 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3238 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3239 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3240 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3241 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 3242 }, 3243 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3244 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3245 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3246 .readfn = gt_virt_cnt_read, 3247 }, 3248 REGINFO_SENTINEL 3249 }; 3250 3251 #endif 3252 3253 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3254 { 3255 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3256 raw_write(env, ri, value); 3257 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3258 raw_write(env, ri, value & 0xfffff6ff); 3259 } else { 3260 raw_write(env, ri, value & 0xfffff1ff); 3261 } 3262 } 3263 3264 #ifndef CONFIG_USER_ONLY 3265 /* get_phys_addr() isn't present for user-mode-only targets */ 3266 3267 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3268 bool isread) 3269 { 3270 if (ri->opc2 & 4) { 3271 /* The ATS12NSO* operations must trap to EL3 if executed in 3272 * Secure EL1 (which can only happen if EL3 is AArch64). 3273 * They are simply UNDEF if executed from NS EL1. 3274 * They function normally from EL2 or EL3. 3275 */ 3276 if (arm_current_el(env) == 1) { 3277 if (arm_is_secure_below_el3(env)) { 3278 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 3279 } 3280 return CP_ACCESS_TRAP_UNCATEGORIZED; 3281 } 3282 } 3283 return CP_ACCESS_OK; 3284 } 3285 3286 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3287 MMUAccessType access_type, ARMMMUIdx mmu_idx) 3288 { 3289 hwaddr phys_addr; 3290 target_ulong page_size; 3291 int prot; 3292 bool ret; 3293 uint64_t par64; 3294 bool format64 = false; 3295 MemTxAttrs attrs = {}; 3296 ARMMMUFaultInfo fi = {}; 3297 ARMCacheAttrs cacheattrs = {}; 3298 3299 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 3300 &prot, &page_size, &fi, &cacheattrs); 3301 3302 if (ret) { 3303 /* 3304 * Some kinds of translation fault must cause exceptions rather 3305 * than being reported in the PAR. 3306 */ 3307 int current_el = arm_current_el(env); 3308 int target_el; 3309 uint32_t syn, fsr, fsc; 3310 bool take_exc = false; 3311 3312 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env) 3313 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3314 /* 3315 * Synchronous stage 2 fault on an access made as part of the 3316 * translation table walk for AT S1E0* or AT S1E1* insn 3317 * executed from NS EL1. If this is a synchronous external abort 3318 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3319 * to EL3. Otherwise the fault is taken as an exception to EL2, 3320 * and HPFAR_EL2 holds the faulting IPA. 3321 */ 3322 if (fi.type == ARMFault_SyncExternalOnWalk && 3323 (env->cp15.scr_el3 & SCR_EA)) { 3324 target_el = 3; 3325 } else { 3326 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3327 target_el = 2; 3328 } 3329 take_exc = true; 3330 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3331 /* 3332 * Synchronous external aborts during a translation table walk 3333 * are taken as Data Abort exceptions. 
3334 */ 3335 if (fi.stage2) { 3336 if (current_el == 3) { 3337 target_el = 3; 3338 } else { 3339 target_el = 2; 3340 } 3341 } else { 3342 target_el = exception_target_el(env); 3343 } 3344 take_exc = true; 3345 } 3346 3347 if (take_exc) { 3348 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3349 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3350 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3351 fsr = arm_fi_to_lfsc(&fi); 3352 fsc = extract32(fsr, 0, 6); 3353 } else { 3354 fsr = arm_fi_to_sfsc(&fi); 3355 fsc = 0x3f; 3356 } 3357 /* 3358 * Report exception with ESR indicating a fault due to a 3359 * translation table walk for a cache maintenance instruction. 3360 */ 3361 syn = syn_data_abort_no_iss(current_el == target_el, 3362 fi.ea, 1, fi.s1ptw, 1, fsc); 3363 env->exception.vaddress = value; 3364 env->exception.fsr = fsr; 3365 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3366 } 3367 } 3368 3369 if (is_a64(env)) { 3370 format64 = true; 3371 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3372 /* 3373 * ATS1Cxx: 3374 * * TTBCR.EAE determines whether the result is returned using the 3375 * 32-bit or the 64-bit PAR format 3376 * * Instructions executed in Hyp mode always use the 64bit format 3377 * 3378 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3379 * * The Non-secure TTBCR.EAE bit is set to 1 3380 * * The implementation includes EL2, and the value of HCR.VM is 1 3381 * 3382 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3383 * 3384 * ATS1Hx always uses the 64bit format. 3385 */ 3386 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3387 3388 if (arm_feature(env, ARM_FEATURE_EL2)) { 3389 if (mmu_idx == ARMMMUIdx_E10_0 || 3390 mmu_idx == ARMMMUIdx_E10_1 || 3391 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3392 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3393 } else { 3394 format64 |= arm_current_el(env) == 2; 3395 } 3396 } 3397 } 3398 3399 if (format64) { 3400 /* Create a 64-bit PAR */ 3401 par64 = (1 << 11); /* LPAE bit always set */ 3402 if (!ret) { 3403 par64 |= phys_addr & ~0xfffULL; 3404 if (!attrs.secure) { 3405 par64 |= (1 << 9); /* NS */ 3406 } 3407 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 3408 par64 |= cacheattrs.shareability << 7; /* SH */ 3409 } else { 3410 uint32_t fsr = arm_fi_to_lfsc(&fi); 3411 3412 par64 |= 1; /* F */ 3413 par64 |= (fsr & 0x3f) << 1; /* FS */ 3414 if (fi.stage2) { 3415 par64 |= (1 << 9); /* S */ 3416 } 3417 if (fi.s1ptw) { 3418 par64 |= (1 << 8); /* PTW */ 3419 } 3420 } 3421 } else { 3422 /* fsr is a DFSR/IFSR value for the short descriptor 3423 * translation table format (with WnR always clear). 3424 * Convert it to a 32-bit PAR. 3425 */ 3426 if (!ret) { 3427 /* We do not set any attribute bits in the PAR */ 3428 if (page_size == (1 << 24) 3429 && arm_feature(env, ARM_FEATURE_V7)) { 3430 par64 = (phys_addr & 0xff000000) | (1 << 1); 3431 } else { 3432 par64 = phys_addr & 0xfffff000; 3433 } 3434 if (!attrs.secure) { 3435 par64 |= (1 << 9); /* NS */ 3436 } 3437 } else { 3438 uint32_t fsr = arm_fi_to_sfsc(&fi); 3439 3440 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3441 ((fsr & 0xf) << 1) | 1; 3442 } 3443 } 3444 return par64; 3445 } 3446 3447 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3448 { 3449 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 3450 uint64_t par64; 3451 ARMMMUIdx mmu_idx; 3452 int el = arm_current_el(env); 3453 bool secure = arm_is_secure_below_el3(env); 3454 3455 switch (ri->opc2 & 6) { 3456 case 0: 3457 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3458 switch (el) { 3459 case 3: 3460 mmu_idx = ARMMMUIdx_SE3; 3461 break; 3462 case 2: 3463 g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */ 3464 /* fall through */ 3465 case 1: 3466 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { 3467 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3468 : ARMMMUIdx_Stage1_E1_PAN); 3469 } else { 3470 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3471 } 3472 break; 3473 default: 3474 g_assert_not_reached(); 3475 } 3476 break; 3477 case 2: 3478 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3479 switch (el) { 3480 case 3: 3481 mmu_idx = ARMMMUIdx_SE10_0; 3482 break; 3483 case 2: 3484 mmu_idx = ARMMMUIdx_Stage1_E0; 3485 break; 3486 case 1: 3487 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3488 break; 3489 default: 3490 g_assert_not_reached(); 3491 } 3492 break; 3493 case 4: 3494 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3495 mmu_idx = ARMMMUIdx_E10_1; 3496 break; 3497 case 6: 3498 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3499 mmu_idx = ARMMMUIdx_E10_0; 3500 break; 3501 default: 3502 g_assert_not_reached(); 3503 } 3504 3505 par64 = do_ats_write(env, value, access_type, mmu_idx); 3506 3507 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3508 } 3509 3510 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3511 uint64_t value) 3512 { 3513 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3514 uint64_t par64; 3515 3516 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); 3517 3518 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3519 } 3520 3521 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3522 bool isread) 3523 { 3524 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 3525 return CP_ACCESS_TRAP; 3526 } 3527 return CP_ACCESS_OK; 3528 } 3529 3530 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3531 uint64_t value) 3532 { 3533 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3534 ARMMMUIdx mmu_idx; 3535 int secure = arm_is_secure_below_el3(env); 3536 3537 switch (ri->opc2 & 6) { 3538 case 0: 3539 switch (ri->opc1) { 3540 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3541 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { 3542 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3543 : ARMMMUIdx_Stage1_E1_PAN); 3544 } else { 3545 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3546 } 3547 break; 3548 case 4: /* AT S1E2R, AT S1E2W */ 3549 mmu_idx = ARMMMUIdx_E2; 3550 break; 3551 case 6: /* AT S1E3R, AT S1E3W */ 3552 mmu_idx = ARMMMUIdx_SE3; 3553 break; 3554 default: 3555 g_assert_not_reached(); 3556 } 3557 break; 3558 case 2: /* AT S1E0R, AT S1E0W */ 3559 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3560 break; 3561 case 4: /* AT S12E1R, AT S12E1W */ 3562 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; 3563 break; 3564 case 6: /* AT S12E0R, AT S12E0W */ 3565 mmu_idx = secure ? 
ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; 3566 break; 3567 default: 3568 g_assert_not_reached(); 3569 } 3570 3571 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3572 } 3573 #endif 3574 3575 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3576 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3577 .access = PL1_RW, .resetvalue = 0, 3578 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3579 offsetoflow32(CPUARMState, cp15.par_ns) }, 3580 .writefn = par_write }, 3581 #ifndef CONFIG_USER_ONLY 3582 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3583 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3584 .access = PL1_W, .accessfn = ats_access, 3585 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 3586 #endif 3587 REGINFO_SENTINEL 3588 }; 3589 3590 /* Return basic MPU access permission bits. */ 3591 static uint32_t simple_mpu_ap_bits(uint32_t val) 3592 { 3593 uint32_t ret; 3594 uint32_t mask; 3595 int i; 3596 ret = 0; 3597 mask = 3; 3598 for (i = 0; i < 16; i += 2) { 3599 ret |= (val >> i) & mask; 3600 mask <<= 2; 3601 } 3602 return ret; 3603 } 3604 3605 /* Pad basic MPU access permission bits to extended format. */ 3606 static uint32_t extended_mpu_ap_bits(uint32_t val) 3607 { 3608 uint32_t ret; 3609 uint32_t mask; 3610 int i; 3611 ret = 0; 3612 mask = 3; 3613 for (i = 0; i < 16; i += 2) { 3614 ret |= (val & mask) << i; 3615 mask <<= 2; 3616 } 3617 return ret; 3618 } 3619 3620 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3621 uint64_t value) 3622 { 3623 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3624 } 3625 3626 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3627 { 3628 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3629 } 3630 3631 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3632 uint64_t value) 3633 { 3634 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3635 } 3636 3637 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3638 { 3639 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3640 } 3641 3642 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3643 { 3644 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3645 3646 if (!u32p) { 3647 return 0; 3648 } 3649 3650 u32p += env->pmsav7.rnr[M_REG_NS]; 3651 return *u32p; 3652 } 3653 3654 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3655 uint64_t value) 3656 { 3657 ARMCPU *cpu = env_archcpu(env); 3658 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3659 3660 if (!u32p) { 3661 return; 3662 } 3663 3664 u32p += env->pmsav7.rnr[M_REG_NS]; 3665 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3666 *u32p = value; 3667 } 3668 3669 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3670 uint64_t value) 3671 { 3672 ARMCPU *cpu = env_archcpu(env); 3673 uint32_t nrgs = cpu->pmsav7_dregion; 3674 3675 if (value >= nrgs) { 3676 qemu_log_mask(LOG_GUEST_ERROR, 3677 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3678 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3679 return; 3680 } 3681 3682 raw_write(env, ri, value); 3683 } 3684 3685 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3686 /* Reset for all these registers is handled in arm_cpu_reset(), 3687 * because the PMSAv7 is also used by M-profile CPUs, which do 3688 * not register cpregs but still need the state to be reset. 
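     * That is why each regdef below sets .resetfn = arm_cp_reset_ignore.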
3689 */ 3690 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3691 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3692 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3693 .readfn = pmsav7_read, .writefn = pmsav7_write, 3694 .resetfn = arm_cp_reset_ignore }, 3695 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3696 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3697 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3698 .readfn = pmsav7_read, .writefn = pmsav7_write, 3699 .resetfn = arm_cp_reset_ignore }, 3700 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3701 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3702 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3703 .readfn = pmsav7_read, .writefn = pmsav7_write, 3704 .resetfn = arm_cp_reset_ignore }, 3705 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3706 .access = PL1_RW, 3707 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3708 .writefn = pmsav7_rgnr_write, 3709 .resetfn = arm_cp_reset_ignore }, 3710 REGINFO_SENTINEL 3711 }; 3712 3713 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3714 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3715 .access = PL1_RW, .type = ARM_CP_ALIAS, 3716 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3717 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3718 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3719 .access = PL1_RW, .type = ARM_CP_ALIAS, 3720 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3721 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3722 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3723 .access = PL1_RW, 3724 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3725 .resetvalue = 0, }, 3726 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3727 .access = PL1_RW, 3728 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3729 .resetvalue = 0, }, 3730 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3731 .access = PL1_RW, 3732 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3733 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3734 .access = PL1_RW, 3735 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3736 /* Protection region base and size registers */ 3737 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3738 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3739 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3740 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3741 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3742 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3743 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3744 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3745 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3746 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3747 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3748 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3749 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3750 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3751 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3752 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3753 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3754 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3);

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written.
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(CPU(cpu));
    }
    /* Preserve the high half of TCR_EL1, set via TTBCR2. */
    value = deposit64(tcr->raw_tcr, 0, 32, value);
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    TCR *tcr = raw_ptr(env, ri);

    /* Reset both the TCR as well as the masks corresponding to the bank of
     * the TCR being reset.
     */
    tcr->raw_tcr = 0;
    tcr->mask = 0;
    tcr->base_mask = 0xffffc000u;
}

static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    TCR *tcr = raw_ptr(env, ri);

    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(CPU(cpu));
    tcr->raw_tcr = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* If the ASID changes (with a 64-bit write), we must flush the TLB. */
    if (cpreg_field_is_64bit(ri) &&
        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
        ARMCPU *cpu = env_archcpu(env);
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
{
    /*
     * If we are running with E2&0 regime, then an ASID is active.
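     * (In that regime the active ASID lives in bits [63:48] of TTBR0_EL2
     * or TTBR1_EL2, selected by TCR_EL2.A1.)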
3854 * Flush if that might be changing. Note we're not checking 3855 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 3856 * holds the active ASID, only checking the field that might. 3857 */ 3858 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 3859 (arm_hcr_el2_eff(env) & HCR_E2H)) { 3860 tlb_flush_by_mmuidx(env_cpu(env), 3861 ARMMMUIdxBit_E20_2 | 3862 ARMMMUIdxBit_E20_2_PAN | 3863 ARMMMUIdxBit_E20_0); 3864 } 3865 raw_write(env, ri, value); 3866 } 3867 3868 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3869 uint64_t value) 3870 { 3871 ARMCPU *cpu = env_archcpu(env); 3872 CPUState *cs = CPU(cpu); 3873 3874 /* 3875 * A change in VMID to the stage2 page table (Stage2) invalidates 3876 * the combined stage 1&2 tlbs (EL10_1 and EL10_0). 3877 */ 3878 if (raw_read(env, ri) != value) { 3879 tlb_flush_by_mmuidx(cs, 3880 ARMMMUIdxBit_E10_1 | 3881 ARMMMUIdxBit_E10_1_PAN | 3882 ARMMMUIdxBit_E10_0 | 3883 ARMMMUIdxBit_Stage2); 3884 raw_write(env, ri, value); 3885 } 3886 } 3887 3888 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 3889 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3890 .access = PL1_RW, .type = ARM_CP_ALIAS, 3891 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 3892 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 3893 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3894 .access = PL1_RW, .resetvalue = 0, 3895 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 3896 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 3897 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 3898 .access = PL1_RW, .resetvalue = 0, 3899 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 3900 offsetof(CPUARMState, cp15.dfar_ns) } }, 3901 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 3902 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 3903 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 3904 .resetvalue = 0, }, 3905 REGINFO_SENTINEL 3906 }; 3907 3908 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 3909 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 3910 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 3911 .access = PL1_RW, 3912 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 3913 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 3914 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 3915 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3916 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3917 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 3918 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 3919 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 3920 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3921 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3922 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 3923 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 3924 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3925 .access = PL1_RW, .writefn = vmsa_tcr_el12_write, 3926 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 3927 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 3928 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3929 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 3930 .raw_writefn = vmsa_ttbcr_raw_write, 3931 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 3932 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 3933 REGINFO_SENTINEL 3934 }; 3935 3936 /* Note that unlike TTBCR, writing 
to TTBCR2 does not require flushing 3937 * qemu tlbs nor adjusting cached masks. 3938 */ 3939 static const ARMCPRegInfo ttbcr2_reginfo = { 3940 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 3941 .access = PL1_RW, .type = ARM_CP_ALIAS, 3942 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 3943 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, 3944 }; 3945 3946 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 3947 uint64_t value) 3948 { 3949 env->cp15.c15_ticonfig = value & 0xe7; 3950 /* The OS_TYPE bit in this register changes the reported CPUID! */ 3951 env->cp15.c0_cpuid = (value & (1 << 5)) ? 3952 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 3953 } 3954 3955 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 3956 uint64_t value) 3957 { 3958 env->cp15.c15_threadid = value & 0xffff; 3959 } 3960 3961 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 3962 uint64_t value) 3963 { 3964 /* Wait-for-interrupt (deprecated) */ 3965 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 3966 } 3967 3968 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 3969 uint64_t value) 3970 { 3971 /* On OMAP there are registers indicating the max/min index of dcache lines 3972 * containing a dirty line; cache flush operations have to reset these. 3973 */ 3974 env->cp15.c15_i_max = 0x000; 3975 env->cp15.c15_i_min = 0xff0; 3976 } 3977 3978 static const ARMCPRegInfo omap_cp_reginfo[] = { 3979 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 3980 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 3981 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 3982 .resetvalue = 0, }, 3983 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 3984 .access = PL1_RW, .type = ARM_CP_NOP }, 3985 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 3986 .access = PL1_RW, 3987 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 3988 .writefn = omap_ticonfig_write }, 3989 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 3990 .access = PL1_RW, 3991 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 3992 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 3993 .access = PL1_RW, .resetvalue = 0xff0, 3994 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 3995 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 3996 .access = PL1_RW, 3997 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 3998 .writefn = omap_threadid_write }, 3999 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 4000 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4001 .type = ARM_CP_NO_RAW, 4002 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 4003 /* TODO: Peripheral port remap register: 4004 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 4005 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 4006 * when MMU is off. 
4007 */ 4008 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 4009 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 4010 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 4011 .writefn = omap_cachemaint_write }, 4012 { .name = "C9", .cp = 15, .crn = 9, 4013 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 4014 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 4015 REGINFO_SENTINEL 4016 }; 4017 4018 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4019 uint64_t value) 4020 { 4021 env->cp15.c15_cpar = value & 0x3fff; 4022 } 4023 4024 static const ARMCPRegInfo xscale_cp_reginfo[] = { 4025 { .name = "XSCALE_CPAR", 4026 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4027 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 4028 .writefn = xscale_cpar_write, }, 4029 { .name = "XSCALE_AUXCR", 4030 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 4031 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 4032 .resetvalue = 0, }, 4033 /* XScale specific cache-lockdown: since we have no cache we NOP these 4034 * and hope the guest does not really rely on cache behaviour. 4035 */ 4036 { .name = "XSCALE_LOCK_ICACHE_LINE", 4037 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 4038 .access = PL1_W, .type = ARM_CP_NOP }, 4039 { .name = "XSCALE_UNLOCK_ICACHE", 4040 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 4041 .access = PL1_W, .type = ARM_CP_NOP }, 4042 { .name = "XSCALE_DCACHE_LOCK", 4043 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 4044 .access = PL1_RW, .type = ARM_CP_NOP }, 4045 { .name = "XSCALE_UNLOCK_DCACHE", 4046 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 4047 .access = PL1_W, .type = ARM_CP_NOP }, 4048 REGINFO_SENTINEL 4049 }; 4050 4051 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 4052 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 4053 * implementation of this implementation-defined space. 4054 * Ideally this should eventually disappear in favour of actually 4055 * implementing the correct behaviour for all cores. 
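 * (The ARM_CP_OVERRIDE flag is what lets a CPU model that does provide
 * specific crn=15 registers register them alongside this wildcard.)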
 */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};

static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = env_archcpu(env);
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
        return env->cp15.vpidr_el2;
    }
    return raw_read(env, ri);
}

static uint64_t mpidr_read_val(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t mpidr = cpu->mp_affinity;

    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, Cortex-R5).
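         * This is the MPIDR 'U' (uniprocessor) bit.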
4138 */ 4139 if (cpu->mp_is_up) { 4140 mpidr |= (1u << 30); 4141 } 4142 } 4143 return mpidr; 4144 } 4145 4146 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4147 { 4148 unsigned int cur_el = arm_current_el(env); 4149 bool secure = arm_is_secure(env); 4150 4151 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 4152 return env->cp15.vmpidr_el2; 4153 } 4154 return mpidr_read_val(env); 4155 } 4156 4157 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4158 /* NOP AMAIR0/1 */ 4159 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4160 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4161 .access = PL1_RW, .type = ARM_CP_CONST, 4162 .resetvalue = 0 }, 4163 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4164 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4165 .access = PL1_RW, .type = ARM_CP_CONST, 4166 .resetvalue = 0 }, 4167 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4168 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4169 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4170 offsetof(CPUARMState, cp15.par_ns)} }, 4171 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4172 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4173 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4174 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4175 .writefn = vmsa_ttbr_write, }, 4176 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4177 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4178 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4179 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4180 .writefn = vmsa_ttbr_write, }, 4181 REGINFO_SENTINEL 4182 }; 4183 4184 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4185 { 4186 return vfp_get_fpcr(env); 4187 } 4188 4189 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4190 uint64_t value) 4191 { 4192 vfp_set_fpcr(env, value); 4193 } 4194 4195 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4196 { 4197 return vfp_get_fpsr(env); 4198 } 4199 4200 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4201 uint64_t value) 4202 { 4203 vfp_set_fpsr(env, value); 4204 } 4205 4206 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4207 bool isread) 4208 { 4209 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4210 return CP_ACCESS_TRAP; 4211 } 4212 return CP_ACCESS_OK; 4213 } 4214 4215 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4216 uint64_t value) 4217 { 4218 env->daif = value & PSTATE_DAIF; 4219 } 4220 4221 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4222 { 4223 return env->pstate & PSTATE_PAN; 4224 } 4225 4226 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4227 uint64_t value) 4228 { 4229 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4230 } 4231 4232 static const ARMCPRegInfo pan_reginfo = { 4233 .name = "PAN", .state = ARM_CP_STATE_AA64, 4234 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4235 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4236 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4237 }; 4238 4239 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4240 { 4241 return env->pstate & PSTATE_UAO; 4242 } 4243 4244 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4245 uint64_t value) 4246 { 4247 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4248 } 4249 4250 static const 
ARMCPRegInfo uao_reginfo = { 4251 .name = "UAO", .state = ARM_CP_STATE_AA64, 4252 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4253 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4254 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4255 }; 4256 4257 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 4258 const ARMCPRegInfo *ri, 4259 bool isread) 4260 { 4261 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 4262 * SCTLR_EL1.UCI is set. 4263 */ 4264 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) { 4265 return CP_ACCESS_TRAP; 4266 } 4267 return CP_ACCESS_OK; 4268 } 4269 4270 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 4271 * Page D4-1736 (DDI0487A.b) 4272 */ 4273 4274 static int vae1_tlbmask(CPUARMState *env) 4275 { 4276 /* Since we exclude secure first, we may read HCR_EL2 directly. */ 4277 if (arm_is_secure_below_el3(env)) { 4278 return ARMMMUIdxBit_SE10_1 | 4279 ARMMMUIdxBit_SE10_1_PAN | 4280 ARMMMUIdxBit_SE10_0; 4281 } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) 4282 == (HCR_E2H | HCR_TGE)) { 4283 return ARMMMUIdxBit_E20_2 | 4284 ARMMMUIdxBit_E20_2_PAN | 4285 ARMMMUIdxBit_E20_0; 4286 } else { 4287 return ARMMMUIdxBit_E10_1 | 4288 ARMMMUIdxBit_E10_1_PAN | 4289 ARMMMUIdxBit_E10_0; 4290 } 4291 } 4292 4293 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4294 uint64_t value) 4295 { 4296 CPUState *cs = env_cpu(env); 4297 int mask = vae1_tlbmask(env); 4298 4299 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4300 } 4301 4302 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4303 uint64_t value) 4304 { 4305 CPUState *cs = env_cpu(env); 4306 int mask = vae1_tlbmask(env); 4307 4308 if (tlb_force_broadcast(env)) { 4309 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4310 } else { 4311 tlb_flush_by_mmuidx(cs, mask); 4312 } 4313 } 4314 4315 static int alle1_tlbmask(CPUARMState *env) 4316 { 4317 /* 4318 * Note that the 'ALL' scope must invalidate both stage 1 and 4319 * stage 2 translations, whereas most other scopes only invalidate 4320 * stage 1 translations. 
4321 */ 4322 if (arm_is_secure_below_el3(env)) { 4323 return ARMMMUIdxBit_SE10_1 | 4324 ARMMMUIdxBit_SE10_1_PAN | 4325 ARMMMUIdxBit_SE10_0; 4326 } else if (arm_feature(env, ARM_FEATURE_EL2)) { 4327 return ARMMMUIdxBit_E10_1 | 4328 ARMMMUIdxBit_E10_1_PAN | 4329 ARMMMUIdxBit_E10_0 | 4330 ARMMMUIdxBit_Stage2; 4331 } else { 4332 return ARMMMUIdxBit_E10_1 | 4333 ARMMMUIdxBit_E10_1_PAN | 4334 ARMMMUIdxBit_E10_0; 4335 } 4336 } 4337 4338 static int e2_tlbmask(CPUARMState *env) 4339 { 4340 /* TODO: ARMv8.4-SecEL2 */ 4341 return ARMMMUIdxBit_E20_0 | 4342 ARMMMUIdxBit_E20_2 | 4343 ARMMMUIdxBit_E20_2_PAN | 4344 ARMMMUIdxBit_E2; 4345 } 4346 4347 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4348 uint64_t value) 4349 { 4350 CPUState *cs = env_cpu(env); 4351 int mask = alle1_tlbmask(env); 4352 4353 tlb_flush_by_mmuidx(cs, mask); 4354 } 4355 4356 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4357 uint64_t value) 4358 { 4359 CPUState *cs = env_cpu(env); 4360 int mask = e2_tlbmask(env); 4361 4362 tlb_flush_by_mmuidx(cs, mask); 4363 } 4364 4365 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4366 uint64_t value) 4367 { 4368 ARMCPU *cpu = env_archcpu(env); 4369 CPUState *cs = CPU(cpu); 4370 4371 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); 4372 } 4373 4374 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4375 uint64_t value) 4376 { 4377 CPUState *cs = env_cpu(env); 4378 int mask = alle1_tlbmask(env); 4379 4380 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4381 } 4382 4383 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4384 uint64_t value) 4385 { 4386 CPUState *cs = env_cpu(env); 4387 int mask = e2_tlbmask(env); 4388 4389 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4390 } 4391 4392 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4393 uint64_t value) 4394 { 4395 CPUState *cs = env_cpu(env); 4396 4397 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); 4398 } 4399 4400 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4401 uint64_t value) 4402 { 4403 /* Invalidate by VA, EL2 4404 * Currently handles both VAE2 and VALE2, since we don't support 4405 * flush-last-level-only. 4406 */ 4407 CPUState *cs = env_cpu(env); 4408 int mask = e2_tlbmask(env); 4409 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4410 4411 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4412 } 4413 4414 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4415 uint64_t value) 4416 { 4417 /* Invalidate by VA, EL3 4418 * Currently handles both VAE3 and VALE3, since we don't support 4419 * flush-last-level-only. 4420 */ 4421 ARMCPU *cpu = env_archcpu(env); 4422 CPUState *cs = CPU(cpu); 4423 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4424 4425 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); 4426 } 4427 4428 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4429 uint64_t value) 4430 { 4431 CPUState *cs = env_cpu(env); 4432 int mask = vae1_tlbmask(env); 4433 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4434 4435 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4436 } 4437 4438 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4439 uint64_t value) 4440 { 4441 /* Invalidate by VA, EL1&0 (AArch64 version). 
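 * The argument register holds VA[55:12] in its low bits; shifting it
 * left by 12 and sign-extending from bit 55 recovers the page address
 * (e.g. a value of 0x81de4 names the page at VA 0x81de4000).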
4442 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 4443 * since we don't support flush-for-specific-ASID-only or 4444 * flush-last-level-only. 4445 */ 4446 CPUState *cs = env_cpu(env); 4447 int mask = vae1_tlbmask(env); 4448 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4449 4450 if (tlb_force_broadcast(env)) { 4451 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4452 } else { 4453 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4454 } 4455 } 4456 4457 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4458 uint64_t value) 4459 { 4460 CPUState *cs = env_cpu(env); 4461 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4462 4463 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4464 ARMMMUIdxBit_E2); 4465 } 4466 4467 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4468 uint64_t value) 4469 { 4470 CPUState *cs = env_cpu(env); 4471 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4472 4473 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4474 ARMMMUIdxBit_SE3); 4475 } 4476 4477 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4478 uint64_t value) 4479 { 4480 /* Invalidate by IPA. This has to invalidate any structures that 4481 * contain only stage 2 translation information, but does not need 4482 * to apply to structures that contain combined stage 1 and stage 2 4483 * translation information. 4484 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 4485 */ 4486 ARMCPU *cpu = env_archcpu(env); 4487 CPUState *cs = CPU(cpu); 4488 uint64_t pageaddr; 4489 4490 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4491 return; 4492 } 4493 4494 pageaddr = sextract64(value << 12, 0, 48); 4495 4496 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); 4497 } 4498 4499 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4500 uint64_t value) 4501 { 4502 CPUState *cs = env_cpu(env); 4503 uint64_t pageaddr; 4504 4505 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4506 return; 4507 } 4508 4509 pageaddr = sextract64(value << 12, 0, 48); 4510 4511 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4512 ARMMMUIdxBit_Stage2); 4513 } 4514 4515 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4516 bool isread) 4517 { 4518 int cur_el = arm_current_el(env); 4519 4520 if (cur_el < 2) { 4521 uint64_t hcr = arm_hcr_el2_eff(env); 4522 4523 if (cur_el == 0) { 4524 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4525 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4526 return CP_ACCESS_TRAP_EL2; 4527 } 4528 } else { 4529 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4530 return CP_ACCESS_TRAP; 4531 } 4532 if (hcr & HCR_TDZ) { 4533 return CP_ACCESS_TRAP_EL2; 4534 } 4535 } 4536 } else if (hcr & HCR_TDZ) { 4537 return CP_ACCESS_TRAP_EL2; 4538 } 4539 } 4540 return CP_ACCESS_OK; 4541 } 4542 4543 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4544 { 4545 ARMCPU *cpu = env_archcpu(env); 4546 int dzp_bit = 1 << 4; 4547 4548 /* DZP indicates whether DC ZVA access is allowed */ 4549 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4550 dzp_bit = 0; 4551 } 4552 return cpu->dcz_blocksize | dzp_bit; 4553 } 4554 4555 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4556 bool isread) 4557 { 4558 if (!(env->pstate & PSTATE_SP)) { 4559 /* Access to SP_EL0 is undefined if it's being used as 4560 * the stack 
pointer. 4561 */ 4562 return CP_ACCESS_TRAP_UNCATEGORIZED; 4563 } 4564 return CP_ACCESS_OK; 4565 } 4566 4567 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4568 { 4569 return env->pstate & PSTATE_SP; 4570 } 4571 4572 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4573 { 4574 update_spsel(env, val); 4575 } 4576 4577 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4578 uint64_t value) 4579 { 4580 ARMCPU *cpu = env_archcpu(env); 4581 4582 if (raw_read(env, ri) == value) { 4583 /* Skip the TLB flush if nothing actually changed; Linux likes 4584 * to do a lot of pointless SCTLR writes. 4585 */ 4586 return; 4587 } 4588 4589 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4590 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4591 value &= ~SCTLR_M; 4592 } 4593 4594 raw_write(env, ri, value); 4595 /* ??? Lots of these bits are not implemented. */ 4596 /* This may enable/disable the MMU, so do a TLB flush. */ 4597 tlb_flush(CPU(cpu)); 4598 4599 if (ri->type & ARM_CP_SUPPRESS_TB_END) { 4600 /* 4601 * Normally we would always end the TB on an SCTLR write; see the 4602 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 4603 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 4604 * of hflags from the translator, so do it here. 4605 */ 4606 arm_rebuild_hflags(env); 4607 } 4608 } 4609 4610 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 4611 bool isread) 4612 { 4613 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 4614 return CP_ACCESS_TRAP_FP_EL2; 4615 } 4616 if (env->cp15.cptr_el[3] & CPTR_TFP) { 4617 return CP_ACCESS_TRAP_FP_EL3; 4618 } 4619 return CP_ACCESS_OK; 4620 } 4621 4622 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4623 uint64_t value) 4624 { 4625 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4626 } 4627 4628 static const ARMCPRegInfo v8_cp_reginfo[] = { 4629 /* Minimal set of EL0-visible registers. This will need to be expanded 4630 * significantly for system emulation of AArch64 CPUs. 
4631 */ 4632 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4633 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4634 .access = PL0_RW, .type = ARM_CP_NZCV }, 4635 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4636 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4637 .type = ARM_CP_NO_RAW, 4638 .access = PL0_RW, .accessfn = aa64_daif_access, 4639 .fieldoffset = offsetof(CPUARMState, daif), 4640 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4641 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4642 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4643 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4644 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4645 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4646 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4647 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4648 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4649 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4650 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4651 .access = PL0_R, .type = ARM_CP_NO_RAW, 4652 .readfn = aa64_dczid_read }, 4653 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4654 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4655 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4656 #ifndef CONFIG_USER_ONLY 4657 /* Avoid overhead of an access check that always passes in user-mode */ 4658 .accessfn = aa64_zva_access, 4659 #endif 4660 }, 4661 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4662 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4663 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4664 /* Cache ops: all NOPs since we don't emulate caches */ 4665 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4666 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4667 .access = PL1_W, .type = ARM_CP_NOP }, 4668 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4669 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4670 .access = PL1_W, .type = ARM_CP_NOP }, 4671 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4672 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4673 .access = PL0_W, .type = ARM_CP_NOP, 4674 .accessfn = aa64_cacheop_access }, 4675 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4676 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4677 .access = PL1_W, .type = ARM_CP_NOP }, 4678 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4679 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4680 .access = PL1_W, .type = ARM_CP_NOP }, 4681 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4682 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4683 .access = PL0_W, .type = ARM_CP_NOP, 4684 .accessfn = aa64_cacheop_access }, 4685 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4686 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4687 .access = PL1_W, .type = ARM_CP_NOP }, 4688 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4689 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4690 .access = PL0_W, .type = ARM_CP_NOP, 4691 .accessfn = aa64_cacheop_access }, 4692 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4693 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4694 .access = PL0_W, .type = ARM_CP_NOP, 4695 .accessfn = aa64_cacheop_access }, 4696 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4697 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4698 .access = PL1_W, .type = ARM_CP_NOP }, 4699 /* TLBI operations */ 4700 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4701 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4702 
.access = PL1_W, .type = ARM_CP_NO_RAW, 4703 .writefn = tlbi_aa64_vmalle1is_write }, 4704 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4705 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4706 .access = PL1_W, .type = ARM_CP_NO_RAW, 4707 .writefn = tlbi_aa64_vae1is_write }, 4708 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4709 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4710 .access = PL1_W, .type = ARM_CP_NO_RAW, 4711 .writefn = tlbi_aa64_vmalle1is_write }, 4712 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4713 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4714 .access = PL1_W, .type = ARM_CP_NO_RAW, 4715 .writefn = tlbi_aa64_vae1is_write }, 4716 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4717 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4718 .access = PL1_W, .type = ARM_CP_NO_RAW, 4719 .writefn = tlbi_aa64_vae1is_write }, 4720 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4721 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4722 .access = PL1_W, .type = ARM_CP_NO_RAW, 4723 .writefn = tlbi_aa64_vae1is_write }, 4724 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4725 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4726 .access = PL1_W, .type = ARM_CP_NO_RAW, 4727 .writefn = tlbi_aa64_vmalle1_write }, 4728 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4729 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4730 .access = PL1_W, .type = ARM_CP_NO_RAW, 4731 .writefn = tlbi_aa64_vae1_write }, 4732 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4733 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4734 .access = PL1_W, .type = ARM_CP_NO_RAW, 4735 .writefn = tlbi_aa64_vmalle1_write }, 4736 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4737 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 4738 .access = PL1_W, .type = ARM_CP_NO_RAW, 4739 .writefn = tlbi_aa64_vae1_write }, 4740 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4741 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4742 .access = PL1_W, .type = ARM_CP_NO_RAW, 4743 .writefn = tlbi_aa64_vae1_write }, 4744 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4745 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4746 .access = PL1_W, .type = ARM_CP_NO_RAW, 4747 .writefn = tlbi_aa64_vae1_write }, 4748 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4749 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4750 .access = PL2_W, .type = ARM_CP_NO_RAW, 4751 .writefn = tlbi_aa64_ipas2e1is_write }, 4752 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4753 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4754 .access = PL2_W, .type = ARM_CP_NO_RAW, 4755 .writefn = tlbi_aa64_ipas2e1is_write }, 4756 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4757 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4758 .access = PL2_W, .type = ARM_CP_NO_RAW, 4759 .writefn = tlbi_aa64_alle1is_write }, 4760 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4761 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 4762 .access = PL2_W, .type = ARM_CP_NO_RAW, 4763 .writefn = tlbi_aa64_alle1is_write }, 4764 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4765 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4766 .access = PL2_W, .type = ARM_CP_NO_RAW, 4767 .writefn = tlbi_aa64_ipas2e1_write }, 4768 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4769 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4770 .access = PL2_W, .type = ARM_CP_NO_RAW, 4771 .writefn = 
tlbi_aa64_ipas2e1_write }, 4772 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4773 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4774 .access = PL2_W, .type = ARM_CP_NO_RAW, 4775 .writefn = tlbi_aa64_alle1_write }, 4776 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4777 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4778 .access = PL2_W, .type = ARM_CP_NO_RAW, 4779 .writefn = tlbi_aa64_alle1is_write }, 4780 #ifndef CONFIG_USER_ONLY 4781 /* 64 bit address translation operations */ 4782 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4783 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4784 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4785 .writefn = ats_write64 }, 4786 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4787 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4788 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4789 .writefn = ats_write64 }, 4790 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4791 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4792 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4793 .writefn = ats_write64 }, 4794 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4795 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4796 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4797 .writefn = ats_write64 }, 4798 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4799 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4800 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4801 .writefn = ats_write64 }, 4802 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4803 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4804 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4805 .writefn = ats_write64 }, 4806 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4807 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4808 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4809 .writefn = ats_write64 }, 4810 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 4811 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4812 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4813 .writefn = ats_write64 }, 4814 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4815 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4816 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4817 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4818 .writefn = ats_write64 }, 4819 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4820 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4821 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4822 .writefn = ats_write64 }, 4823 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4824 .type = ARM_CP_ALIAS, 4825 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4826 .access = PL1_RW, .resetvalue = 0, 4827 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4828 .writefn = par_write }, 4829 #endif 4830 /* TLB invalidate last level of translation table walk */ 4831 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4832 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 4833 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4834 .type = ARM_CP_NO_RAW, .access = PL1_W, 4835 .writefn = tlbimvaa_is_write }, 4836 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4837 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 4838 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, 
.crn = 8, .crm = 7, .opc2 = 7, 4839 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 4840 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4841 .type = ARM_CP_NO_RAW, .access = PL2_W, 4842 .writefn = tlbimva_hyp_write }, 4843 { .name = "TLBIMVALHIS", 4844 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4845 .type = ARM_CP_NO_RAW, .access = PL2_W, 4846 .writefn = tlbimva_hyp_is_write }, 4847 { .name = "TLBIIPAS2", 4848 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4849 .type = ARM_CP_NO_RAW, .access = PL2_W, 4850 .writefn = tlbiipas2_write }, 4851 { .name = "TLBIIPAS2IS", 4852 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4853 .type = ARM_CP_NO_RAW, .access = PL2_W, 4854 .writefn = tlbiipas2_is_write }, 4855 { .name = "TLBIIPAS2L", 4856 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4857 .type = ARM_CP_NO_RAW, .access = PL2_W, 4858 .writefn = tlbiipas2_write }, 4859 { .name = "TLBIIPAS2LIS", 4860 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4861 .type = ARM_CP_NO_RAW, .access = PL2_W, 4862 .writefn = tlbiipas2_is_write }, 4863 /* 32 bit cache operations */ 4864 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4865 .type = ARM_CP_NOP, .access = PL1_W }, 4866 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4867 .type = ARM_CP_NOP, .access = PL1_W }, 4868 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4869 .type = ARM_CP_NOP, .access = PL1_W }, 4870 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 4871 .type = ARM_CP_NOP, .access = PL1_W }, 4872 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 4873 .type = ARM_CP_NOP, .access = PL1_W }, 4874 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 4875 .type = ARM_CP_NOP, .access = PL1_W }, 4876 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4877 .type = ARM_CP_NOP, .access = PL1_W }, 4878 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4879 .type = ARM_CP_NOP, .access = PL1_W }, 4880 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 4881 .type = ARM_CP_NOP, .access = PL1_W }, 4882 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4883 .type = ARM_CP_NOP, .access = PL1_W }, 4884 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 4885 .type = ARM_CP_NOP, .access = PL1_W }, 4886 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 4887 .type = ARM_CP_NOP, .access = PL1_W }, 4888 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4889 .type = ARM_CP_NOP, .access = PL1_W }, 4890 /* MMU Domain access control / MPU write buffer control */ 4891 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 4892 .access = PL1_RW, .resetvalue = 0, 4893 .writefn = dacr_write, .raw_writefn = raw_write, 4894 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 4895 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 4896 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 4897 .type = ARM_CP_ALIAS, 4898 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 4899 .access = PL1_RW, 4900 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 4901 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 4902 .type = ARM_CP_ALIAS, 4903 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 4904 .access = PL1_RW, 4905 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 4906 /* We rely 
on the access checks not allowing the guest to write to the 4907 * state field when SPSel indicates that it's being used as the stack 4908 * pointer. 4909 */ 4910 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 4911 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 4912 .access = PL1_RW, .accessfn = sp_el0_access, 4913 .type = ARM_CP_ALIAS, 4914 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 4915 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 4916 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 4917 .access = PL2_RW, .type = ARM_CP_ALIAS, 4918 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 4919 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 4920 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 4921 .type = ARM_CP_NO_RAW, 4922 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 4923 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 4924 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 4925 .type = ARM_CP_ALIAS, 4926 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 4927 .access = PL2_RW, .accessfn = fpexc32_access }, 4928 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 4929 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 4930 .access = PL2_RW, .resetvalue = 0, 4931 .writefn = dacr_write, .raw_writefn = raw_write, 4932 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 4933 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 4934 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 4935 .access = PL2_RW, .resetvalue = 0, 4936 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 4937 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 4938 .type = ARM_CP_ALIAS, 4939 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 4940 .access = PL2_RW, 4941 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 4942 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 4943 .type = ARM_CP_ALIAS, 4944 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 4945 .access = PL2_RW, 4946 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 4947 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 4948 .type = ARM_CP_ALIAS, 4949 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 4950 .access = PL2_RW, 4951 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 4952 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 4953 .type = ARM_CP_ALIAS, 4954 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 4955 .access = PL2_RW, 4956 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 4957 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 4958 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 4959 .resetvalue = 0, 4960 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 4961 { .name = "SDCR", .type = ARM_CP_ALIAS, 4962 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 4963 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4964 .writefn = sdcr_write, 4965 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 4966 REGINFO_SENTINEL 4967 }; 4968 4969 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
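 * Most of them are ARM_CP_CONST with a zero resetvalue (or use
 * arm_cp_read_zero/arm_cp_write_ignore), so they read as zero and
 * ignore writes.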
*/ 4970 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 4971 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 4972 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 4973 .access = PL2_RW, 4974 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 4975 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 4977 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4978 .access = PL2_RW, 4979 .type = ARM_CP_CONST, .resetvalue = 0 }, 4980 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 4981 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 4982 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4983 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 4984 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 4985 .access = PL2_RW, 4986 .type = ARM_CP_CONST, .resetvalue = 0 }, 4987 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 4988 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 4989 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4990 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 4991 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 4992 .access = PL2_RW, .type = ARM_CP_CONST, 4993 .resetvalue = 0 }, 4994 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 4995 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 4996 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4997 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 4998 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 4999 .access = PL2_RW, .type = ARM_CP_CONST, 5000 .resetvalue = 0 }, 5001 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5002 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5003 .access = PL2_RW, .type = ARM_CP_CONST, 5004 .resetvalue = 0 }, 5005 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5006 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5007 .access = PL2_RW, .type = ARM_CP_CONST, 5008 .resetvalue = 0 }, 5009 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5010 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5011 .access = PL2_RW, .type = ARM_CP_CONST, 5012 .resetvalue = 0 }, 5013 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5014 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5015 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5016 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 5017 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5018 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5019 .type = ARM_CP_CONST, .resetvalue = 0 }, 5020 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5021 .cp = 15, .opc1 = 6, .crm = 2, 5022 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5023 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 5024 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5025 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5026 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5027 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5028 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5029 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5030 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5031 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5032 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5033 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5034 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5035 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5036 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5037 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5038 .resetvalue = 0 },
5039 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5040 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5041 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5042 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5043 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5044 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5045 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5046 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5047 .resetvalue = 0 }, 5048 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5049 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5050 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5051 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5052 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5053 .resetvalue = 0 }, 5054 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5055 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5056 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5057 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5058 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5059 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5060 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5061 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5062 .access = PL2_RW, .accessfn = access_tda, 5063 .type = ARM_CP_CONST, .resetvalue = 0 }, 5064 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 5065 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5066 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5067 .type = ARM_CP_CONST, .resetvalue = 0 }, 5068 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5069 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5070 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5071 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5072 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5073 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5074 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5075 .type = ARM_CP_CONST, 5076 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5077 .access = PL2_RW, .resetvalue = 0 }, 5078 REGINFO_SENTINEL 5079 }; 5080 5081 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 5082 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 5083 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5084 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5085 .access = PL2_RW, 5086 .type = ARM_CP_CONST, .resetvalue = 0 }, 5087 REGINFO_SENTINEL 5088 }; 5089 5090 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5091 { 5092 ARMCPU *cpu = env_archcpu(env); 5093 /* Begin with bits defined in base ARMv8.0. */ 5094 uint64_t valid_mask = MAKE_64BIT_MASK(0, 34); 5095 5096 if (arm_feature(env, ARM_FEATURE_EL3)) { 5097 valid_mask &= ~HCR_HCD; 5098 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5099 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 5100 * However, if we're using the SMC PSCI conduit then QEMU is 5101 * effectively acting like EL3 firmware and so the guest at 5102 * EL2 should retain the ability to prevent EL1 from being 5103 * able to make SMC calls into the ersatz firmware, so in 5104 * that case HCR.TSC should be read/write. 
5105 */ 5106 valid_mask &= ~HCR_TSC; 5107 } 5108 if (cpu_isar_feature(aa64_vh, cpu)) { 5109 valid_mask |= HCR_E2H; 5110 } 5111 if (cpu_isar_feature(aa64_lor, cpu)) { 5112 valid_mask |= HCR_TLOR; 5113 } 5114 if (cpu_isar_feature(aa64_pauth, cpu)) { 5115 valid_mask |= HCR_API | HCR_APK; 5116 } 5117 5118 /* Clear RES0 bits. */ 5119 value &= valid_mask; 5120 5121 /* These bits change the MMU setup: 5122 * HCR_VM enables stage 2 translation 5123 * HCR_PTW forbids certain page-table setups 5124 * HCR_DC Disables stage1 and enables stage2 translation 5125 */ 5126 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 5127 tlb_flush(CPU(cpu)); 5128 } 5129 env->cp15.hcr_el2 = value; 5130 5131 /* 5132 * Updates to VI and VF require us to update the status of 5133 * virtual interrupts, which are the logical OR of these bits 5134 * and the state of the input lines from the GIC. (This requires 5135 * that we have the iothread lock, which is done by marking the 5136 * reginfo structs as ARM_CP_IO.) 5137 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 5138 * possible for it to be taken immediately, because VIRQ and 5139 * VFIQ are masked unless running at EL0 or EL1, and HCR 5140 * can only be written at EL2. 5141 */ 5142 g_assert(qemu_mutex_iothread_locked()); 5143 arm_cpu_update_virq(cpu); 5144 arm_cpu_update_vfiq(cpu); 5145 } 5146 5147 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5148 uint64_t value) 5149 { 5150 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 5151 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5152 hcr_write(env, NULL, value); 5153 } 5154 5155 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5156 uint64_t value) 5157 { 5158 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5159 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5160 hcr_write(env, NULL, value); 5161 } 5162 5163 /* 5164 * Return the effective value of HCR_EL2. 5165 * Bits that are not included here: 5166 * RW (read from SCR_EL3.RW as needed) 5167 */ 5168 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5169 { 5170 uint64_t ret = env->cp15.hcr_el2; 5171 5172 if (arm_is_secure_below_el3(env)) { 5173 /* 5174 * "This register has no effect if EL2 is not enabled in the 5175 * current Security state". This is ARMv8.4-SecEL2 speak for 5176 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 5177 * 5178 * Prior to that, the language was "In an implementation that 5179 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 5180 * as if this field is 0 for all purposes other than a direct 5181 * read or write access of HCR_EL2". With lots of enumeration 5182 * on a per-field basis. In current QEMU, this is condition 5183 * is arm_is_secure_below_el3. 5184 * 5185 * Since the v8.4 language applies to the entire register, and 5186 * appears to be backward compatible, use that. 5187 */ 5188 ret = 0; 5189 } else if (ret & HCR_TGE) { 5190 /* These bits are up-to-date as of ARMv8.4. 
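* With TGE set we either clear the controls listed in the E2H branch
* below, or (when E2H is clear) force FMO/IMO/AMO to 1; in both cases
* the bits in the final mask below then read as 0.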
*/ 5191 if (ret & HCR_E2H) { 5192 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 5193 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 5194 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 5195 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE); 5196 } else { 5197 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 5198 } 5199 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 5200 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 5201 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 5202 HCR_TLOR); 5203 } 5204 5205 return ret; 5206 } 5207 5208 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 5209 uint64_t value) 5210 { 5211 /* 5212 * For A-profile AArch32 EL3, if NSACR.CP10 5213 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5214 */ 5215 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5216 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5217 value &= ~(0x3 << 10); 5218 value |= env->cp15.cptr_el[2] & (0x3 << 10); 5219 } 5220 env->cp15.cptr_el[2] = value; 5221 } 5222 5223 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 5224 { 5225 /* 5226 * For A-profile AArch32 EL3, if NSACR.CP10 5227 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5228 */ 5229 uint64_t value = env->cp15.cptr_el[2]; 5230 5231 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5232 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5233 value |= 0x3 << 10; 5234 } 5235 return value; 5236 } 5237 5238 static const ARMCPRegInfo el2_cp_reginfo[] = { 5239 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5240 .type = ARM_CP_IO, 5241 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5242 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5243 .writefn = hcr_write }, 5244 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5245 .type = ARM_CP_ALIAS | ARM_CP_IO, 5246 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5247 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5248 .writefn = hcr_writelow }, 5249 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5250 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5251 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5252 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5253 .type = ARM_CP_ALIAS, 5254 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5255 .access = PL2_RW, 5256 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5257 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5258 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5259 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5260 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5261 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5262 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5263 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5264 .type = ARM_CP_ALIAS, 5265 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5266 .access = PL2_RW, 5267 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5268 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5269 .type = ARM_CP_ALIAS, 5270 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5271 .access = PL2_RW, 5272 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5273 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5274 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5275 .access = PL2_RW, .writefn = vbar_write, 5276 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5277 .resetvalue = 0 }, 5278 { .name = "SP_EL2", .state = 
ARM_CP_STATE_AA64, 5279 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5280 .access = PL3_RW, .type = ARM_CP_ALIAS, 5281 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5282 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5283 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5284 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5285 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5286 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5287 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5288 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5289 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5290 .resetvalue = 0 }, 5291 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5292 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5293 .access = PL2_RW, .type = ARM_CP_ALIAS, 5294 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5295 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5296 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5297 .access = PL2_RW, .type = ARM_CP_CONST, 5298 .resetvalue = 0 }, 5299 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5300 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5301 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5302 .access = PL2_RW, .type = ARM_CP_CONST, 5303 .resetvalue = 0 }, 5304 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5305 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5306 .access = PL2_RW, .type = ARM_CP_CONST, 5307 .resetvalue = 0 }, 5308 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5309 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5310 .access = PL2_RW, .type = ARM_CP_CONST, 5311 .resetvalue = 0 }, 5312 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5313 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5314 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5315 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ 5316 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5317 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5318 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5319 .type = ARM_CP_ALIAS, 5320 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5321 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5322 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5323 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5324 .access = PL2_RW, 5325 /* no .writefn needed as this can't cause an ASID change; 5326 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 5327 */ 5328 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5329 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5330 .cp = 15, .opc1 = 6, .crm = 2, 5331 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5332 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5333 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5334 .writefn = vttbr_write }, 5335 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5336 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5337 .access = PL2_RW, .writefn = vttbr_write, 5338 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5339 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5340 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5341 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5342 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5343 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5344 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5345 .access = PL2_RW, .resetvalue = 0, 5346 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 
5347 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5348 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5349 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, 5350 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5351 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5352 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5353 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5354 { .name = "TLBIALLNSNH", 5355 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5356 .type = ARM_CP_NO_RAW, .access = PL2_W, 5357 .writefn = tlbiall_nsnh_write }, 5358 { .name = "TLBIALLNSNHIS", 5359 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5360 .type = ARM_CP_NO_RAW, .access = PL2_W, 5361 .writefn = tlbiall_nsnh_is_write }, 5362 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5363 .type = ARM_CP_NO_RAW, .access = PL2_W, 5364 .writefn = tlbiall_hyp_write }, 5365 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5366 .type = ARM_CP_NO_RAW, .access = PL2_W, 5367 .writefn = tlbiall_hyp_is_write }, 5368 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5369 .type = ARM_CP_NO_RAW, .access = PL2_W, 5370 .writefn = tlbimva_hyp_write }, 5371 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5372 .type = ARM_CP_NO_RAW, .access = PL2_W, 5373 .writefn = tlbimva_hyp_is_write }, 5374 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 5375 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5376 .type = ARM_CP_NO_RAW, .access = PL2_W, 5377 .writefn = tlbi_aa64_alle2_write }, 5378 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 5379 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5380 .type = ARM_CP_NO_RAW, .access = PL2_W, 5381 .writefn = tlbi_aa64_vae2_write }, 5382 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 5383 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5384 .access = PL2_W, .type = ARM_CP_NO_RAW, 5385 .writefn = tlbi_aa64_vae2_write }, 5386 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 5387 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5388 .access = PL2_W, .type = ARM_CP_NO_RAW, 5389 .writefn = tlbi_aa64_alle2is_write }, 5390 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 5391 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5392 .type = ARM_CP_NO_RAW, .access = PL2_W, 5393 .writefn = tlbi_aa64_vae2is_write }, 5394 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 5395 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5396 .access = PL2_W, .type = ARM_CP_NO_RAW, 5397 .writefn = tlbi_aa64_vae2is_write }, 5398 #ifndef CONFIG_USER_ONLY 5399 /* Unlike the other EL2-related AT operations, these must 5400 * UNDEF from EL3 if EL2 is not implemented, which is why we 5401 * define them here rather than with the rest of the AT ops. 5402 */ 5403 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5404 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5405 .access = PL2_W, .accessfn = at_s1e2_access, 5406 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5407 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5408 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5409 .access = PL2_W, .accessfn = at_s1e2_access, 5410 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5411 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5412 * if EL2 is not implemented; we choose to UNDEF. 
Behaviour at EL3 5413 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5414 * to behave as if SCR.NS was 1. 5415 */ 5416 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5417 .access = PL2_W, 5418 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5419 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5420 .access = PL2_W, 5421 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5422 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5423 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5424 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5425 * reset values as IMPDEF. We choose to reset to 3 to comply with 5426 * both ARMv7 and ARMv8. 5427 */ 5428 .access = PL2_RW, .resetvalue = 3, 5429 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5430 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5431 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5432 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5433 .writefn = gt_cntvoff_write, 5434 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5435 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5436 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5437 .writefn = gt_cntvoff_write, 5438 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5439 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5440 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5441 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5442 .type = ARM_CP_IO, .access = PL2_RW, 5443 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5444 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5445 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5446 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5447 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5448 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5449 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5450 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5451 .resetfn = gt_hyp_timer_reset, 5452 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5453 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5454 .type = ARM_CP_IO, 5455 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5456 .access = PL2_RW, 5457 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5458 .resetvalue = 0, 5459 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5460 #endif 5461 /* The only field of MDCR_EL2 that has a defined architectural reset value 5462 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 5463 * don't implement any PMU event counters, so using zero as a reset 5464 * value for MDCR_EL2 is okay 5465 */ 5466 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5467 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5468 .access = PL2_RW, .resetvalue = 0, 5469 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 5470 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5471 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5472 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5473 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5474 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5475 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5476 .access = PL2_RW, 5477 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5478 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5479 .cp = 15, .opc0 = 3, .opc1 = 4, .crn 
= 1, .crm = 1, .opc2 = 3, 5480 .access = PL2_RW, 5481 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5482 REGINFO_SENTINEL 5483 }; 5484 5485 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5486 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5487 .type = ARM_CP_ALIAS | ARM_CP_IO, 5488 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5489 .access = PL2_RW, 5490 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5491 .writefn = hcr_writehigh }, 5492 REGINFO_SENTINEL 5493 }; 5494 5495 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5496 bool isread) 5497 { 5498 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5499 * At Secure EL1 it traps to EL3. 5500 */ 5501 if (arm_current_el(env) == 3) { 5502 return CP_ACCESS_OK; 5503 } 5504 if (arm_is_secure_below_el3(env)) { 5505 return CP_ACCESS_TRAP_EL3; 5506 } 5507 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 5508 if (isread) { 5509 return CP_ACCESS_OK; 5510 } 5511 return CP_ACCESS_TRAP_UNCATEGORIZED; 5512 } 5513 5514 static const ARMCPRegInfo el3_cp_reginfo[] = { 5515 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 5516 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 5517 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 5518 .resetvalue = 0, .writefn = scr_write }, 5519 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 5520 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 5521 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5522 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 5523 .writefn = scr_write }, 5524 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 5525 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 5526 .access = PL3_RW, .resetvalue = 0, 5527 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 5528 { .name = "SDER", 5529 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 5530 .access = PL3_RW, .resetvalue = 0, 5531 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 5532 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5533 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5534 .writefn = vbar_write, .resetvalue = 0, 5535 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 5536 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 5537 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 5538 .access = PL3_RW, .resetvalue = 0, 5539 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 5540 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 5541 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 5542 .access = PL3_RW, 5543 /* no .writefn needed as this can't cause an ASID change; 5544 * we must provide a .raw_writefn and .resetfn because we handle 5545 * reset and migration for the AArch32 TTBCR(S), which might be 5546 * using mask and base_mask. 
5547 */ 5548 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 5549 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 5550 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 5551 .type = ARM_CP_ALIAS, 5552 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 5553 .access = PL3_RW, 5554 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 5555 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 5556 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 5557 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 5558 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 5559 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5560 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5561 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5562 .type = ARM_CP_ALIAS, 5563 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5564 .access = PL3_RW, 5565 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5566 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5567 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5568 .access = PL3_RW, .writefn = vbar_write, 5569 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5570 .resetvalue = 0 }, 5571 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5572 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5573 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5574 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5575 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5576 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5577 .access = PL3_RW, .resetvalue = 0, 5578 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5579 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5580 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5581 .access = PL3_RW, .type = ARM_CP_CONST, 5582 .resetvalue = 0 }, 5583 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5584 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5585 .access = PL3_RW, .type = ARM_CP_CONST, 5586 .resetvalue = 0 }, 5587 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5588 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5589 .access = PL3_RW, .type = ARM_CP_CONST, 5590 .resetvalue = 0 }, 5591 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 5592 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 5593 .access = PL3_W, .type = ARM_CP_NO_RAW, 5594 .writefn = tlbi_aa64_alle3is_write }, 5595 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 5596 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 5597 .access = PL3_W, .type = ARM_CP_NO_RAW, 5598 .writefn = tlbi_aa64_vae3is_write }, 5599 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 5600 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 5601 .access = PL3_W, .type = ARM_CP_NO_RAW, 5602 .writefn = tlbi_aa64_vae3is_write }, 5603 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 5604 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 5605 .access = PL3_W, .type = ARM_CP_NO_RAW, 5606 .writefn = tlbi_aa64_alle3_write }, 5607 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 5608 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 5609 .access = PL3_W, .type = ARM_CP_NO_RAW, 5610 .writefn = tlbi_aa64_vae3_write }, 5611 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 5612 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 5613 .access = PL3_W, .type = ARM_CP_NO_RAW, 5614 .writefn = tlbi_aa64_vae3_write }, 5615 REGINFO_SENTINEL 5616 }; 5617 5618 #ifndef CONFIG_USER_ONLY 5619 /* Test if system register redirection is to 
occur in the current state. */ 5620 static bool redirect_for_e2h(CPUARMState *env) 5621 { 5622 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 5623 } 5624 5625 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 5626 { 5627 CPReadFn *readfn; 5628 5629 if (redirect_for_e2h(env)) { 5630 /* Switch to the saved EL2 version of the register. */ 5631 ri = ri->opaque; 5632 readfn = ri->readfn; 5633 } else { 5634 readfn = ri->orig_readfn; 5635 } 5636 if (readfn == NULL) { 5637 readfn = raw_read; 5638 } 5639 return readfn(env, ri); 5640 } 5641 5642 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 5643 uint64_t value) 5644 { 5645 CPWriteFn *writefn; 5646 5647 if (redirect_for_e2h(env)) { 5648 /* Switch to the saved EL2 version of the register. */ 5649 ri = ri->opaque; 5650 writefn = ri->writefn; 5651 } else { 5652 writefn = ri->orig_writefn; 5653 } 5654 if (writefn == NULL) { 5655 writefn = raw_write; 5656 } 5657 writefn(env, ri, value); 5658 } 5659 5660 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 5661 { 5662 struct E2HAlias { 5663 uint32_t src_key, dst_key, new_key; 5664 const char *src_name, *dst_name, *new_name; 5665 bool (*feature)(const ARMISARegisters *id); 5666 }; 5667 5668 #define K(op0, op1, crn, crm, op2) \ 5669 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 5670 5671 static const struct E2HAlias aliases[] = { 5672 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 5673 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 5674 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 5675 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 5676 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 5677 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 5678 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 5679 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 5680 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 5681 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 5682 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 5683 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 5684 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 5685 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 5686 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 5687 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 5688 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 5689 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 5690 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 5691 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 5692 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 5693 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 5694 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 5695 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 5696 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 5697 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 5698 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 5699 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 5700 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 5701 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 5702 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 5703 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, 5704 5705 /* 5706 * Note that redirection of ZCR is mentioned in the description 5707 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 5708 * not in the summary table. 
5709 */ 5710 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), 5711 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, 5712 5713 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ 5714 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ 5715 }; 5716 #undef K 5717 5718 size_t i; 5719 5720 for (i = 0; i < ARRAY_SIZE(aliases); i++) { 5721 const struct E2HAlias *a = &aliases[i]; 5722 ARMCPRegInfo *src_reg, *dst_reg; 5723 5724 if (a->feature && !a->feature(&cpu->isar)) { 5725 continue; 5726 } 5727 5728 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); 5729 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); 5730 g_assert(src_reg != NULL); 5731 g_assert(dst_reg != NULL); 5732 5733 /* Cross-compare names to detect typos in the keys. */ 5734 g_assert(strcmp(src_reg->name, a->src_name) == 0); 5735 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); 5736 5737 /* None of the core system registers use opaque; we will. */ 5738 g_assert(src_reg->opaque == NULL); 5739 5740 /* Create alias before redirection so we dup the right data. */ 5741 if (a->new_key) { 5742 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); 5743 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); 5744 bool ok; 5745 5746 new_reg->name = a->new_name; 5747 new_reg->type |= ARM_CP_ALIAS; 5748 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ 5749 new_reg->access &= PL2_RW | PL3_RW; 5750 5751 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); 5752 g_assert(ok); 5753 } 5754 5755 src_reg->opaque = dst_reg; 5756 src_reg->orig_readfn = src_reg->readfn ?: raw_read; 5757 src_reg->orig_writefn = src_reg->writefn ?: raw_write; 5758 if (!src_reg->raw_readfn) { 5759 src_reg->raw_readfn = raw_read; 5760 } 5761 if (!src_reg->raw_writefn) { 5762 src_reg->raw_writefn = raw_write; 5763 } 5764 src_reg->readfn = el2_e2h_read; 5765 src_reg->writefn = el2_e2h_write; 5766 } 5767 } 5768 #endif 5769 5770 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 5771 bool isread) 5772 { 5773 int cur_el = arm_current_el(env); 5774 5775 if (cur_el < 2) { 5776 uint64_t hcr = arm_hcr_el2_eff(env); 5777 5778 if (cur_el == 0) { 5779 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 5780 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { 5781 return CP_ACCESS_TRAP_EL2; 5782 } 5783 } else { 5784 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 5785 return CP_ACCESS_TRAP; 5786 } 5787 if (hcr & HCR_TID2) { 5788 return CP_ACCESS_TRAP_EL2; 5789 } 5790 } 5791 } else if (hcr & HCR_TID2) { 5792 return CP_ACCESS_TRAP_EL2; 5793 } 5794 } 5795 5796 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) { 5797 return CP_ACCESS_TRAP_EL2; 5798 } 5799 5800 return CP_ACCESS_OK; 5801 } 5802 5803 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 5804 uint64_t value) 5805 { 5806 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 5807 * read via a bit in OSLSR_EL1. 5808 */ 5809 int oslock; 5810 5811 if (ri->state == ARM_CP_STATE_AA32) { 5812 oslock = (value == 0xC5ACCE55); 5813 } else { 5814 oslock = value & 1; 5815 } 5816 5817 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 5818 } 5819 5820 static const ARMCPRegInfo debug_cp_reginfo[] = { 5821 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 5822 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 5823 * unlike DBGDRAR it is never accessible from EL0. 5824 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 5825 * accessor. 
5826 */ 5827 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 5828 .access = PL0_R, .accessfn = access_tdra, 5829 .type = ARM_CP_CONST, .resetvalue = 0 }, 5830 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 5831 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5832 .access = PL1_R, .accessfn = access_tdra, 5833 .type = ARM_CP_CONST, .resetvalue = 0 }, 5834 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 5835 .access = PL0_R, .accessfn = access_tdra, 5836 .type = ARM_CP_CONST, .resetvalue = 0 }, 5837 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 5838 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 5839 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 5840 .access = PL1_RW, .accessfn = access_tda, 5841 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 5842 .resetvalue = 0 }, 5843 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 5844 * We don't implement the configurable EL0 access. 5845 */ 5846 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 5847 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 5848 .type = ARM_CP_ALIAS, 5849 .access = PL1_R, .accessfn = access_tda, 5850 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 5851 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 5852 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 5853 .access = PL1_W, .type = ARM_CP_NO_RAW, 5854 .accessfn = access_tdosa, 5855 .writefn = oslar_write }, 5856 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 5857 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 5858 .access = PL1_R, .resetvalue = 10, 5859 .accessfn = access_tdosa, 5860 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 5861 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 5862 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 5863 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 5864 .access = PL1_RW, .accessfn = access_tdosa, 5865 .type = ARM_CP_NOP }, 5866 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 5867 * implement vector catch debug events yet. 5868 */ 5869 { .name = "DBGVCR", 5870 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5871 .access = PL1_RW, .accessfn = access_tda, 5872 .type = ARM_CP_NOP }, 5873 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 5874 * to save and restore a 32-bit guest's DBGVCR) 5875 */ 5876 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 5877 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 5878 .access = PL2_RW, .accessfn = access_tda, 5879 .type = ARM_CP_NOP }, 5880 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 5881 * Channel but Linux may try to access this register. The 32-bit 5882 * alias is DBGDCCINT. 
5883 */ 5884 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 5885 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 5886 .access = PL1_RW, .accessfn = access_tda, 5887 .type = ARM_CP_NOP }, 5888 REGINFO_SENTINEL 5889 }; 5890 5891 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 5892 /* 64 bit access versions of the (dummy) debug registers */ 5893 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 5894 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5895 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 5896 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5897 REGINFO_SENTINEL 5898 }; 5899 5900 /* Return the exception level to which exceptions should be taken 5901 * via SVEAccessTrap. If an exception should be routed through 5902 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 5903 * take care of raising that exception. 5904 * C.f. the ARM pseudocode function CheckSVEEnabled. 5905 */ 5906 int sve_exception_el(CPUARMState *env, int el) 5907 { 5908 #ifndef CONFIG_USER_ONLY 5909 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 5910 5911 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 5912 bool disabled = false; 5913 5914 /* The CPACR.ZEN controls traps to EL1: 5915 * 0, 2 : trap EL0 and EL1 accesses 5916 * 1 : trap only EL0 accesses 5917 * 3 : trap no accesses 5918 */ 5919 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 5920 disabled = true; 5921 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 5922 disabled = el == 0; 5923 } 5924 if (disabled) { 5925 /* route_to_el2 */ 5926 return hcr_el2 & HCR_TGE ? 2 : 1; 5927 } 5928 5929 /* Check CPACR.FPEN. */ 5930 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 5931 disabled = true; 5932 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 5933 disabled = el == 0; 5934 } 5935 if (disabled) { 5936 return 0; 5937 } 5938 } 5939 5940 /* CPTR_EL2. Since TZ and TFP are positive, 5941 * they will be zero when EL2 is not present. 5942 */ 5943 if (el <= 2 && !arm_is_secure_below_el3(env)) { 5944 if (env->cp15.cptr_el[2] & CPTR_TZ) { 5945 return 2; 5946 } 5947 if (env->cp15.cptr_el[2] & CPTR_TFP) { 5948 return 0; 5949 } 5950 } 5951 5952 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 5953 if (arm_feature(env, ARM_FEATURE_EL3) 5954 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 5955 return 3; 5956 } 5957 #endif 5958 return 0; 5959 } 5960 5961 static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) 5962 { 5963 uint32_t end_len; 5964 5965 end_len = start_len &= 0xf; 5966 if (!test_bit(start_len, cpu->sve_vq_map)) { 5967 end_len = find_last_bit(cpu->sve_vq_map, start_len); 5968 assert(end_len < start_len); 5969 } 5970 return end_len; 5971 } 5972 5973 /* 5974 * Given that SVE is enabled, return the vector length for EL. 
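* The value handled here is "vector quadwords minus 1": we start from
* the CPU's sve_max_vq - 1, take the minimum with ZCR_EL1.LEN (for EL0/1),
* with ZCR_EL2.LEN (for EL2 and below, when EL2 is implemented) and with
* ZCR_EL3.LEN (when EL3 is implemented), and then round the result down
* to a vector length the CPU actually supports via sve_zcr_get_valid_len().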
5975 */ 5976 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 5977 { 5978 ARMCPU *cpu = env_archcpu(env); 5979 uint32_t zcr_len = cpu->sve_max_vq - 1; 5980 5981 if (el <= 1) { 5982 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 5983 } 5984 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { 5985 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 5986 } 5987 if (arm_feature(env, ARM_FEATURE_EL3)) { 5988 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 5989 } 5990 5991 return sve_zcr_get_valid_len(cpu, zcr_len); 5992 } 5993 5994 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5995 uint64_t value) 5996 { 5997 int cur_el = arm_current_el(env); 5998 int old_len = sve_zcr_len_for_el(env, cur_el); 5999 int new_len; 6000 6001 /* Bits other than [3:0] are RAZ/WI. */ 6002 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); 6003 raw_write(env, ri, value & 0xf); 6004 6005 /* 6006 * Because we arrived here, we know both FP and SVE are enabled; 6007 * otherwise we would have trapped access to the ZCR_ELn register. 6008 */ 6009 new_len = sve_zcr_len_for_el(env, cur_el); 6010 if (new_len < old_len) { 6011 aarch64_sve_narrow_vq(env, new_len + 1); 6012 } 6013 } 6014 6015 static const ARMCPRegInfo zcr_el1_reginfo = { 6016 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 6017 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 6018 .access = PL1_RW, .type = ARM_CP_SVE, 6019 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 6020 .writefn = zcr_write, .raw_writefn = raw_write 6021 }; 6022 6023 static const ARMCPRegInfo zcr_el2_reginfo = { 6024 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6025 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6026 .access = PL2_RW, .type = ARM_CP_SVE, 6027 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 6028 .writefn = zcr_write, .raw_writefn = raw_write 6029 }; 6030 6031 static const ARMCPRegInfo zcr_no_el2_reginfo = { 6032 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6033 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6034 .access = PL2_RW, .type = ARM_CP_SVE, 6035 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 6036 }; 6037 6038 static const ARMCPRegInfo zcr_el3_reginfo = { 6039 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 6040 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 6041 .access = PL3_RW, .type = ARM_CP_SVE, 6042 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 6043 .writefn = zcr_write, .raw_writefn = raw_write 6044 }; 6045 6046 void hw_watchpoint_update(ARMCPU *cpu, int n) 6047 { 6048 CPUARMState *env = &cpu->env; 6049 vaddr len = 0; 6050 vaddr wvr = env->cp15.dbgwvr[n]; 6051 uint64_t wcr = env->cp15.dbgwcr[n]; 6052 int mask; 6053 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 6054 6055 if (env->cpu_watchpoint[n]) { 6056 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 6057 env->cpu_watchpoint[n] = NULL; 6058 } 6059 6060 if (!extract64(wcr, 0, 1)) { 6061 /* E bit clear : watchpoint disabled */ 6062 return; 6063 } 6064 6065 switch (extract64(wcr, 3, 2)) { 6066 case 0: 6067 /* LSC 00 is reserved and must behave as if the wp is disabled */ 6068 return; 6069 case 1: 6070 flags |= BP_MEM_READ; 6071 break; 6072 case 2: 6073 flags |= BP_MEM_WRITE; 6074 break; 6075 case 3: 6076 flags |= BP_MEM_ACCESS; 6077 break; 6078 } 6079 6080 /* Attempts to use both MASK and BAS fields simultaneously are 6081 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 6082 * thus generating a watchpoint for every byte in the masked region. 
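* For example, a non-reserved MASK value of 3 gives len = 1 << 3 below,
* i.e. an 8 byte watched region starting at WVR with its low three bits
* cleared.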
6083 */ 6084 mask = extract64(wcr, 24, 4); 6085 if (mask == 1 || mask == 2) { 6086 /* Reserved values of MASK; we must act as if the mask value was 6087 * some non-reserved value, or as if the watchpoint were disabled. 6088 * We choose the latter. 6089 */ 6090 return; 6091 } else if (mask) { 6092 /* Watchpoint covers an aligned area up to 2GB in size */ 6093 len = 1ULL << mask; 6094 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 6095 * whether the watchpoint fires when the unmasked bits match; we opt 6096 * to generate the exceptions. 6097 */ 6098 wvr &= ~(len - 1); 6099 } else { 6100 /* Watchpoint covers bytes defined by the byte address select bits */ 6101 int bas = extract64(wcr, 5, 8); 6102 int basstart; 6103 6104 if (bas == 0) { 6105 /* This must act as if the watchpoint is disabled */ 6106 return; 6107 } 6108 6109 if (extract64(wvr, 2, 1)) { 6110 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 6111 * ignored, and BAS[3:0] define which bytes to watch. 6112 */ 6113 bas &= 0xf; 6114 } 6115 /* The BAS bits are supposed to be programmed to indicate a contiguous 6116 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 6117 * we fire for each byte in the word/doubleword addressed by the WVR. 6118 * We choose to ignore any non-zero bits after the first range of 1s. 6119 */ 6120 basstart = ctz32(bas); 6121 len = cto32(bas >> basstart); 6122 wvr += basstart; 6123 } 6124 6125 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 6126 &env->cpu_watchpoint[n]); 6127 } 6128 6129 void hw_watchpoint_update_all(ARMCPU *cpu) 6130 { 6131 int i; 6132 CPUARMState *env = &cpu->env; 6133 6134 /* Completely clear out existing QEMU watchpoints and our array, to 6135 * avoid possible stale entries following migration load. 6136 */ 6137 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 6138 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 6139 6140 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 6141 hw_watchpoint_update(cpu, i); 6142 } 6143 } 6144 6145 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6146 uint64_t value) 6147 { 6148 ARMCPU *cpu = env_archcpu(env); 6149 int i = ri->crm; 6150 6151 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 6152 * register reads and behaves as if values written are sign extended. 6153 * Bits [1:0] are RES0. 
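* (The single expression below implements both: sextract64(value, 0, 49)
* copies bit [48] into bits [63:49], and the final "& ~3ULL" clears
* bits [1:0].)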
6154 */ 6155 value = sextract64(value, 0, 49) & ~3ULL; 6156 6157 raw_write(env, ri, value); 6158 hw_watchpoint_update(cpu, i); 6159 } 6160 6161 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6162 uint64_t value) 6163 { 6164 ARMCPU *cpu = env_archcpu(env); 6165 int i = ri->crm; 6166 6167 raw_write(env, ri, value); 6168 hw_watchpoint_update(cpu, i); 6169 } 6170 6171 void hw_breakpoint_update(ARMCPU *cpu, int n) 6172 { 6173 CPUARMState *env = &cpu->env; 6174 uint64_t bvr = env->cp15.dbgbvr[n]; 6175 uint64_t bcr = env->cp15.dbgbcr[n]; 6176 vaddr addr; 6177 int bt; 6178 int flags = BP_CPU; 6179 6180 if (env->cpu_breakpoint[n]) { 6181 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 6182 env->cpu_breakpoint[n] = NULL; 6183 } 6184 6185 if (!extract64(bcr, 0, 1)) { 6186 /* E bit clear : watchpoint disabled */ 6187 return; 6188 } 6189 6190 bt = extract64(bcr, 20, 4); 6191 6192 switch (bt) { 6193 case 4: /* unlinked address mismatch (reserved if AArch64) */ 6194 case 5: /* linked address mismatch (reserved if AArch64) */ 6195 qemu_log_mask(LOG_UNIMP, 6196 "arm: address mismatch breakpoint types not implemented\n"); 6197 return; 6198 case 0: /* unlinked address match */ 6199 case 1: /* linked address match */ 6200 { 6201 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 6202 * we behave as if the register was sign extended. Bits [1:0] are 6203 * RES0. The BAS field is used to allow setting breakpoints on 16 6204 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 6205 * a bp will fire if the addresses covered by the bp and the addresses 6206 * covered by the insn overlap but the insn doesn't start at the 6207 * start of the bp address range. We choose to require the insn and 6208 * the bp to have the same address. The constraints on writing to 6209 * BAS enforced in dbgbcr_write mean we have only four cases: 6210 * 0b0000 => no breakpoint 6211 * 0b0011 => breakpoint on addr 6212 * 0b1100 => breakpoint on addr + 2 6213 * 0b1111 => breakpoint on addr 6214 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 6215 */ 6216 int bas = extract64(bcr, 5, 4); 6217 addr = sextract64(bvr, 0, 49) & ~3ULL; 6218 if (bas == 0) { 6219 return; 6220 } 6221 if (bas == 0xc) { 6222 addr += 2; 6223 } 6224 break; 6225 } 6226 case 2: /* unlinked context ID match */ 6227 case 8: /* unlinked VMID match (reserved if no EL2) */ 6228 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 6229 qemu_log_mask(LOG_UNIMP, 6230 "arm: unlinked context breakpoint types not implemented\n"); 6231 return; 6232 case 9: /* linked VMID match (reserved if no EL2) */ 6233 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 6234 case 3: /* linked context ID match */ 6235 default: 6236 /* We must generate no events for Linked context matches (unless 6237 * they are linked to by some other bp/wp, which is handled in 6238 * updates for the linking bp/wp). We choose to also generate no events 6239 * for reserved values. 6240 */ 6241 return; 6242 } 6243 6244 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 6245 } 6246 6247 void hw_breakpoint_update_all(ARMCPU *cpu) 6248 { 6249 int i; 6250 CPUARMState *env = &cpu->env; 6251 6252 /* Completely clear out existing QEMU breakpoints and our array, to 6253 * avoid possible stale entries following migration load. 
6254 */ 6255 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 6256 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 6257 6258 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 6259 hw_breakpoint_update(cpu, i); 6260 } 6261 } 6262 6263 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6264 uint64_t value) 6265 { 6266 ARMCPU *cpu = env_archcpu(env); 6267 int i = ri->crm; 6268 6269 raw_write(env, ri, value); 6270 hw_breakpoint_update(cpu, i); 6271 } 6272 6273 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6274 uint64_t value) 6275 { 6276 ARMCPU *cpu = env_archcpu(env); 6277 int i = ri->crm; 6278 6279 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 6280 * copy of BAS[0]. 6281 */ 6282 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 6283 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 6284 6285 raw_write(env, ri, value); 6286 hw_breakpoint_update(cpu, i); 6287 } 6288 6289 static void define_debug_regs(ARMCPU *cpu) 6290 { 6291 /* Define v7 and v8 architectural debug registers. 6292 * These are just dummy implementations for now. 6293 */ 6294 int i; 6295 int wrps, brps, ctx_cmps; 6296 ARMCPRegInfo dbgdidr = { 6297 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 6298 .access = PL0_R, .accessfn = access_tda, 6299 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 6300 }; 6301 6302 /* Note that all these register fields hold "number of Xs minus 1". */ 6303 brps = arm_num_brps(cpu); 6304 wrps = arm_num_wrps(cpu); 6305 ctx_cmps = arm_num_ctx_cmps(cpu); 6306 6307 assert(ctx_cmps <= brps); 6308 6309 define_one_arm_cp_reg(cpu, &dbgdidr); 6310 define_arm_cp_regs(cpu, debug_cp_reginfo); 6311 6312 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 6313 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 6314 } 6315 6316 for (i = 0; i < brps; i++) { 6317 ARMCPRegInfo dbgregs[] = { 6318 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 6319 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 6320 .access = PL1_RW, .accessfn = access_tda, 6321 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 6322 .writefn = dbgbvr_write, .raw_writefn = raw_write 6323 }, 6324 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 6325 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 6326 .access = PL1_RW, .accessfn = access_tda, 6327 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 6328 .writefn = dbgbcr_write, .raw_writefn = raw_write 6329 }, 6330 REGINFO_SENTINEL 6331 }; 6332 define_arm_cp_regs(cpu, dbgregs); 6333 } 6334 6335 for (i = 0; i < wrps; i++) { 6336 ARMCPRegInfo dbgregs[] = { 6337 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 6338 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 6339 .access = PL1_RW, .accessfn = access_tda, 6340 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 6341 .writefn = dbgwvr_write, .raw_writefn = raw_write 6342 }, 6343 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 6344 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 6345 .access = PL1_RW, .accessfn = access_tda, 6346 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 6347 .writefn = dbgwcr_write, .raw_writefn = raw_write 6348 }, 6349 REGINFO_SENTINEL 6350 }; 6351 define_arm_cp_regs(cpu, dbgregs); 6352 } 6353 } 6354 6355 static void define_pmu_regs(ARMCPU *cpu) 6356 { 6357 /* 6358 * v7 performance monitor control register: same implementor 6359 * field as main ID register, and we implement four counters in 6360 * addition to the cycle count register. 
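* ("Four counters" is the pmcrn value below; it is advertised to the
* guest in the N field of the PMCR_EL0 reset value, together with the
* implementer byte taken from MIDR and the LC bit.)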
6361 */ 6362 unsigned int i, pmcrn = 4; 6363 ARMCPRegInfo pmcr = { 6364 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6365 .access = PL0_RW, 6366 .type = ARM_CP_IO | ARM_CP_ALIAS, 6367 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6368 .accessfn = pmreg_access, .writefn = pmcr_write, 6369 .raw_writefn = raw_write, 6370 }; 6371 ARMCPRegInfo pmcr64 = { 6372 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6373 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6374 .access = PL0_RW, .accessfn = pmreg_access, 6375 .type = ARM_CP_IO, 6376 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6377 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | 6378 PMCRLC, 6379 .writefn = pmcr_write, .raw_writefn = raw_write, 6380 }; 6381 define_one_arm_cp_reg(cpu, &pmcr); 6382 define_one_arm_cp_reg(cpu, &pmcr64); 6383 for (i = 0; i < pmcrn; i++) { 6384 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6385 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6386 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6387 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6388 ARMCPRegInfo pmev_regs[] = { 6389 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6390 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6391 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6392 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6393 .accessfn = pmreg_access }, 6394 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6395 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6396 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6397 .type = ARM_CP_IO, 6398 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6399 .raw_readfn = pmevcntr_rawread, 6400 .raw_writefn = pmevcntr_rawwrite }, 6401 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6402 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6403 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6404 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6405 .accessfn = pmreg_access }, 6406 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6407 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6408 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6409 .type = ARM_CP_IO, 6410 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6411 .raw_writefn = pmevtyper_rawwrite }, 6412 REGINFO_SENTINEL 6413 }; 6414 define_arm_cp_regs(cpu, pmev_regs); 6415 g_free(pmevcntr_name); 6416 g_free(pmevcntr_el0_name); 6417 g_free(pmevtyper_name); 6418 g_free(pmevtyper_el0_name); 6419 } 6420 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { 6421 ARMCPRegInfo v81_pmu_regs[] = { 6422 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6423 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6424 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6425 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6426 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6427 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6428 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6429 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6430 REGINFO_SENTINEL 6431 }; 6432 define_arm_cp_regs(cpu, v81_pmu_regs); 6433 } 6434 if (cpu_isar_feature(any_pmu_8_4, cpu)) { 6435 static const ARMCPRegInfo v84_pmmir = { 6436 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, 6437 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, 6438 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6439 .resetvalue = 0 6440 }; 6441 
define_one_arm_cp_reg(cpu, &v84_pmmir); 6442 } 6443 } 6444 6445 /* We don't know until after realize whether there's a GICv3 6446 * attached, and that is what registers the gicv3 sysregs. 6447 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1 6448 * at runtime. 6449 */ 6450 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 6451 { 6452 ARMCPU *cpu = env_archcpu(env); 6453 uint64_t pfr1 = cpu->id_pfr1; 6454 6455 if (env->gicv3state) { 6456 pfr1 |= 1 << 28; 6457 } 6458 return pfr1; 6459 } 6460 6461 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 6462 { 6463 ARMCPU *cpu = env_archcpu(env); 6464 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 6465 6466 if (env->gicv3state) { 6467 pfr0 |= 1 << 24; 6468 } 6469 return pfr0; 6470 } 6471 6472 /* Shared logic between LORID and the rest of the LOR* registers.
6473 * Secure state has already been dealt with. 6474 */ 6475 static CPAccessResult access_lor_ns(CPUARMState *env) 6476 { 6477 int el = arm_current_el(env); 6478 6479 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 6480 return CP_ACCESS_TRAP_EL2; 6481 } 6482 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 6483 return CP_ACCESS_TRAP_EL3; 6484 } 6485 return CP_ACCESS_OK; 6486 } 6487 6488 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, 6489 bool isread) 6490 { 6491 if (arm_is_secure_below_el3(env)) { 6492 /* Access ok in secure mode. */ 6493 return CP_ACCESS_OK; 6494 } 6495 return access_lor_ns(env); 6496 } 6497 6498 static CPAccessResult access_lor_other(CPUARMState *env, 6499 const ARMCPRegInfo *ri, bool isread) 6500 { 6501 if (arm_is_secure_below_el3(env)) { 6502 /* Access denied in secure mode. */ 6503 return CP_ACCESS_TRAP; 6504 } 6505 return access_lor_ns(env); 6506 } 6507 6508 /* 6509 * A trivial implementation of ARMv8.1-LOR leaves all of these 6510 * registers fixed at 0, which indicates that there are zero 6511 * supported Limited Ordering regions.
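 * For example, a guest read of LORID_EL1 returns 0, so the guest discovers
 * no supported LORegion descriptors; writes to LORSA_EL1/LOREA_EL1/LORN_EL1/
 * LORC_EL1 are then simply ignored, since ARM_CP_CONST registers are treated
 * as write-ignore when the access is otherwise permitted.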
6512 */ 6513 static const ARMCPRegInfo lor_reginfo[] = { 6514 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6515 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6516 .access = PL1_RW, .accessfn = access_lor_other, 6517 .type = ARM_CP_CONST, .resetvalue = 0 }, 6518 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6519 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6520 .access = PL1_RW, .accessfn = access_lor_other, 6521 .type = ARM_CP_CONST, .resetvalue = 0 }, 6522 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6523 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6524 .access = PL1_RW, .accessfn = access_lor_other, 6525 .type = ARM_CP_CONST, .resetvalue = 0 }, 6526 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6527 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6528 .access = PL1_RW, .accessfn = access_lor_other, 6529 .type = ARM_CP_CONST, .resetvalue = 0 }, 6530 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6531 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6532 .access = PL1_R, .accessfn = access_lorid, 6533 .type = ARM_CP_CONST, .resetvalue = 0 }, 6534 REGINFO_SENTINEL 6535 }; 6536 6537 #ifdef TARGET_AARCH64 6538 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 6539 bool isread) 6540 { 6541 int el = arm_current_el(env); 6542 6543 if (el < 2 && 6544 arm_feature(env, ARM_FEATURE_EL2) && 6545 !(arm_hcr_el2_eff(env) & HCR_APK)) { 6546 return CP_ACCESS_TRAP_EL2; 6547 } 6548 if (el < 3 && 6549 arm_feature(env, ARM_FEATURE_EL3) && 6550 !(env->cp15.scr_el3 & SCR_APK)) { 6551 return CP_ACCESS_TRAP_EL3; 6552 } 6553 return CP_ACCESS_OK; 6554 } 6555 6556 static const ARMCPRegInfo pauth_reginfo[] = { 6557 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6558 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 6559 .access = PL1_RW, .accessfn = access_pauth, 6560 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 6561 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6562 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 6563 .access = PL1_RW, .accessfn = access_pauth, 6564 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 6565 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6566 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 6567 .access = PL1_RW, .accessfn = access_pauth, 6568 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 6569 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6570 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 6571 .access = PL1_RW, .accessfn = access_pauth, 6572 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 6573 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6574 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 6575 .access = PL1_RW, .accessfn = access_pauth, 6576 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 6577 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6578 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 6579 .access = PL1_RW, .accessfn = access_pauth, 6580 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 6581 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6582 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 6583 .access = PL1_RW, .accessfn = access_pauth, 6584 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, 6585 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6586 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 6587 .access = PL1_RW, .accessfn = access_pauth, 6588 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 6589 { .name = "APIBKEYLO_EL1", .state = 
ARM_CP_STATE_AA64, 6590 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 6591 .access = PL1_RW, .accessfn = access_pauth, 6592 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, 6593 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6594 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 6595 .access = PL1_RW, .accessfn = access_pauth, 6596 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, 6597 REGINFO_SENTINEL 6598 }; 6599 6600 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 6601 { 6602 Error *err = NULL; 6603 uint64_t ret; 6604 6605 /* Success sets NZCV = 0000. */ 6606 env->NF = env->CF = env->VF = 0, env->ZF = 1; 6607 6608 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) { 6609 /* 6610 * ??? Failed, for unknown reasons in the crypto subsystem. 6611 * The best we can do is log the reason and return the 6612 * timed-out indication to the guest. There is no reason 6613 * we know to expect this failure to be transitory, so the 6614 * guest may well hang retrying the operation. 6615 */ 6616 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", 6617 ri->name, error_get_pretty(err)); 6618 error_free(err); 6619 6620 env->ZF = 0; /* NZCV = 0100 */ 6621 return 0; 6622 } 6623 return ret; 6624 } 6625 6626 /* We do not support re-seeding, so the two registers operate the same. */ 6627 static const ARMCPRegInfo rndr_reginfo[] = { 6628 { .name = "RNDR", .state = ARM_CP_STATE_AA64, 6629 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6630 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, 6631 .access = PL0_R, .readfn = rndr_readfn }, 6632 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, 6633 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6634 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, 6635 .access = PL0_R, .readfn = rndr_readfn }, 6636 REGINFO_SENTINEL 6637 }; 6638 6639 #ifndef CONFIG_USER_ONLY 6640 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, 6641 uint64_t value) 6642 { 6643 ARMCPU *cpu = env_archcpu(env); 6644 /* CTR_EL0 System register -> DminLine, bits [19:16] */ 6645 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); 6646 uint64_t vaddr_in = (uint64_t) value; 6647 uint64_t vaddr = vaddr_in & ~(dline_size - 1); 6648 void *haddr; 6649 int mem_idx = cpu_mmu_index(env, false); 6650 6651 /* This won't be crossing page boundaries */ 6652 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); 6653 if (haddr) { 6654 6655 ram_addr_t offset; 6656 MemoryRegion *mr; 6657 6658 /* RCU lock is already being held */ 6659 mr = memory_region_from_host(haddr, &offset); 6660 6661 if (mr) { 6662 memory_region_do_writeback(mr, offset, dline_size); 6663 } 6664 } 6665 } 6666 6667 static const ARMCPRegInfo dcpop_reg[] = { 6668 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, 6669 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, 6670 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6671 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn }, 6672 REGINFO_SENTINEL 6673 }; 6674 6675 static const ARMCPRegInfo dcpodp_reg[] = { 6676 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, 6677 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, 6678 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6679 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn }, 6680 REGINFO_SENTINEL 6681 }; 6682 #endif /*CONFIG_USER_ONLY*/ 6683 6684 #endif 6685 6686 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, 6687 bool isread) 6688 { 6689 int el =
arm_current_el(env); 6690 6691 if (el == 0) { 6692 uint64_t sctlr = arm_sctlr(env, el); 6693 if (!(sctlr & SCTLR_EnRCTX)) { 6694 return CP_ACCESS_TRAP; 6695 } 6696 } else if (el == 1) { 6697 uint64_t hcr = arm_hcr_el2_eff(env); 6698 if (hcr & HCR_NV) { 6699 return CP_ACCESS_TRAP_EL2; 6700 } 6701 } 6702 return CP_ACCESS_OK; 6703 } 6704 6705 static const ARMCPRegInfo predinv_reginfo[] = { 6706 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 6707 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 6708 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6709 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 6710 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 6711 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6712 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 6713 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 6714 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6715 /* 6716 * Note the AArch32 opcodes have a different OPC1. 6717 */ 6718 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 6719 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 6720 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6721 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 6722 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 6723 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6724 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 6725 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 6726 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6727 REGINFO_SENTINEL 6728 }; 6729 6730 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6731 bool isread) 6732 { 6733 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 6734 return CP_ACCESS_TRAP_EL2; 6735 } 6736 6737 return CP_ACCESS_OK; 6738 } 6739 6740 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6741 bool isread) 6742 { 6743 if (arm_feature(env, ARM_FEATURE_V8)) { 6744 return access_aa64_tid3(env, ri, isread); 6745 } 6746 6747 return CP_ACCESS_OK; 6748 } 6749 6750 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 6751 bool isread) 6752 { 6753 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 6754 return CP_ACCESS_TRAP_EL2; 6755 } 6756 6757 return CP_ACCESS_OK; 6758 } 6759 6760 static const ARMCPRegInfo jazelle_regs[] = { 6761 { .name = "JIDR", 6762 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 6763 .access = PL1_R, .accessfn = access_jazelle, 6764 .type = ARM_CP_CONST, .resetvalue = 0 }, 6765 { .name = "JOSCR", 6766 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 6767 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6768 { .name = "JMCR", 6769 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 6770 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6771 REGINFO_SENTINEL 6772 }; 6773 6774 static const ARMCPRegInfo vhe_reginfo[] = { 6775 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, 6776 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 6777 .access = PL2_RW, 6778 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, 6779 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 6780 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 6781 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 6782 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 6783 #ifndef CONFIG_USER_ONLY 6784 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 6785 .opc0 = 3, .opc1 = 4, .crn 
= 14, .crm = 3, .opc2 = 2, 6786 .fieldoffset = 6787 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 6788 .type = ARM_CP_IO, .access = PL2_RW, 6789 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 6790 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 6791 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 6792 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 6793 .resetfn = gt_hv_timer_reset, 6794 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 6795 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 6796 .type = ARM_CP_IO, 6797 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 6798 .access = PL2_RW, 6799 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 6800 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 6801 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 6802 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 6803 .type = ARM_CP_IO | ARM_CP_ALIAS, 6804 .access = PL2_RW, .accessfn = e2h_access, 6805 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 6806 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 6807 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 6808 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 6809 .type = ARM_CP_IO | ARM_CP_ALIAS, 6810 .access = PL2_RW, .accessfn = e2h_access, 6811 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 6812 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 6813 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6814 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 6815 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 6816 .access = PL2_RW, .accessfn = e2h_access, 6817 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 6818 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6819 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, 6820 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 6821 .access = PL2_RW, .accessfn = e2h_access, 6822 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 6823 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, 6824 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 6825 .type = ARM_CP_IO | ARM_CP_ALIAS, 6826 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 6827 .access = PL2_RW, .accessfn = e2h_access, 6828 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 6829 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 6830 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 6831 .type = ARM_CP_IO | ARM_CP_ALIAS, 6832 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 6833 .access = PL2_RW, .accessfn = e2h_access, 6834 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 6835 #endif 6836 REGINFO_SENTINEL 6837 }; 6838 6839 #ifndef CONFIG_USER_ONLY 6840 static const ARMCPRegInfo ats1e1_reginfo[] = { 6841 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 6842 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 6843 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6844 .writefn = ats_write64 }, 6845 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 6846 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 6847 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6848 .writefn = ats_write64 }, 6849 REGINFO_SENTINEL 6850 }; 6851 6852 static const ARMCPRegInfo ats1cp_reginfo[] = { 6853 { .name = "ATS1CPRP", 6854 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 6855 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6856 .writefn 
= ats_write }, 6857 { .name = "ATS1CPWP", 6858 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 6859 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6860 .writefn = ats_write }, 6861 REGINFO_SENTINEL 6862 }; 6863 #endif 6864 6865 void register_cp_regs_for_features(ARMCPU *cpu) 6866 { 6867 /* Register all the coprocessor registers based on feature bits */ 6868 CPUARMState *env = &cpu->env; 6869 if (arm_feature(env, ARM_FEATURE_M)) { 6870 /* M profile has no coprocessor registers */ 6871 return; 6872 } 6873 6874 define_arm_cp_regs(cpu, cp_reginfo); 6875 if (!arm_feature(env, ARM_FEATURE_V8)) { 6876 /* Must go early as it is full of wildcards that may be 6877 * overridden by later definitions. 6878 */ 6879 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 6880 } 6881 6882 if (arm_feature(env, ARM_FEATURE_V6)) { 6883 /* The ID registers all have impdef reset values */ 6884 ARMCPRegInfo v6_idregs[] = { 6885 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 6886 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 6887 .access = PL1_R, .type = ARM_CP_CONST, 6888 .accessfn = access_aa32_tid3, 6889 .resetvalue = cpu->id_pfr0 }, 6890 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 6891 * the value of the GIC field until after we define these regs. 6892 */ 6893 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 6894 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 6895 .access = PL1_R, .type = ARM_CP_NO_RAW, 6896 .accessfn = access_aa32_tid3, 6897 .readfn = id_pfr1_read, 6898 .writefn = arm_cp_write_ignore }, 6899 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 6900 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 6901 .access = PL1_R, .type = ARM_CP_CONST, 6902 .accessfn = access_aa32_tid3, 6903 .resetvalue = cpu->isar.id_dfr0 }, 6904 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 6905 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 6906 .access = PL1_R, .type = ARM_CP_CONST, 6907 .accessfn = access_aa32_tid3, 6908 .resetvalue = cpu->id_afr0 }, 6909 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 6910 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 6911 .access = PL1_R, .type = ARM_CP_CONST, 6912 .accessfn = access_aa32_tid3, 6913 .resetvalue = cpu->isar.id_mmfr0 }, 6914 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 6915 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 6916 .access = PL1_R, .type = ARM_CP_CONST, 6917 .accessfn = access_aa32_tid3, 6918 .resetvalue = cpu->isar.id_mmfr1 }, 6919 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 6920 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 6921 .access = PL1_R, .type = ARM_CP_CONST, 6922 .accessfn = access_aa32_tid3, 6923 .resetvalue = cpu->isar.id_mmfr2 }, 6924 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 6925 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 6926 .access = PL1_R, .type = ARM_CP_CONST, 6927 .accessfn = access_aa32_tid3, 6928 .resetvalue = cpu->isar.id_mmfr3 }, 6929 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 6930 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 6931 .access = PL1_R, .type = ARM_CP_CONST, 6932 .accessfn = access_aa32_tid3, 6933 .resetvalue = cpu->isar.id_isar0 }, 6934 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 6935 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 6936 .access = PL1_R, .type = ARM_CP_CONST, 6937 .accessfn = access_aa32_tid3, 6938 .resetvalue = cpu->isar.id_isar1 }, 6939 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 6940 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 6941 .access = PL1_R, .type = 
ARM_CP_CONST, 6942 .accessfn = access_aa32_tid3, 6943 .resetvalue = cpu->isar.id_isar2 }, 6944 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 6945 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 6946 .access = PL1_R, .type = ARM_CP_CONST, 6947 .accessfn = access_aa32_tid3, 6948 .resetvalue = cpu->isar.id_isar3 }, 6949 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 6950 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 6951 .access = PL1_R, .type = ARM_CP_CONST, 6952 .accessfn = access_aa32_tid3, 6953 .resetvalue = cpu->isar.id_isar4 }, 6954 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 6955 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 6956 .access = PL1_R, .type = ARM_CP_CONST, 6957 .accessfn = access_aa32_tid3, 6958 .resetvalue = cpu->isar.id_isar5 }, 6959 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 6960 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 6961 .access = PL1_R, .type = ARM_CP_CONST, 6962 .accessfn = access_aa32_tid3, 6963 .resetvalue = cpu->isar.id_mmfr4 }, 6964 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 6965 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 6966 .access = PL1_R, .type = ARM_CP_CONST, 6967 .accessfn = access_aa32_tid3, 6968 .resetvalue = cpu->isar.id_isar6 }, 6969 REGINFO_SENTINEL 6970 }; 6971 define_arm_cp_regs(cpu, v6_idregs); 6972 define_arm_cp_regs(cpu, v6_cp_reginfo); 6973 } else { 6974 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 6975 } 6976 if (arm_feature(env, ARM_FEATURE_V6K)) { 6977 define_arm_cp_regs(cpu, v6k_cp_reginfo); 6978 } 6979 if (arm_feature(env, ARM_FEATURE_V7MP) && 6980 !arm_feature(env, ARM_FEATURE_PMSA)) { 6981 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 6982 } 6983 if (arm_feature(env, ARM_FEATURE_V7VE)) { 6984 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 6985 } 6986 if (arm_feature(env, ARM_FEATURE_V7)) { 6987 ARMCPRegInfo clidr = { 6988 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 6989 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 6990 .access = PL1_R, .type = ARM_CP_CONST, 6991 .accessfn = access_aa64_tid2, 6992 .resetvalue = cpu->clidr 6993 }; 6994 define_one_arm_cp_reg(cpu, &clidr); 6995 define_arm_cp_regs(cpu, v7_cp_reginfo); 6996 define_debug_regs(cpu); 6997 define_pmu_regs(cpu); 6998 } else { 6999 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 7000 } 7001 if (arm_feature(env, ARM_FEATURE_V8)) { 7002 /* AArch64 ID registers, which all have impdef reset values. 7003 * Note that within the ID register ranges the unused slots 7004 * must all RAZ, not UNDEF; future architecture versions may 7005 * define new registers here. 7006 */ 7007 ARMCPRegInfo v8_idregs[] = { 7008 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 7009 * know the right value for the GIC field until after we 7010 * define these regs. 
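 * (id_aa64pfr0_read() above ORs in 1 << 24, i.e. ID_AA64PFR0_EL1.GIC = 1,
 * once a GICv3 has been attached; id_pfr1_read() does the same for
 * ID_PFR1.GIC in the AArch32 view.)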
7011 */ 7012 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 7013 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 7014 .access = PL1_R, .type = ARM_CP_NO_RAW, 7015 .accessfn = access_aa64_tid3, 7016 .readfn = id_aa64pfr0_read, 7017 .writefn = arm_cp_write_ignore }, 7018 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 7019 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 7020 .access = PL1_R, .type = ARM_CP_CONST, 7021 .accessfn = access_aa64_tid3, 7022 .resetvalue = cpu->isar.id_aa64pfr1}, 7023 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7024 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 7025 .access = PL1_R, .type = ARM_CP_CONST, 7026 .accessfn = access_aa64_tid3, 7027 .resetvalue = 0 }, 7028 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7029 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 7030 .access = PL1_R, .type = ARM_CP_CONST, 7031 .accessfn = access_aa64_tid3, 7032 .resetvalue = 0 }, 7033 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 7034 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 7035 .access = PL1_R, .type = ARM_CP_CONST, 7036 .accessfn = access_aa64_tid3, 7037 /* At present, only SVEver == 0 is defined anyway. */ 7038 .resetvalue = 0 }, 7039 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7040 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 7041 .access = PL1_R, .type = ARM_CP_CONST, 7042 .accessfn = access_aa64_tid3, 7043 .resetvalue = 0 }, 7044 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7045 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 7046 .access = PL1_R, .type = ARM_CP_CONST, 7047 .accessfn = access_aa64_tid3, 7048 .resetvalue = 0 }, 7049 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7050 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 7051 .access = PL1_R, .type = ARM_CP_CONST, 7052 .accessfn = access_aa64_tid3, 7053 .resetvalue = 0 }, 7054 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7055 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7056 .access = PL1_R, .type = ARM_CP_CONST, 7057 .accessfn = access_aa64_tid3, 7058 .resetvalue = cpu->isar.id_aa64dfr0 }, 7059 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7060 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7061 .access = PL1_R, .type = ARM_CP_CONST, 7062 .accessfn = access_aa64_tid3, 7063 .resetvalue = cpu->isar.id_aa64dfr1 }, 7064 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7065 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7066 .access = PL1_R, .type = ARM_CP_CONST, 7067 .accessfn = access_aa64_tid3, 7068 .resetvalue = 0 }, 7069 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7070 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7071 .access = PL1_R, .type = ARM_CP_CONST, 7072 .accessfn = access_aa64_tid3, 7073 .resetvalue = 0 }, 7074 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7076 .access = PL1_R, .type = ARM_CP_CONST, 7077 .accessfn = access_aa64_tid3, 7078 .resetvalue = cpu->id_aa64afr0 }, 7079 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7080 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7081 .access = PL1_R, .type = ARM_CP_CONST, 7082 .accessfn = access_aa64_tid3, 7083 .resetvalue = cpu->id_aa64afr1 }, 7084 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7085 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7086 .access = PL1_R, .type = 
ARM_CP_CONST, 7087 .accessfn = access_aa64_tid3, 7088 .resetvalue = 0 }, 7089 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7090 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7091 .access = PL1_R, .type = ARM_CP_CONST, 7092 .accessfn = access_aa64_tid3, 7093 .resetvalue = 0 }, 7094 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7095 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7096 .access = PL1_R, .type = ARM_CP_CONST, 7097 .accessfn = access_aa64_tid3, 7098 .resetvalue = cpu->isar.id_aa64isar0 }, 7099 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7100 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7101 .access = PL1_R, .type = ARM_CP_CONST, 7102 .accessfn = access_aa64_tid3, 7103 .resetvalue = cpu->isar.id_aa64isar1 }, 7104 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7105 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7106 .access = PL1_R, .type = ARM_CP_CONST, 7107 .accessfn = access_aa64_tid3, 7108 .resetvalue = 0 }, 7109 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7110 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 7111 .access = PL1_R, .type = ARM_CP_CONST, 7112 .accessfn = access_aa64_tid3, 7113 .resetvalue = 0 }, 7114 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7115 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 7116 .access = PL1_R, .type = ARM_CP_CONST, 7117 .accessfn = access_aa64_tid3, 7118 .resetvalue = 0 }, 7119 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7120 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 7121 .access = PL1_R, .type = ARM_CP_CONST, 7122 .accessfn = access_aa64_tid3, 7123 .resetvalue = 0 }, 7124 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7125 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 7126 .access = PL1_R, .type = ARM_CP_CONST, 7127 .accessfn = access_aa64_tid3, 7128 .resetvalue = 0 }, 7129 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7130 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 7131 .access = PL1_R, .type = ARM_CP_CONST, 7132 .accessfn = access_aa64_tid3, 7133 .resetvalue = 0 }, 7134 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 7135 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 7136 .access = PL1_R, .type = ARM_CP_CONST, 7137 .accessfn = access_aa64_tid3, 7138 .resetvalue = cpu->isar.id_aa64mmfr0 }, 7139 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 7140 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 7141 .access = PL1_R, .type = ARM_CP_CONST, 7142 .accessfn = access_aa64_tid3, 7143 .resetvalue = cpu->isar.id_aa64mmfr1 }, 7144 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 7145 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 7146 .access = PL1_R, .type = ARM_CP_CONST, 7147 .accessfn = access_aa64_tid3, 7148 .resetvalue = cpu->isar.id_aa64mmfr2 }, 7149 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7150 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 7151 .access = PL1_R, .type = ARM_CP_CONST, 7152 .accessfn = access_aa64_tid3, 7153 .resetvalue = 0 }, 7154 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7155 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 7156 .access = PL1_R, .type = ARM_CP_CONST, 7157 .accessfn = access_aa64_tid3, 7158 .resetvalue = 0 }, 7159 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7160 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 7161 .access = PL1_R, .type = 
ARM_CP_CONST, 7162 .accessfn = access_aa64_tid3, 7163 .resetvalue = 0 }, 7164 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7165 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 7166 .access = PL1_R, .type = ARM_CP_CONST, 7167 .accessfn = access_aa64_tid3, 7168 .resetvalue = 0 }, 7169 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7170 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 7171 .access = PL1_R, .type = ARM_CP_CONST, 7172 .accessfn = access_aa64_tid3, 7173 .resetvalue = 0 }, 7174 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 7175 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7176 .access = PL1_R, .type = ARM_CP_CONST, 7177 .accessfn = access_aa64_tid3, 7178 .resetvalue = cpu->isar.mvfr0 }, 7179 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 7180 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7181 .access = PL1_R, .type = ARM_CP_CONST, 7182 .accessfn = access_aa64_tid3, 7183 .resetvalue = cpu->isar.mvfr1 }, 7184 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 7185 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7186 .access = PL1_R, .type = ARM_CP_CONST, 7187 .accessfn = access_aa64_tid3, 7188 .resetvalue = cpu->isar.mvfr2 }, 7189 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7190 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 7191 .access = PL1_R, .type = ARM_CP_CONST, 7192 .accessfn = access_aa64_tid3, 7193 .resetvalue = 0 }, 7194 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7195 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 7196 .access = PL1_R, .type = ARM_CP_CONST, 7197 .accessfn = access_aa64_tid3, 7198 .resetvalue = 0 }, 7199 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7200 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 7201 .access = PL1_R, .type = ARM_CP_CONST, 7202 .accessfn = access_aa64_tid3, 7203 .resetvalue = 0 }, 7204 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7205 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 7206 .access = PL1_R, .type = ARM_CP_CONST, 7207 .accessfn = access_aa64_tid3, 7208 .resetvalue = 0 }, 7209 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7210 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 7211 .access = PL1_R, .type = ARM_CP_CONST, 7212 .accessfn = access_aa64_tid3, 7213 .resetvalue = 0 }, 7214 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 7215 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 7216 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7217 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 7218 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 7219 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 7220 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7221 .resetvalue = cpu->pmceid0 }, 7222 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 7223 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 7224 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7225 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 7226 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 7227 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 7228 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7229 .resetvalue = cpu->pmceid1 }, 7230 REGINFO_SENTINEL 7231 }; 7232 #ifdef CONFIG_USER_ONLY 7233 ARMCPRegUserSpaceInfo v8_user_idregs[] = { 7234 { .name = "ID_AA64PFR0_EL1", 7235 .exported_bits = 0x000f000f00ff0000, 7236 .fixed_bits = 0x0000000000000011 }, 7237 { .name = "ID_AA64PFR1_EL1", 7238 .exported_bits 
= 0x00000000000000f0 }, 7239 { .name = "ID_AA64PFR*_EL1_RESERVED", 7240 .is_glob = true }, 7241 { .name = "ID_AA64ZFR0_EL1" }, 7242 { .name = "ID_AA64MMFR0_EL1", 7243 .fixed_bits = 0x00000000ff000000 }, 7244 { .name = "ID_AA64MMFR1_EL1" }, 7245 { .name = "ID_AA64MMFR*_EL1_RESERVED", 7246 .is_glob = true }, 7247 { .name = "ID_AA64DFR0_EL1", 7248 .fixed_bits = 0x0000000000000006 }, 7249 { .name = "ID_AA64DFR1_EL1" }, 7250 { .name = "ID_AA64DFR*_EL1_RESERVED", 7251 .is_glob = true }, 7252 { .name = "ID_AA64AFR*", 7253 .is_glob = true }, 7254 { .name = "ID_AA64ISAR0_EL1", 7255 .exported_bits = 0x00fffffff0fffff0 }, 7256 { .name = "ID_AA64ISAR1_EL1", 7257 .exported_bits = 0x000000f0ffffffff }, 7258 { .name = "ID_AA64ISAR*_EL1_RESERVED", 7259 .is_glob = true }, 7260 REGUSERINFO_SENTINEL 7261 }; 7262 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 7263 #endif 7264 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 7265 if (!arm_feature(env, ARM_FEATURE_EL3) && 7266 !arm_feature(env, ARM_FEATURE_EL2)) { 7267 ARMCPRegInfo rvbar = { 7268 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 7269 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 7270 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 7271 }; 7272 define_one_arm_cp_reg(cpu, &rvbar); 7273 } 7274 define_arm_cp_regs(cpu, v8_idregs); 7275 define_arm_cp_regs(cpu, v8_cp_reginfo); 7276 } 7277 if (arm_feature(env, ARM_FEATURE_EL2)) { 7278 uint64_t vmpidr_def = mpidr_read_val(env); 7279 ARMCPRegInfo vpidr_regs[] = { 7280 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 7281 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7282 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7283 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 7284 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 7285 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 7286 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7287 .access = PL2_RW, .resetvalue = cpu->midr, 7288 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7289 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 7290 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7291 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7292 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 7293 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 7294 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 7295 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7296 .access = PL2_RW, 7297 .resetvalue = vmpidr_def, 7298 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 7299 REGINFO_SENTINEL 7300 }; 7301 define_arm_cp_regs(cpu, vpidr_regs); 7302 define_arm_cp_regs(cpu, el2_cp_reginfo); 7303 if (arm_feature(env, ARM_FEATURE_V8)) { 7304 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 7305 } 7306 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 7307 if (!arm_feature(env, ARM_FEATURE_EL3)) { 7308 ARMCPRegInfo rvbar = { 7309 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 7310 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 7311 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 7312 }; 7313 define_one_arm_cp_reg(cpu, &rvbar); 7314 } 7315 } else { 7316 /* If EL2 is missing but higher ELs are enabled, we need to 7317 * register the no_el2 reginfos. 7318 */ 7319 if (arm_feature(env, ARM_FEATURE_EL3)) { 7320 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 7321 * of MIDR_EL1 and MPIDR_EL1. 
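 * (hence the constant MIDR-valued VPIDR_EL2 and the mpidr_read()-backed
 * VMPIDR_EL2 definitions just below.)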
7322 */ 7323 ARMCPRegInfo vpidr_regs[] = { 7324 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7325 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7326 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 7327 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 7328 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7329 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7330 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7331 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 7332 .type = ARM_CP_NO_RAW, 7333 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 7334 REGINFO_SENTINEL 7335 }; 7336 define_arm_cp_regs(cpu, vpidr_regs); 7337 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 7338 if (arm_feature(env, ARM_FEATURE_V8)) { 7339 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 7340 } 7341 } 7342 } 7343 if (arm_feature(env, ARM_FEATURE_EL3)) { 7344 define_arm_cp_regs(cpu, el3_cp_reginfo); 7345 ARMCPRegInfo el3_regs[] = { 7346 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 7347 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 7348 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 7349 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 7350 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 7351 .access = PL3_RW, 7352 .raw_writefn = raw_write, .writefn = sctlr_write, 7353 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 7354 .resetvalue = cpu->reset_sctlr }, 7355 REGINFO_SENTINEL 7356 }; 7357 7358 define_arm_cp_regs(cpu, el3_regs); 7359 } 7360 /* The behaviour of NSACR is sufficiently various that we don't 7361 * try to describe it in a single reginfo: 7362 * if EL3 is 64 bit, then trap to EL3 from S EL1, 7363 * reads as constant 0xc00 from NS EL1 and NS EL2 7364 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 7365 * if v7 without EL3, register doesn't exist 7366 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 7367 */ 7368 if (arm_feature(env, ARM_FEATURE_EL3)) { 7369 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7370 ARMCPRegInfo nsacr = { 7371 .name = "NSACR", .type = ARM_CP_CONST, 7372 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7373 .access = PL1_RW, .accessfn = nsacr_access, 7374 .resetvalue = 0xc00 7375 }; 7376 define_one_arm_cp_reg(cpu, &nsacr); 7377 } else { 7378 ARMCPRegInfo nsacr = { 7379 .name = "NSACR", 7380 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7381 .access = PL3_RW | PL1_R, 7382 .resetvalue = 0, 7383 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 7384 }; 7385 define_one_arm_cp_reg(cpu, &nsacr); 7386 } 7387 } else { 7388 if (arm_feature(env, ARM_FEATURE_V8)) { 7389 ARMCPRegInfo nsacr = { 7390 .name = "NSACR", .type = ARM_CP_CONST, 7391 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7392 .access = PL1_R, 7393 .resetvalue = 0xc00 7394 }; 7395 define_one_arm_cp_reg(cpu, &nsacr); 7396 } 7397 } 7398 7399 if (arm_feature(env, ARM_FEATURE_PMSA)) { 7400 if (arm_feature(env, ARM_FEATURE_V6)) { 7401 /* PMSAv6 not implemented */ 7402 assert(arm_feature(env, ARM_FEATURE_V7)); 7403 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 7404 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 7405 } else { 7406 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 7407 } 7408 } else { 7409 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 7410 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 7411 /* TTBCR2 is introduced with ARMv8.2-A32HPD.
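 * (Here it is gated on ID_MMFR4.HPDS != 0, since TTBCR2 is only present
 * when the AArch32 hierarchical permission disable feature is implemented.)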
*/ 7412 if (FIELD_EX32(cpu->isar.id_mmfr4, ID_MMFR4, HPDS) != 0) { 7413 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 7414 } 7415 } 7416 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 7417 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 7418 } 7419 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 7420 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 7421 } 7422 if (arm_feature(env, ARM_FEATURE_VAPA)) { 7423 define_arm_cp_regs(cpu, vapa_cp_reginfo); 7424 } 7425 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 7426 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 7427 } 7428 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 7429 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 7430 } 7431 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 7432 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 7433 } 7434 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 7435 define_arm_cp_regs(cpu, omap_cp_reginfo); 7436 } 7437 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 7438 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 7439 } 7440 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7441 define_arm_cp_regs(cpu, xscale_cp_reginfo); 7442 } 7443 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 7444 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 7445 } 7446 if (arm_feature(env, ARM_FEATURE_LPAE)) { 7447 define_arm_cp_regs(cpu, lpae_cp_reginfo); 7448 } 7449 if (cpu_isar_feature(aa32_jazelle, cpu)) { 7450 define_arm_cp_regs(cpu, jazelle_regs); 7451 } 7452 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 7453 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 7454 * be read-only (ie write causes UNDEF exception). 7455 */ 7456 { 7457 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 7458 /* Pre-v8 MIDR space. 7459 * Note that the MIDR isn't a simple constant register because 7460 * of the TI925 behaviour where writes to another register can 7461 * cause the MIDR value to change. 7462 * 7463 * Unimplemented registers in the c15 0 0 0 space default to 7464 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 7465 * and friends override accordingly. 7466 */ 7467 { .name = "MIDR", 7468 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 7469 .access = PL1_R, .resetvalue = cpu->midr, 7470 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 7471 .readfn = midr_read, 7472 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7473 .type = ARM_CP_OVERRIDE }, 7474 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
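 * (One wildcard DUMMY entry per CRm value below, with opc2 = CP_ANY, so any
 * read in that space returns zero.)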
*/ 7475 { .name = "DUMMY", 7476 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 7477 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7478 { .name = "DUMMY", 7479 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 7480 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7481 { .name = "DUMMY", 7482 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 7483 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7484 { .name = "DUMMY", 7485 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 7486 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7487 { .name = "DUMMY", 7488 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 7489 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7490 REGINFO_SENTINEL 7491 }; 7492 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 7493 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 7494 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 7495 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 7496 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7497 .readfn = midr_read }, 7498 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 7499 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7500 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7501 .access = PL1_R, .resetvalue = cpu->midr }, 7502 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7503 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 7504 .access = PL1_R, .resetvalue = cpu->midr }, 7505 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 7506 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 7507 .access = PL1_R, 7508 .accessfn = access_aa64_tid1, 7509 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 7510 REGINFO_SENTINEL 7511 }; 7512 ARMCPRegInfo id_cp_reginfo[] = { 7513 /* These are common to v8 and pre-v8 */ 7514 { .name = "CTR", 7515 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 7516 .access = PL1_R, .accessfn = ctr_el0_access, 7517 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7518 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 7519 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 7520 .access = PL0_R, .accessfn = ctr_el0_access, 7521 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7522 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 7523 { .name = "TCMTR", 7524 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 7525 .access = PL1_R, 7526 .accessfn = access_aa32_tid1, 7527 .type = ARM_CP_CONST, .resetvalue = 0 }, 7528 REGINFO_SENTINEL 7529 }; 7530 /* TLBTR is specific to VMSA */ 7531 ARMCPRegInfo id_tlbtr_reginfo = { 7532 .name = "TLBTR", 7533 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 7534 .access = PL1_R, 7535 .accessfn = access_aa32_tid1, 7536 .type = ARM_CP_CONST, .resetvalue = 0, 7537 }; 7538 /* MPUIR is specific to PMSA V6+ */ 7539 ARMCPRegInfo id_mpuir_reginfo = { 7540 .name = "MPUIR", 7541 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7542 .access = PL1_R, .type = ARM_CP_CONST, 7543 .resetvalue = cpu->pmsav7_dregion << 8 7544 }; 7545 ARMCPRegInfo crn0_wi_reginfo = { 7546 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 7547 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 7548 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 7549 }; 7550 #ifdef CONFIG_USER_ONLY 7551 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 7552 { .name = "MIDR_EL1", 7553 .exported_bits = 0x00000000ffffffff }, 7554 { .name = "REVIDR_EL1" }, 7555 REGUSERINFO_SENTINEL 7556 }; 7557 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 7558 #endif 7559 
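    /*
     * Note that CRN0_WI above is write-only (PL1_W) and ARM_CP_NOP, so when
     * it is registered on the OMAP/StrongARM path below it only soaks up the
     * writes; reads of the crn = 0 ID registers still return their
     * ARM_CP_CONST reset values.
     */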
if (arm_feature(env, ARM_FEATURE_OMAPCP) || 7560 arm_feature(env, ARM_FEATURE_STRONGARM)) { 7561 ARMCPRegInfo *r; 7562 /* Register the blanket "writes ignored" value first to cover the 7563 * whole space. Then update the specific ID registers to allow write 7564 * access, so that they ignore writes rather than causing them to 7565 * UNDEF. 7566 */ 7567 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 7568 for (r = id_pre_v8_midr_cp_reginfo; 7569 r->type != ARM_CP_SENTINEL; r++) { 7570 r->access = PL1_RW; 7571 } 7572 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 7573 r->access = PL1_RW; 7574 } 7575 id_mpuir_reginfo.access = PL1_RW; 7576 id_tlbtr_reginfo.access = PL1_RW; 7577 } 7578 if (arm_feature(env, ARM_FEATURE_V8)) { 7579 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 7580 } else { 7581 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 7582 } 7583 define_arm_cp_regs(cpu, id_cp_reginfo); 7584 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 7585 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 7586 } else if (arm_feature(env, ARM_FEATURE_V7)) { 7587 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 7588 } 7589 } 7590 7591 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 7592 ARMCPRegInfo mpidr_cp_reginfo[] = { 7593 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 7594 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 7595 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 7596 REGINFO_SENTINEL 7597 }; 7598 #ifdef CONFIG_USER_ONLY 7599 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 7600 { .name = "MPIDR_EL1", 7601 .fixed_bits = 0x0000000080000000 }, 7602 REGUSERINFO_SENTINEL 7603 }; 7604 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 7605 #endif 7606 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 7607 } 7608 7609 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 7610 ARMCPRegInfo auxcr_reginfo[] = { 7611 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 7612 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 7613 .access = PL1_RW, .type = ARM_CP_CONST, 7614 .resetvalue = cpu->reset_auxcr }, 7615 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 7616 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 7617 .access = PL2_RW, .type = ARM_CP_CONST, 7618 .resetvalue = 0 }, 7619 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 7620 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 7621 .access = PL3_RW, .type = ARM_CP_CONST, 7622 .resetvalue = 0 }, 7623 REGINFO_SENTINEL 7624 }; 7625 define_arm_cp_regs(cpu, auxcr_reginfo); 7626 if (arm_feature(env, ARM_FEATURE_V8)) { 7627 /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */ 7628 ARMCPRegInfo hactlr2_reginfo = { 7629 .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7630 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7631 .access = PL2_RW, .type = ARM_CP_CONST, 7632 .resetvalue = 0 7633 }; 7634 define_one_arm_cp_reg(cpu, &hactlr2_reginfo); 7635 } 7636 } 7637 7638 if (arm_feature(env, ARM_FEATURE_CBAR)) { 7639 /* 7640 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 7641 * There are two flavours: 7642 * (1) older 32-bit only cores have a simple 32-bit CBAR 7643 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 7644 * 32-bit register visible to AArch32 at a different encoding 7645 * to the "flavour 1" register and with the bits rearranged to 7646 * be able to squash a 64-bit address into the 32-bit view. 
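 * For example (purely illustrative value), a reset_cbar of 0x880000000
 * yields a 32-bit view of 0x80000008 via the cbar32 computation below.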
7647 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 7648 * in future if we support AArch32-only configs of some of the 7649 * AArch64 cores we might need to add a specific feature flag 7650 * to indicate cores with "flavour 2" CBAR. 7651 */ 7652 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7653 /* 32 bit view is [31:18] 0...0 [43:32]. */ 7654 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 7655 | extract64(cpu->reset_cbar, 32, 12); 7656 ARMCPRegInfo cbar_reginfo[] = { 7657 { .name = "CBAR", 7658 .type = ARM_CP_CONST, 7659 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 7660 .access = PL1_R, .resetvalue = cbar32 }, 7661 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 7662 .type = ARM_CP_CONST, 7663 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 7664 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 7665 REGINFO_SENTINEL 7666 }; 7667 /* We don't implement a r/w 64 bit CBAR currently */ 7668 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 7669 define_arm_cp_regs(cpu, cbar_reginfo); 7670 } else { 7671 ARMCPRegInfo cbar = { 7672 .name = "CBAR", 7673 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 7674 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 7675 .fieldoffset = offsetof(CPUARMState, 7676 cp15.c15_config_base_address) 7677 }; 7678 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 7679 cbar.access = PL1_R; 7680 cbar.fieldoffset = 0; 7681 cbar.type = ARM_CP_CONST; 7682 } 7683 define_one_arm_cp_reg(cpu, &cbar); 7684 } 7685 } 7686 7687 if (arm_feature(env, ARM_FEATURE_VBAR)) { 7688 ARMCPRegInfo vbar_cp_reginfo[] = { 7689 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 7690 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 7691 .access = PL1_RW, .writefn = vbar_write, 7692 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 7693 offsetof(CPUARMState, cp15.vbar_ns) }, 7694 .resetvalue = 0 }, 7695 REGINFO_SENTINEL 7696 }; 7697 define_arm_cp_regs(cpu, vbar_cp_reginfo); 7698 } 7699 7700 /* Generic registers whose values depend on the implementation */ 7701 { 7702 ARMCPRegInfo sctlr = { 7703 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 7704 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 7705 .access = PL1_RW, 7706 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 7707 offsetof(CPUARMState, cp15.sctlr_ns) }, 7708 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 7709 .raw_writefn = raw_write, 7710 }; 7711 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7712 /* Normally we would always end the TB on an SCTLR write, but Linux 7713 * arch/arm/mach-pxa/sleep.S expects two instructions following 7714 * an MMU enable to execute from cache. Imitate this behaviour. 
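 * (ARM_CP_SUPPRESS_TB_END below stops the SCTLR write from ending the
 * current translation block, so the following instructions continue to
 * execute from the already-translated code.)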
7715 */ 7716 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 7717 } 7718 define_one_arm_cp_reg(cpu, &sctlr); 7719 } 7720 7721 if (cpu_isar_feature(aa64_lor, cpu)) { 7722 define_arm_cp_regs(cpu, lor_reginfo); 7723 } 7724 if (cpu_isar_feature(aa64_pan, cpu)) { 7725 define_one_arm_cp_reg(cpu, &pan_reginfo); 7726 } 7727 #ifndef CONFIG_USER_ONLY 7728 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 7729 define_arm_cp_regs(cpu, ats1e1_reginfo); 7730 } 7731 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 7732 define_arm_cp_regs(cpu, ats1cp_reginfo); 7733 } 7734 #endif 7735 if (cpu_isar_feature(aa64_uao, cpu)) { 7736 define_one_arm_cp_reg(cpu, &uao_reginfo); 7737 } 7738 7739 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7740 define_arm_cp_regs(cpu, vhe_reginfo); 7741 } 7742 7743 if (cpu_isar_feature(aa64_sve, cpu)) { 7744 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 7745 if (arm_feature(env, ARM_FEATURE_EL2)) { 7746 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 7747 } else { 7748 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 7749 } 7750 if (arm_feature(env, ARM_FEATURE_EL3)) { 7751 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 7752 } 7753 } 7754 7755 #ifdef TARGET_AARCH64 7756 if (cpu_isar_feature(aa64_pauth, cpu)) { 7757 define_arm_cp_regs(cpu, pauth_reginfo); 7758 } 7759 if (cpu_isar_feature(aa64_rndr, cpu)) { 7760 define_arm_cp_regs(cpu, rndr_reginfo); 7761 } 7762 #ifndef CONFIG_USER_ONLY 7763 /* Data Cache clean instructions up to PoP */ 7764 if (cpu_isar_feature(aa64_dcpop, cpu)) { 7765 define_one_arm_cp_reg(cpu, dcpop_reg); 7766 7767 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 7768 define_one_arm_cp_reg(cpu, dcpodp_reg); 7769 } 7770 } 7771 #endif /*CONFIG_USER_ONLY*/ 7772 #endif 7773 7774 if (cpu_isar_feature(any_predinv, cpu)) { 7775 define_arm_cp_regs(cpu, predinv_reginfo); 7776 } 7777 7778 #ifndef CONFIG_USER_ONLY 7779 /* 7780 * Register redirections and aliases must be done last, 7781 * after the registers from the other extensions have been defined. 7782 */ 7783 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7784 define_arm_vh_e2h_redirects_aliases(cpu); 7785 } 7786 #endif 7787 } 7788 7789 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 7790 { 7791 CPUState *cs = CPU(cpu); 7792 CPUARMState *env = &cpu->env; 7793 7794 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7795 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 7796 aarch64_fpu_gdb_set_reg, 7797 34, "aarch64-fpu.xml", 0); 7798 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 7799 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7800 51, "arm-neon.xml", 0); 7801 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 7802 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7803 35, "arm-vfp3.xml", 0); 7804 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 7805 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7806 19, "arm-vfp.xml", 0); 7807 } 7808 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 7809 arm_gen_dynamic_xml(cs), 7810 "system-registers.xml", 0); 7811 } 7812 7813 /* Sort alphabetically by type name, except for "any". 
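 * ("any" is forced to compare greater than every other type name, so it is
 * printed last by arm_cpu_list().)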
*/ 7814 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 7815 { 7816 ObjectClass *class_a = (ObjectClass *)a; 7817 ObjectClass *class_b = (ObjectClass *)b; 7818 const char *name_a, *name_b; 7819 7820 name_a = object_class_get_name(class_a); 7821 name_b = object_class_get_name(class_b); 7822 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 7823 return 1; 7824 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 7825 return -1; 7826 } else { 7827 return strcmp(name_a, name_b); 7828 } 7829 } 7830 7831 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 7832 { 7833 ObjectClass *oc = data; 7834 const char *typename; 7835 char *name; 7836 7837 typename = object_class_get_name(oc); 7838 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 7839 qemu_printf(" %s\n", name); 7840 g_free(name); 7841 } 7842 7843 void arm_cpu_list(void) 7844 { 7845 GSList *list; 7846 7847 list = object_class_get_list(TYPE_ARM_CPU, false); 7848 list = g_slist_sort(list, arm_cpu_list_compare); 7849 qemu_printf("Available CPUs:\n"); 7850 g_slist_foreach(list, arm_cpu_list_entry, NULL); 7851 g_slist_free(list); 7852 } 7853 7854 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 7855 { 7856 ObjectClass *oc = data; 7857 CpuDefinitionInfoList **cpu_list = user_data; 7858 CpuDefinitionInfoList *entry; 7859 CpuDefinitionInfo *info; 7860 const char *typename; 7861 7862 typename = object_class_get_name(oc); 7863 info = g_malloc0(sizeof(*info)); 7864 info->name = g_strndup(typename, 7865 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 7866 info->q_typename = g_strdup(typename); 7867 7868 entry = g_malloc0(sizeof(*entry)); 7869 entry->value = info; 7870 entry->next = *cpu_list; 7871 *cpu_list = entry; 7872 } 7873 7874 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 7875 { 7876 CpuDefinitionInfoList *cpu_list = NULL; 7877 GSList *list; 7878 7879 list = object_class_get_list(TYPE_ARM_CPU, false); 7880 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 7881 g_slist_free(list); 7882 7883 return cpu_list; 7884 } 7885 7886 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 7887 void *opaque, int state, int secstate, 7888 int crm, int opc1, int opc2, 7889 const char *name) 7890 { 7891 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 7892 * add a single reginfo struct to the hash table. 7893 */ 7894 uint32_t *key = g_new(uint32_t, 1); 7895 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 7896 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 7897 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 7898 7899 r2->name = g_strdup(name); 7900 /* Reset the secure state to the specific incoming state. This is 7901 * necessary as the register may have been defined with both states. 7902 */ 7903 r2->secure = secstate; 7904 7905 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 7906 /* Register is banked (using both entries in array). 7907 * Overwriting fieldoffset as the array is only used to define 7908 * banked registers but later only fieldoffset is used. 7909 */ 7910 r2->fieldoffset = r->bank_fieldoffsets[ns]; 7911 } 7912 7913 if (state == ARM_CP_STATE_AA32) { 7914 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 7915 /* If the register is banked then we don't need to migrate or 7916 * reset the 32-bit instance in certain cases: 7917 * 7918 * 1) If the register has both 32-bit and 64-bit instances then we 7919 * can count on the 64-bit instance taking care of the 7920 * non-secure bank. 
7921 * 2) If ARMv8 is enabled then we can count on a 64-bit version 7922 * taking care of the secure bank. This requires that separate 7923 * 32 and 64-bit definitions are provided. 7924 */ 7925 if ((r->state == ARM_CP_STATE_BOTH && ns) || 7926 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 7927 r2->type |= ARM_CP_ALIAS; 7928 } 7929 } else if ((secstate != r->secure) && !ns) { 7930 /* The register is not banked so we only want to allow migration of 7931 * the non-secure instance. 7932 */ 7933 r2->type |= ARM_CP_ALIAS; 7934 } 7935 7936 if (r->state == ARM_CP_STATE_BOTH) { 7937 /* We assume it is a cp15 register if the .cp field is left unset. 7938 */ 7939 if (r2->cp == 0) { 7940 r2->cp = 15; 7941 } 7942 7943 #ifdef HOST_WORDS_BIGENDIAN 7944 if (r2->fieldoffset) { 7945 r2->fieldoffset += sizeof(uint32_t); 7946 } 7947 #endif 7948 } 7949 } 7950 if (state == ARM_CP_STATE_AA64) { 7951 /* To allow abbreviation of ARMCPRegInfo 7952 * definitions, we treat cp == 0 as equivalent to 7953 * the value for "standard guest-visible sysreg". 7954 * STATE_BOTH definitions are also always "standard 7955 * sysreg" in their AArch64 view (the .cp value may 7956 * be non-zero for the benefit of the AArch32 view). 7957 */ 7958 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 7959 r2->cp = CP_REG_ARM64_SYSREG_CP; 7960 } 7961 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 7962 r2->opc0, opc1, opc2); 7963 } else { 7964 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 7965 } 7966 if (opaque) { 7967 r2->opaque = opaque; 7968 } 7969 /* reginfo passed to helpers is correct for the actual access, 7970 * and is never ARM_CP_STATE_BOTH: 7971 */ 7972 r2->state = state; 7973 /* Make sure reginfo passed to helpers for wildcarded regs 7974 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 7975 */ 7976 r2->crm = crm; 7977 r2->opc1 = opc1; 7978 r2->opc2 = opc2; 7979 /* By convention, for wildcarded registers only the first 7980 * entry is used for migration; the others are marked as 7981 * ALIAS so we don't try to transfer the register 7982 * multiple times. Special registers (ie NOP/WFI) are 7983 * never migratable and not even raw-accessible. 7984 */ 7985 if ((r->type & ARM_CP_SPECIAL)) { 7986 r2->type |= ARM_CP_NO_RAW; 7987 } 7988 if (((r->crm == CP_ANY) && crm != 0) || 7989 ((r->opc1 == CP_ANY) && opc1 != 0) || 7990 ((r->opc2 == CP_ANY) && opc2 != 0)) { 7991 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 7992 } 7993 7994 /* Check that raw accesses are either forbidden or handled. Note that 7995 * we can't assert this earlier because the setup of fieldoffset for 7996 * banked registers has to be done first. 7997 */ 7998 if (!(r2->type & ARM_CP_NO_RAW)) { 7999 assert(!raw_accessors_invalid(r2)); 8000 } 8001 8002 /* Overriding of an existing definition must be explicitly 8003 * requested. 8004 */ 8005 if (!(r->type & ARM_CP_OVERRIDE)) { 8006 ARMCPRegInfo *oldreg; 8007 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 8008 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 8009 fprintf(stderr, "Register redefined: cp=%d %d bit " 8010 "crn=%d crm=%d opc1=%d opc2=%d, " 8011 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 8012 r2->crn, r2->crm, r2->opc1, r2->opc2, 8013 oldreg->name, r2->name); 8014 g_assert_not_reached(); 8015 } 8016 } 8017 g_hash_table_insert(cpu->cp_regs, key, r2); 8018 } 8019 8020 8021 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 8022 const ARMCPRegInfo *r, void *opaque) 8023 { 8024 /* Define implementations of coprocessor registers. 
8025 * We store these in a hashtable because typically 8026 * there are less than 150 registers in a space which 8027 * is 16*16*16*8*8 = 262144 in size. 8028 * Wildcarding is supported for the crm, opc1 and opc2 fields. 8029 * If a register is defined twice then the second definition is 8030 * used, so this can be used to define some generic registers and 8031 * then override them with implementation specific variations. 8032 * At least one of the original and the second definition should 8033 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 8034 * against accidental use. 8035 * 8036 * The state field defines whether the register is to be 8037 * visible in the AArch32 or AArch64 execution state. If the 8038 * state is set to ARM_CP_STATE_BOTH then we synthesise a 8039 * reginfo structure for the AArch32 view, which sees the lower 8040 * 32 bits of the 64 bit register. 8041 * 8042 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 8043 * be wildcarded. AArch64 registers are always considered to be 64 8044 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 8045 * the register, if any. 8046 */ 8047 int crm, opc1, opc2, state; 8048 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 8049 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 8050 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 8051 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 8052 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 8053 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 8054 /* 64 bit registers have only CRm and Opc1 fields */ 8055 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 8056 /* op0 only exists in the AArch64 encodings */ 8057 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 8058 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 8059 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 8060 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 8061 * encodes a minimum access level for the register. We roll this 8062 * runtime check into our general permission check code, so check 8063 * here that the reginfo's specified permissions are strict enough 8064 * to encompass the generic architectural permission check. 8065 */ 8066 if (r->state != ARM_CP_STATE_AA32) { 8067 int mask = 0; 8068 switch (r->opc1) { 8069 case 0: 8070 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 8071 mask = PL0U_R | PL1_RW; 8072 break; 8073 case 1: case 2: 8074 /* min_EL EL1 */ 8075 mask = PL1_RW; 8076 break; 8077 case 3: 8078 /* min_EL EL0 */ 8079 mask = PL0_RW; 8080 break; 8081 case 4: 8082 case 5: 8083 /* min_EL EL2 */ 8084 mask = PL2_RW; 8085 break; 8086 case 6: 8087 /* min_EL EL3 */ 8088 mask = PL3_RW; 8089 break; 8090 case 7: 8091 /* min_EL EL1, secure mode only (we don't check the latter) */ 8092 mask = PL1_RW; 8093 break; 8094 default: 8095 /* broken reginfo with out-of-range opc1 */ 8096 assert(false); 8097 break; 8098 } 8099 /* assert our permissions are not too lax (stricter is fine) */ 8100 assert((r->access & ~mask) == 0); 8101 } 8102 8103 /* Check that the register definition has enough info to handle 8104 * reads and writes if they are permitted. 
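 *
 * As an illustration only (this is not a register actually defined by this
 * file, and the cp15 field named here is hypothetical), a caller could
 * describe a simple EL1 read/write register backed by CPU state like so:
 *
 *   static const ARMCPRegInfo example_reginfo = {
 *       .name = "EXAMPLE_EL1", .state = ARM_CP_STATE_AA64,
 *       .opc0 = 3, .opc1 = 0, .crn = 11, .crm = 0, .opc2 = 0,
 *       .access = PL1_RW,
 *       .fieldoffset = offsetof(CPUARMState, cp15.example_el1),
 *   };
 *   define_one_arm_cp_reg(cpu, &example_reginfo);
 *
 * The assertions below reject definitions which permit reads or writes but
 * provide neither a field offset (or banked offsets) nor a corresponding
 * readfn/writefn.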
8105 */ 8106 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 8107 if (r->access & PL3_R) { 8108 assert((r->fieldoffset || 8109 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8110 r->readfn); 8111 } 8112 if (r->access & PL3_W) { 8113 assert((r->fieldoffset || 8114 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8115 r->writefn); 8116 } 8117 } 8118 /* Bad type field probably means missing sentinel at end of reg list */ 8119 assert(cptype_valid(r->type)); 8120 for (crm = crmmin; crm <= crmmax; crm++) { 8121 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 8122 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 8123 for (state = ARM_CP_STATE_AA32; 8124 state <= ARM_CP_STATE_AA64; state++) { 8125 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 8126 continue; 8127 } 8128 if (state == ARM_CP_STATE_AA32) { 8129 /* Under AArch32 CP registers can be common 8130 * (same for secure and non-secure world) or banked. 8131 */ 8132 char *name; 8133 8134 switch (r->secure) { 8135 case ARM_CP_SECSTATE_S: 8136 case ARM_CP_SECSTATE_NS: 8137 add_cpreg_to_hashtable(cpu, r, opaque, state, 8138 r->secure, crm, opc1, opc2, 8139 r->name); 8140 break; 8141 default: 8142 name = g_strdup_printf("%s_S", r->name); 8143 add_cpreg_to_hashtable(cpu, r, opaque, state, 8144 ARM_CP_SECSTATE_S, 8145 crm, opc1, opc2, name); 8146 g_free(name); 8147 add_cpreg_to_hashtable(cpu, r, opaque, state, 8148 ARM_CP_SECSTATE_NS, 8149 crm, opc1, opc2, r->name); 8150 break; 8151 } 8152 } else { 8153 /* AArch64 registers get mapped to non-secure instance 8154 * of AArch32 */ 8155 add_cpreg_to_hashtable(cpu, r, opaque, state, 8156 ARM_CP_SECSTATE_NS, 8157 crm, opc1, opc2, r->name); 8158 } 8159 } 8160 } 8161 } 8162 } 8163 } 8164 8165 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 8166 const ARMCPRegInfo *regs, void *opaque) 8167 { 8168 /* Define a whole list of registers */ 8169 const ARMCPRegInfo *r; 8170 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8171 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 8172 } 8173 } 8174 8175 /* 8176 * Modify ARMCPRegInfo for access from userspace. 8177 * 8178 * This is a data driven modification directed by 8179 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 8180 * user-space cannot alter any values and dynamic values pertaining to 8181 * execution state are hidden from user space view anyway. 
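 *
 * A minimal illustrative mods table (the register names and masks below
 * are examples only, not the set QEMU actually exports) might look like:
 *
 *   static const ARMCPRegUserSpaceInfo example_user_regs[] = {
 *       { .name = "ID_EXAMPLE_EL1", .exported_bits = 0xff },
 *       { .name = "ID_EXAMPLE2*",   .is_glob = true },
 *       { .name = NULL },            /* terminator for the loop below */
 *   };
 *   modify_arm_cp_regs(example_reginfo_list, example_user_regs);
 *
 * where example_reginfo_list stands for any ARM_CP_SENTINEL-terminated
 * ARMCPRegInfo array. Glob entries turn every matching register into a
 * read-as-zero constant, while an exact-name match keeps only
 * .exported_bits in the reset value, ORs in .fixed_bits, and then stops
 * the scan for that mods entry.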
8182 */ 8183 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) 8184 { 8185 const ARMCPRegUserSpaceInfo *m; 8186 ARMCPRegInfo *r; 8187 8188 for (m = mods; m->name; m++) { 8189 GPatternSpec *pat = NULL; 8190 if (m->is_glob) { 8191 pat = g_pattern_spec_new(m->name); 8192 } 8193 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8194 if (pat && g_pattern_match_string(pat, r->name)) { 8195 r->type = ARM_CP_CONST; 8196 r->access = PL0U_R; 8197 r->resetvalue = 0; 8198 /* continue */ 8199 } else if (strcmp(r->name, m->name) == 0) { 8200 r->type = ARM_CP_CONST; 8201 r->access = PL0U_R; 8202 r->resetvalue &= m->exported_bits; 8203 r->resetvalue |= m->fixed_bits; 8204 break; 8205 } 8206 } 8207 if (pat) { 8208 g_pattern_spec_free(pat); 8209 } 8210 } 8211 } 8212 8213 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 8214 { 8215 return g_hash_table_lookup(cpregs, &encoded_cp); 8216 } 8217 8218 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 8219 uint64_t value) 8220 { 8221 /* Helper coprocessor write function for write-ignore registers */ 8222 } 8223 8224 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 8225 { 8226 /* Helper coprocessor write function for read-as-zero registers */ 8227 return 0; 8228 } 8229 8230 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 8231 { 8232 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 8233 } 8234 8235 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 8236 { 8237 /* Return true if it is not valid for us to switch to 8238 * this CPU mode (ie all the UNPREDICTABLE cases in 8239 * the ARM ARM CPSRWriteByInstr pseudocode). 8240 */ 8241 8242 /* Changes to or from Hyp via MSR and CPS are illegal. */ 8243 if (write_type == CPSRWriteByInstr && 8244 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 8245 mode == ARM_CPU_MODE_HYP)) { 8246 return 1; 8247 } 8248 8249 switch (mode) { 8250 case ARM_CPU_MODE_USR: 8251 return 0; 8252 case ARM_CPU_MODE_SYS: 8253 case ARM_CPU_MODE_SVC: 8254 case ARM_CPU_MODE_ABT: 8255 case ARM_CPU_MODE_UND: 8256 case ARM_CPU_MODE_IRQ: 8257 case ARM_CPU_MODE_FIQ: 8258 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 8259 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 8260 */ 8261 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 8262 * and CPS are treated as illegal mode changes. 
8263 */ 8264 if (write_type == CPSRWriteByInstr && 8265 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 8266 (arm_hcr_el2_eff(env) & HCR_TGE)) { 8267 return 1; 8268 } 8269 return 0; 8270 case ARM_CPU_MODE_HYP: 8271 return !arm_feature(env, ARM_FEATURE_EL2) 8272 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); 8273 case ARM_CPU_MODE_MON: 8274 return arm_current_el(env) < 3; 8275 default: 8276 return 1; 8277 } 8278 } 8279 8280 uint32_t cpsr_read(CPUARMState *env) 8281 { 8282 int ZF; 8283 ZF = (env->ZF == 0); 8284 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 8285 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 8286 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 8287 | ((env->condexec_bits & 0xfc) << 8) 8288 | (env->GE << 16) | (env->daif & CPSR_AIF); 8289 } 8290 8291 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 8292 CPSRWriteType write_type) 8293 { 8294 uint32_t changed_daif; 8295 8296 if (mask & CPSR_NZCV) { 8297 env->ZF = (~val) & CPSR_Z; 8298 env->NF = val; 8299 env->CF = (val >> 29) & 1; 8300 env->VF = (val << 3) & 0x80000000; 8301 } 8302 if (mask & CPSR_Q) 8303 env->QF = ((val & CPSR_Q) != 0); 8304 if (mask & CPSR_T) 8305 env->thumb = ((val & CPSR_T) != 0); 8306 if (mask & CPSR_IT_0_1) { 8307 env->condexec_bits &= ~3; 8308 env->condexec_bits |= (val >> 25) & 3; 8309 } 8310 if (mask & CPSR_IT_2_7) { 8311 env->condexec_bits &= 3; 8312 env->condexec_bits |= (val >> 8) & 0xfc; 8313 } 8314 if (mask & CPSR_GE) { 8315 env->GE = (val >> 16) & 0xf; 8316 } 8317 8318 /* In a V7 implementation that includes the security extensions but does 8319 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 8320 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 8321 * bits respectively. 8322 * 8323 * In a V8 implementation, it is permitted for privileged software to 8324 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 8325 */ 8326 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 8327 arm_feature(env, ARM_FEATURE_EL3) && 8328 !arm_feature(env, ARM_FEATURE_EL2) && 8329 !arm_is_secure(env)) { 8330 8331 changed_daif = (env->daif ^ val) & mask; 8332 8333 if (changed_daif & CPSR_A) { 8334 /* Check to see if we are allowed to change the masking of async 8335 * abort exceptions from a non-secure state. 8336 */ 8337 if (!(env->cp15.scr_el3 & SCR_AW)) { 8338 qemu_log_mask(LOG_GUEST_ERROR, 8339 "Ignoring attempt to switch CPSR_A flag from " 8340 "non-secure world with SCR.AW bit clear\n"); 8341 mask &= ~CPSR_A; 8342 } 8343 } 8344 8345 if (changed_daif & CPSR_F) { 8346 /* Check to see if we are allowed to change the masking of FIQ 8347 * exceptions from a non-secure state. 8348 */ 8349 if (!(env->cp15.scr_el3 & SCR_FW)) { 8350 qemu_log_mask(LOG_GUEST_ERROR, 8351 "Ignoring attempt to switch CPSR_F flag from " 8352 "non-secure world with SCR.FW bit clear\n"); 8353 mask &= ~CPSR_F; 8354 } 8355 8356 /* Check whether non-maskable FIQ (NMFI) support is enabled. 8357 * If this bit is set software is not allowed to mask 8358 * FIQs, but is allowed to set CPSR_F to 0. 
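 * For example, with SCTLR.NMFI set, a guest MSR that attempts to set
 * CPSR.F is ignored (CPSR_F is simply dropped from the write mask below),
 * whereas a write that clears CPSR.F is still honoured.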
8359 */ 8360 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 8361 (val & CPSR_F)) { 8362 qemu_log_mask(LOG_GUEST_ERROR, 8363 "Ignoring attempt to enable CPSR_F flag " 8364 "(non-maskable FIQ [NMFI] support enabled)\n"); 8365 mask &= ~CPSR_F; 8366 } 8367 } 8368 } 8369 8370 env->daif &= ~(CPSR_AIF & mask); 8371 env->daif |= val & CPSR_AIF & mask; 8372 8373 if (write_type != CPSRWriteRaw && 8374 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 8375 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 8376 /* Note that we can only get here in USR mode if this is a 8377 * gdb stub write; for this case we follow the architectural 8378 * behaviour for guest writes in USR mode of ignoring an attempt 8379 * to switch mode. (Those are caught by translate.c for writes 8380 * triggered by guest instructions.) 8381 */ 8382 mask &= ~CPSR_M; 8383 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 8384 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 8385 * v7, and has defined behaviour in v8: 8386 * + leave CPSR.M untouched 8387 * + allow changes to the other CPSR fields 8388 * + set PSTATE.IL 8389 * For user changes via the GDB stub, we don't set PSTATE.IL, 8390 * as this would be unnecessarily harsh for a user error. 8391 */ 8392 mask &= ~CPSR_M; 8393 if (write_type != CPSRWriteByGDBStub && 8394 arm_feature(env, ARM_FEATURE_V8)) { 8395 mask |= CPSR_IL; 8396 val |= CPSR_IL; 8397 } 8398 qemu_log_mask(LOG_GUEST_ERROR, 8399 "Illegal AArch32 mode switch attempt from %s to %s\n", 8400 aarch32_mode_name(env->uncached_cpsr), 8401 aarch32_mode_name(val)); 8402 } else { 8403 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 8404 write_type == CPSRWriteExceptionReturn ? 8405 "Exception return from AArch32" : 8406 "AArch32 mode switch from", 8407 aarch32_mode_name(env->uncached_cpsr), 8408 aarch32_mode_name(val), env->regs[15]); 8409 switch_mode(env, val & CPSR_M); 8410 } 8411 } 8412 mask &= ~CACHED_CPSR_BITS; 8413 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 8414 } 8415 8416 /* Sign/zero extend */ 8417 uint32_t HELPER(sxtb16)(uint32_t x) 8418 { 8419 uint32_t res; 8420 res = (uint16_t)(int8_t)x; 8421 res |= (uint32_t)(int8_t)(x >> 16) << 16; 8422 return res; 8423 } 8424 8425 uint32_t HELPER(uxtb16)(uint32_t x) 8426 { 8427 uint32_t res; 8428 res = (uint16_t)(uint8_t)x; 8429 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 8430 return res; 8431 } 8432 8433 int32_t HELPER(sdiv)(int32_t num, int32_t den) 8434 { 8435 if (den == 0) 8436 return 0; 8437 if (num == INT_MIN && den == -1) 8438 return INT_MIN; 8439 return num / den; 8440 } 8441 8442 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 8443 { 8444 if (den == 0) 8445 return 0; 8446 return num / den; 8447 } 8448 8449 uint32_t HELPER(rbit)(uint32_t x) 8450 { 8451 return revbit32(x); 8452 } 8453 8454 #ifdef CONFIG_USER_ONLY 8455 8456 static void switch_mode(CPUARMState *env, int mode) 8457 { 8458 ARMCPU *cpu = env_archcpu(env); 8459 8460 if (mode != ARM_CPU_MODE_USR) { 8461 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 8462 } 8463 } 8464 8465 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8466 uint32_t cur_el, bool secure) 8467 { 8468 return 1; 8469 } 8470 8471 void aarch64_sync_64_to_32(CPUARMState *env) 8472 { 8473 g_assert_not_reached(); 8474 } 8475 8476 #else 8477 8478 static void switch_mode(CPUARMState *env, int mode) 8479 { 8480 int old_mode; 8481 int i; 8482 8483 old_mode = env->uncached_cpsr & CPSR_M; 8484 if (mode == old_mode) 8485 return; 8486 8487 if 
(old_mode == ARM_CPU_MODE_FIQ) { 8488 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8489 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 8490 } else if (mode == ARM_CPU_MODE_FIQ) { 8491 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8492 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 8493 } 8494 8495 i = bank_number(old_mode); 8496 env->banked_r13[i] = env->regs[13]; 8497 env->banked_spsr[i] = env->spsr; 8498 8499 i = bank_number(mode); 8500 env->regs[13] = env->banked_r13[i]; 8501 env->spsr = env->banked_spsr[i]; 8502 8503 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 8504 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 8505 } 8506 8507 /* Physical Interrupt Target EL Lookup Table 8508 * 8509 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 8510 * 8511 * The below multi-dimensional table is used for looking up the target 8512 * exception level given numerous condition criteria. Specifically, the 8513 * target EL is based on SCR and HCR routing controls as well as the 8514 * currently executing EL and secure state. 8515 * 8516 * Dimensions: 8517 * target_el_table[2][2][2][2][2][4] 8518 * | | | | | +--- Current EL 8519 * | | | | +------ Non-secure(0)/Secure(1) 8520 * | | | +--------- HCR mask override 8521 * | | +------------ SCR exec state control 8522 * | +--------------- SCR mask override 8523 * +------------------ 32-bit(0)/64-bit(1) EL3 8524 * 8525 * The table values are as such: 8526 * 0-3 = EL0-EL3 8527 * -1 = Cannot occur 8528 * 8529 * The ARM ARM target EL table includes entries indicating that an "exception 8530 * is not taken". The two cases where this is applicable are: 8531 * 1) An exception is taken from EL3 but the SCR does not have the exception 8532 * routed to EL3. 8533 * 2) An exception is taken from EL2 but the HCR does not have the exception 8534 * routed to EL2. 8535 * In these two cases, the below table contain a target of EL1. This value is 8536 * returned as it is expected that the consumer of the table data will check 8537 * for "target EL >= current EL" to ensure the exception is not taken. 
8538 * 8539 * SCR HCR 8540 * 64 EA AMO From 8541 * BIT IRQ IMO Non-secure Secure 8542 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 8543 */ 8544 static const int8_t target_el_table[2][2][2][2][2][4] = { 8545 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8546 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 8547 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8548 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 8549 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8550 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 8551 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8552 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 8553 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 8554 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 8555 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 8556 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 8557 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8558 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 8559 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8560 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 8561 }; 8562 8563 /* 8564 * Determine the target EL for physical exceptions 8565 */ 8566 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8567 uint32_t cur_el, bool secure) 8568 { 8569 CPUARMState *env = cs->env_ptr; 8570 bool rw; 8571 bool scr; 8572 bool hcr; 8573 int target_el; 8574 /* Is the highest EL AArch64? */ 8575 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 8576 uint64_t hcr_el2; 8577 8578 if (arm_feature(env, ARM_FEATURE_EL3)) { 8579 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 8580 } else { 8581 /* Either EL2 is the highest EL (and so the EL2 register width 8582 * is given by is64); or there is no EL2 or EL3, in which case 8583 * the value of 'rw' does not affect the table lookup anyway. 8584 */ 8585 rw = is64; 8586 } 8587 8588 hcr_el2 = arm_hcr_el2_eff(env); 8589 switch (excp_idx) { 8590 case EXCP_IRQ: 8591 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 8592 hcr = hcr_el2 & HCR_IMO; 8593 break; 8594 case EXCP_FIQ: 8595 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 8596 hcr = hcr_el2 & HCR_FMO; 8597 break; 8598 default: 8599 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 8600 hcr = hcr_el2 & HCR_AMO; 8601 break; 8602 }; 8603 8604 /* 8605 * For these purposes, TGE and AMO/IMO/FMO both force the 8606 * interrupt to EL2. Fold TGE into the bit extracted above. 
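 *
 * As a worked example: with an AArch64 EL3 (is64 = 1), a physical IRQ
 * taken from non-secure EL0 with SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and
 * HCR_EL2.IMO = 1 indexes target_el_table[1][0][1][1][0][0], which gives
 * target EL2, i.e. the interrupt is routed to the hypervisor.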
8607 */ 8608 hcr |= (hcr_el2 & HCR_TGE) != 0; 8609 8610 /* Perform a table-lookup for the target EL given the current state */ 8611 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 8612 8613 assert(target_el > 0); 8614 8615 return target_el; 8616 } 8617 8618 void arm_log_exception(int idx) 8619 { 8620 if (qemu_loglevel_mask(CPU_LOG_INT)) { 8621 const char *exc = NULL; 8622 static const char * const excnames[] = { 8623 [EXCP_UDEF] = "Undefined Instruction", 8624 [EXCP_SWI] = "SVC", 8625 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 8626 [EXCP_DATA_ABORT] = "Data Abort", 8627 [EXCP_IRQ] = "IRQ", 8628 [EXCP_FIQ] = "FIQ", 8629 [EXCP_BKPT] = "Breakpoint", 8630 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 8631 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 8632 [EXCP_HVC] = "Hypervisor Call", 8633 [EXCP_HYP_TRAP] = "Hypervisor Trap", 8634 [EXCP_SMC] = "Secure Monitor Call", 8635 [EXCP_VIRQ] = "Virtual IRQ", 8636 [EXCP_VFIQ] = "Virtual FIQ", 8637 [EXCP_SEMIHOST] = "Semihosting call", 8638 [EXCP_NOCP] = "v7M NOCP UsageFault", 8639 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 8640 [EXCP_STKOF] = "v8M STKOF UsageFault", 8641 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 8642 [EXCP_LSERR] = "v8M LSERR UsageFault", 8643 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 8644 }; 8645 8646 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 8647 exc = excnames[idx]; 8648 } 8649 if (!exc) { 8650 exc = "unknown"; 8651 } 8652 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 8653 } 8654 } 8655 8656 /* 8657 * Function used to synchronize QEMU's AArch64 register set with AArch32 8658 * register set. This is necessary when switching between AArch32 and AArch64 8659 * execution state. 8660 */ 8661 void aarch64_sync_32_to_64(CPUARMState *env) 8662 { 8663 int i; 8664 uint32_t mode = env->uncached_cpsr & CPSR_M; 8665 8666 /* We can blanket copy R[0:7] to X[0:7] */ 8667 for (i = 0; i < 8; i++) { 8668 env->xregs[i] = env->regs[i]; 8669 } 8670 8671 /* 8672 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 8673 * Otherwise, they come from the banked user regs. 8674 */ 8675 if (mode == ARM_CPU_MODE_FIQ) { 8676 for (i = 8; i < 13; i++) { 8677 env->xregs[i] = env->usr_regs[i - 8]; 8678 } 8679 } else { 8680 for (i = 8; i < 13; i++) { 8681 env->xregs[i] = env->regs[i]; 8682 } 8683 } 8684 8685 /* 8686 * Registers x13-x23 are the various mode SP and FP registers. Registers 8687 * r13 and r14 are only copied if we are in that mode, otherwise we copy 8688 * from the mode banked register. 
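 *
 * To summarise the fixed mapping implemented below: x13/x14 hold the USR
 * SP/LR, x15 the HYP SP, x16/x17 the IRQ LR/SP, x18/x19 the SVC LR/SP,
 * x20/x21 the ABT LR/SP and x22/x23 the UND LR/SP.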
8689 */ 8690 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8691 env->xregs[13] = env->regs[13]; 8692 env->xregs[14] = env->regs[14]; 8693 } else { 8694 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 8695 /* HYP is an exception in that it is copied from r14 */ 8696 if (mode == ARM_CPU_MODE_HYP) { 8697 env->xregs[14] = env->regs[14]; 8698 } else { 8699 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 8700 } 8701 } 8702 8703 if (mode == ARM_CPU_MODE_HYP) { 8704 env->xregs[15] = env->regs[13]; 8705 } else { 8706 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 8707 } 8708 8709 if (mode == ARM_CPU_MODE_IRQ) { 8710 env->xregs[16] = env->regs[14]; 8711 env->xregs[17] = env->regs[13]; 8712 } else { 8713 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 8714 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 8715 } 8716 8717 if (mode == ARM_CPU_MODE_SVC) { 8718 env->xregs[18] = env->regs[14]; 8719 env->xregs[19] = env->regs[13]; 8720 } else { 8721 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 8722 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 8723 } 8724 8725 if (mode == ARM_CPU_MODE_ABT) { 8726 env->xregs[20] = env->regs[14]; 8727 env->xregs[21] = env->regs[13]; 8728 } else { 8729 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 8730 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 8731 } 8732 8733 if (mode == ARM_CPU_MODE_UND) { 8734 env->xregs[22] = env->regs[14]; 8735 env->xregs[23] = env->regs[13]; 8736 } else { 8737 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 8738 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 8739 } 8740 8741 /* 8742 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8743 * mode, then we can copy from r8-r14. Otherwise, we copy from the 8744 * FIQ bank for r8-r14. 8745 */ 8746 if (mode == ARM_CPU_MODE_FIQ) { 8747 for (i = 24; i < 31; i++) { 8748 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 8749 } 8750 } else { 8751 for (i = 24; i < 29; i++) { 8752 env->xregs[i] = env->fiq_regs[i - 24]; 8753 } 8754 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 8755 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 8756 } 8757 8758 env->pc = env->regs[15]; 8759 } 8760 8761 /* 8762 * Function used to synchronize QEMU's AArch32 register set with AArch64 8763 * register set. This is necessary when switching between AArch32 and AArch64 8764 * execution state. 8765 */ 8766 void aarch64_sync_64_to_32(CPUARMState *env) 8767 { 8768 int i; 8769 uint32_t mode = env->uncached_cpsr & CPSR_M; 8770 8771 /* We can blanket copy X[0:7] to R[0:7] */ 8772 for (i = 0; i < 8; i++) { 8773 env->regs[i] = env->xregs[i]; 8774 } 8775 8776 /* 8777 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 8778 * Otherwise, we copy x8-x12 into the banked user regs. 8779 */ 8780 if (mode == ARM_CPU_MODE_FIQ) { 8781 for (i = 8; i < 13; i++) { 8782 env->usr_regs[i - 8] = env->xregs[i]; 8783 } 8784 } else { 8785 for (i = 8; i < 13; i++) { 8786 env->regs[i] = env->xregs[i]; 8787 } 8788 } 8789 8790 /* 8791 * Registers r13 & r14 depend on the current mode. 8792 * If we are in a given mode, we copy the corresponding x registers to r13 8793 * and r14. Otherwise, we copy the x register to the banked r13 and r14 8794 * for the mode. 
8795 */ 8796 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8797 env->regs[13] = env->xregs[13]; 8798 env->regs[14] = env->xregs[14]; 8799 } else { 8800 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 8801 8802 /* 8803 * HYP is an exception in that it does not have its own banked r14 but 8804 * shares the USR r14 8805 */ 8806 if (mode == ARM_CPU_MODE_HYP) { 8807 env->regs[14] = env->xregs[14]; 8808 } else { 8809 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 8810 } 8811 } 8812 8813 if (mode == ARM_CPU_MODE_HYP) { 8814 env->regs[13] = env->xregs[15]; 8815 } else { 8816 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 8817 } 8818 8819 if (mode == ARM_CPU_MODE_IRQ) { 8820 env->regs[14] = env->xregs[16]; 8821 env->regs[13] = env->xregs[17]; 8822 } else { 8823 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 8824 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 8825 } 8826 8827 if (mode == ARM_CPU_MODE_SVC) { 8828 env->regs[14] = env->xregs[18]; 8829 env->regs[13] = env->xregs[19]; 8830 } else { 8831 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 8832 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 8833 } 8834 8835 if (mode == ARM_CPU_MODE_ABT) { 8836 env->regs[14] = env->xregs[20]; 8837 env->regs[13] = env->xregs[21]; 8838 } else { 8839 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 8840 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 8841 } 8842 8843 if (mode == ARM_CPU_MODE_UND) { 8844 env->regs[14] = env->xregs[22]; 8845 env->regs[13] = env->xregs[23]; 8846 } else { 8847 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 8848 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 8849 } 8850 8851 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8852 * mode, then we can copy to r8-r14. Otherwise, we copy to the 8853 * FIQ bank for r8-r14. 8854 */ 8855 if (mode == ARM_CPU_MODE_FIQ) { 8856 for (i = 24; i < 31; i++) { 8857 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 8858 } 8859 } else { 8860 for (i = 24; i < 29; i++) { 8861 env->fiq_regs[i - 24] = env->xregs[i]; 8862 } 8863 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 8864 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 8865 } 8866 8867 env->regs[15] = env->pc; 8868 } 8869 8870 static void take_aarch32_exception(CPUARMState *env, int new_mode, 8871 uint32_t mask, uint32_t offset, 8872 uint32_t newpc) 8873 { 8874 int new_el; 8875 8876 /* Change the CPU state so as to actually take the exception. */ 8877 switch_mode(env, new_mode); 8878 new_el = arm_current_el(env); 8879 8880 /* 8881 * For exceptions taken to AArch32 we must clear the SS bit in both 8882 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8883 */ 8884 env->uncached_cpsr &= ~PSTATE_SS; 8885 env->spsr = cpsr_read(env); 8886 /* Clear IT bits. */ 8887 env->condexec_bits = 0; 8888 /* Switch to the new mode, and to the correct instruction set. 
*/ 8889 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 8890 /* Set new mode endianness */ 8891 env->uncached_cpsr &= ~CPSR_E; 8892 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { 8893 env->uncached_cpsr |= CPSR_E; 8894 } 8895 /* J and IL must always be cleared for exception entry */ 8896 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 8897 env->daif |= mask; 8898 8899 if (new_mode == ARM_CPU_MODE_HYP) { 8900 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 8901 env->elr_el[2] = env->regs[15]; 8902 } else { 8903 /* CPSR.PAN is normally preserved preserved unless... */ 8904 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { 8905 switch (new_el) { 8906 case 3: 8907 if (!arm_is_secure_below_el3(env)) { 8908 /* ... the target is EL3, from non-secure state. */ 8909 env->uncached_cpsr &= ~CPSR_PAN; 8910 break; 8911 } 8912 /* ... the target is EL3, from secure state ... */ 8913 /* fall through */ 8914 case 1: 8915 /* ... the target is EL1 and SCTLR.SPAN is 0. */ 8916 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { 8917 env->uncached_cpsr |= CPSR_PAN; 8918 } 8919 break; 8920 } 8921 } 8922 /* 8923 * this is a lie, as there was no c1_sys on V4T/V5, but who cares 8924 * and we should just guard the thumb mode on V4 8925 */ 8926 if (arm_feature(env, ARM_FEATURE_V4T)) { 8927 env->thumb = 8928 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 8929 } 8930 env->regs[14] = env->regs[15] + offset; 8931 } 8932 env->regs[15] = newpc; 8933 arm_rebuild_hflags(env); 8934 } 8935 8936 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 8937 { 8938 /* 8939 * Handle exception entry to Hyp mode; this is sufficiently 8940 * different to entry to other AArch32 modes that we handle it 8941 * separately here. 8942 * 8943 * The vector table entry used is always the 0x14 Hyp mode entry point, 8944 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. 8945 * The offset applied to the preferred return address is always zero 8946 * (see DDI0487C.a section G1.12.3). 8947 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 8948 */ 8949 uint32_t addr, mask; 8950 ARMCPU *cpu = ARM_CPU(cs); 8951 CPUARMState *env = &cpu->env; 8952 8953 switch (cs->exception_index) { 8954 case EXCP_UDEF: 8955 addr = 0x04; 8956 break; 8957 case EXCP_SWI: 8958 addr = 0x14; 8959 break; 8960 case EXCP_BKPT: 8961 /* Fall through to prefetch abort. */ 8962 case EXCP_PREFETCH_ABORT: 8963 env->cp15.ifar_s = env->exception.vaddress; 8964 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 8965 (uint32_t)env->exception.vaddress); 8966 addr = 0x0c; 8967 break; 8968 case EXCP_DATA_ABORT: 8969 env->cp15.dfar_s = env->exception.vaddress; 8970 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 8971 (uint32_t)env->exception.vaddress); 8972 addr = 0x10; 8973 break; 8974 case EXCP_IRQ: 8975 addr = 0x18; 8976 break; 8977 case EXCP_FIQ: 8978 addr = 0x1c; 8979 break; 8980 case EXCP_HVC: 8981 addr = 0x08; 8982 break; 8983 case EXCP_HYP_TRAP: 8984 addr = 0x14; 8985 break; 8986 default: 8987 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8988 } 8989 8990 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 8991 if (!arm_feature(env, ARM_FEATURE_V8)) { 8992 /* 8993 * QEMU syndrome values are v8-style. v7 has the IL bit 8994 * UNK/SBZP for "field not valid" cases, where v8 uses RES1. 8995 * If this is a v7 CPU, squash the IL bit in those cases. 
8996 */ 8997 if (cs->exception_index == EXCP_PREFETCH_ABORT || 8998 (cs->exception_index == EXCP_DATA_ABORT && 8999 !(env->exception.syndrome & ARM_EL_ISV)) || 9000 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 9001 env->exception.syndrome &= ~ARM_EL_IL; 9002 } 9003 } 9004 env->cp15.esr_el[2] = env->exception.syndrome; 9005 } 9006 9007 if (arm_current_el(env) != 2 && addr < 0x14) { 9008 addr = 0x14; 9009 } 9010 9011 mask = 0; 9012 if (!(env->cp15.scr_el3 & SCR_EA)) { 9013 mask |= CPSR_A; 9014 } 9015 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 9016 mask |= CPSR_I; 9017 } 9018 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 9019 mask |= CPSR_F; 9020 } 9021 9022 addr += env->cp15.hvbar; 9023 9024 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 9025 } 9026 9027 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 9028 { 9029 ARMCPU *cpu = ARM_CPU(cs); 9030 CPUARMState *env = &cpu->env; 9031 uint32_t addr; 9032 uint32_t mask; 9033 int new_mode; 9034 uint32_t offset; 9035 uint32_t moe; 9036 9037 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 9038 switch (syn_get_ec(env->exception.syndrome)) { 9039 case EC_BREAKPOINT: 9040 case EC_BREAKPOINT_SAME_EL: 9041 moe = 1; 9042 break; 9043 case EC_WATCHPOINT: 9044 case EC_WATCHPOINT_SAME_EL: 9045 moe = 10; 9046 break; 9047 case EC_AA32_BKPT: 9048 moe = 3; 9049 break; 9050 case EC_VECTORCATCH: 9051 moe = 5; 9052 break; 9053 default: 9054 moe = 0; 9055 break; 9056 } 9057 9058 if (moe) { 9059 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 9060 } 9061 9062 if (env->exception.target_el == 2) { 9063 arm_cpu_do_interrupt_aarch32_hyp(cs); 9064 return; 9065 } 9066 9067 switch (cs->exception_index) { 9068 case EXCP_UDEF: 9069 new_mode = ARM_CPU_MODE_UND; 9070 addr = 0x04; 9071 mask = CPSR_I; 9072 if (env->thumb) 9073 offset = 2; 9074 else 9075 offset = 4; 9076 break; 9077 case EXCP_SWI: 9078 new_mode = ARM_CPU_MODE_SVC; 9079 addr = 0x08; 9080 mask = CPSR_I; 9081 /* The PC already points to the next instruction. */ 9082 offset = 0; 9083 break; 9084 case EXCP_BKPT: 9085 /* Fall through to prefetch abort. */ 9086 case EXCP_PREFETCH_ABORT: 9087 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 9088 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 9089 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 9090 env->exception.fsr, (uint32_t)env->exception.vaddress); 9091 new_mode = ARM_CPU_MODE_ABT; 9092 addr = 0x0c; 9093 mask = CPSR_A | CPSR_I; 9094 offset = 4; 9095 break; 9096 case EXCP_DATA_ABORT: 9097 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 9098 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 9099 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 9100 env->exception.fsr, 9101 (uint32_t)env->exception.vaddress); 9102 new_mode = ARM_CPU_MODE_ABT; 9103 addr = 0x10; 9104 mask = CPSR_A | CPSR_I; 9105 offset = 8; 9106 break; 9107 case EXCP_IRQ: 9108 new_mode = ARM_CPU_MODE_IRQ; 9109 addr = 0x18; 9110 /* Disable IRQ and imprecise data aborts. */ 9111 mask = CPSR_A | CPSR_I; 9112 offset = 4; 9113 if (env->cp15.scr_el3 & SCR_IRQ) { 9114 /* IRQ routed to monitor mode */ 9115 new_mode = ARM_CPU_MODE_MON; 9116 mask |= CPSR_F; 9117 } 9118 break; 9119 case EXCP_FIQ: 9120 new_mode = ARM_CPU_MODE_FIQ; 9121 addr = 0x1c; 9122 /* Disable FIQ, IRQ and imprecise data aborts. 
*/ 9123 mask = CPSR_A | CPSR_I | CPSR_F; 9124 if (env->cp15.scr_el3 & SCR_FIQ) { 9125 /* FIQ routed to monitor mode */ 9126 new_mode = ARM_CPU_MODE_MON; 9127 } 9128 offset = 4; 9129 break; 9130 case EXCP_VIRQ: 9131 new_mode = ARM_CPU_MODE_IRQ; 9132 addr = 0x18; 9133 /* Disable IRQ and imprecise data aborts. */ 9134 mask = CPSR_A | CPSR_I; 9135 offset = 4; 9136 break; 9137 case EXCP_VFIQ: 9138 new_mode = ARM_CPU_MODE_FIQ; 9139 addr = 0x1c; 9140 /* Disable FIQ, IRQ and imprecise data aborts. */ 9141 mask = CPSR_A | CPSR_I | CPSR_F; 9142 offset = 4; 9143 break; 9144 case EXCP_SMC: 9145 new_mode = ARM_CPU_MODE_MON; 9146 addr = 0x08; 9147 mask = CPSR_A | CPSR_I | CPSR_F; 9148 offset = 0; 9149 break; 9150 default: 9151 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9152 return; /* Never happens. Keep compiler happy. */ 9153 } 9154 9155 if (new_mode == ARM_CPU_MODE_MON) { 9156 addr += env->cp15.mvbar; 9157 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 9158 /* High vectors. When enabled, base address cannot be remapped. */ 9159 addr += 0xffff0000; 9160 } else { 9161 /* ARM v7 architectures provide a vector base address register to remap 9162 * the interrupt vector table. 9163 * This register is only followed in non-monitor mode, and is banked. 9164 * Note: only bits 31:5 are valid. 9165 */ 9166 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 9167 } 9168 9169 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 9170 env->cp15.scr_el3 &= ~SCR_NS; 9171 } 9172 9173 take_aarch32_exception(env, new_mode, mask, offset, addr); 9174 } 9175 9176 /* Handle exception entry to a target EL which is using AArch64 */ 9177 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 9178 { 9179 ARMCPU *cpu = ARM_CPU(cs); 9180 CPUARMState *env = &cpu->env; 9181 unsigned int new_el = env->exception.target_el; 9182 target_ulong addr = env->cp15.vbar_el[new_el]; 9183 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 9184 unsigned int old_mode; 9185 unsigned int cur_el = arm_current_el(env); 9186 9187 /* 9188 * Note that new_el can never be 0. If cur_el is 0, then 9189 * el0_a64 is is_a64(), else el0_a64 is ignored. 
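 *
 * The vector offset selected below follows the usual AArch64 vector table
 * layout: exceptions from a lower EL start at 0x400 (lower EL in AArch64)
 * or 0x600 (lower EL in AArch32); same-EL exceptions start at 0x0 (SP_EL0)
 * or 0x200 (SP_ELx); IRQ/vIRQ then adds 0x80 and FIQ/vFIQ adds 0x100 on
 * top of that base, while synchronous exceptions use the base directly.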
9190 */ 9191 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 9192 9193 if (cur_el < new_el) { 9194 /* Entry vector offset depends on whether the implemented EL 9195 * immediately lower than the target level is using AArch32 or AArch64 9196 */ 9197 bool is_aa64; 9198 uint64_t hcr; 9199 9200 switch (new_el) { 9201 case 3: 9202 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 9203 break; 9204 case 2: 9205 hcr = arm_hcr_el2_eff(env); 9206 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 9207 is_aa64 = (hcr & HCR_RW) != 0; 9208 break; 9209 } 9210 /* fall through */ 9211 case 1: 9212 is_aa64 = is_a64(env); 9213 break; 9214 default: 9215 g_assert_not_reached(); 9216 } 9217 9218 if (is_aa64) { 9219 addr += 0x400; 9220 } else { 9221 addr += 0x600; 9222 } 9223 } else if (pstate_read(env) & PSTATE_SP) { 9224 addr += 0x200; 9225 } 9226 9227 switch (cs->exception_index) { 9228 case EXCP_PREFETCH_ABORT: 9229 case EXCP_DATA_ABORT: 9230 env->cp15.far_el[new_el] = env->exception.vaddress; 9231 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 9232 env->cp15.far_el[new_el]); 9233 /* fall through */ 9234 case EXCP_BKPT: 9235 case EXCP_UDEF: 9236 case EXCP_SWI: 9237 case EXCP_HVC: 9238 case EXCP_HYP_TRAP: 9239 case EXCP_SMC: 9240 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { 9241 /* 9242 * QEMU internal FP/SIMD syndromes from AArch32 include the 9243 * TA and coproc fields which are only exposed if the exception 9244 * is taken to AArch32 Hyp mode. Mask them out to get a valid 9245 * AArch64 format syndrome. 9246 */ 9247 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 9248 } 9249 env->cp15.esr_el[new_el] = env->exception.syndrome; 9250 break; 9251 case EXCP_IRQ: 9252 case EXCP_VIRQ: 9253 addr += 0x80; 9254 break; 9255 case EXCP_FIQ: 9256 case EXCP_VFIQ: 9257 addr += 0x100; 9258 break; 9259 default: 9260 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9261 } 9262 9263 if (is_a64(env)) { 9264 old_mode = pstate_read(env); 9265 aarch64_save_sp(env, arm_current_el(env)); 9266 env->elr_el[new_el] = env->pc; 9267 } else { 9268 old_mode = cpsr_read(env); 9269 env->elr_el[new_el] = env->regs[15]; 9270 9271 aarch64_sync_32_to_64(env); 9272 9273 env->condexec_bits = 0; 9274 } 9275 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 9276 9277 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 9278 env->elr_el[new_el]); 9279 9280 if (cpu_isar_feature(aa64_pan, cpu)) { 9281 /* The value of PSTATE.PAN is normally preserved, except when ... */ 9282 new_mode |= old_mode & PSTATE_PAN; 9283 switch (new_el) { 9284 case 2: 9285 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 9286 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 9287 != (HCR_E2H | HCR_TGE)) { 9288 break; 9289 } 9290 /* fall through */ 9291 case 1: 9292 /* ... the target is EL1 ... */ 9293 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ 9294 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 9295 new_mode |= PSTATE_PAN; 9296 } 9297 break; 9298 } 9299 } 9300 9301 pstate_write(env, PSTATE_DAIF | new_mode); 9302 env->aarch64 = 1; 9303 aarch64_restore_sp(env, new_el); 9304 helper_rebuild_hflags_a64(env, new_el); 9305 9306 env->pc = addr; 9307 9308 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 9309 new_el, env->pc, pstate_read(env)); 9310 } 9311 9312 /* 9313 * Do semihosting call and set the appropriate return value. All the 9314 * permission and validity checks have been done at translate time. 
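 * On return, the semihosting result replaces the argument register (X0 or
 * R0) and the PC is advanced past the instruction that raised the call.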
9315 * 9316 * We only see semihosting exceptions in TCG only as they are not 9317 * trapped to the hypervisor in KVM. 9318 */ 9319 #ifdef CONFIG_TCG 9320 static void handle_semihosting(CPUState *cs) 9321 { 9322 ARMCPU *cpu = ARM_CPU(cs); 9323 CPUARMState *env = &cpu->env; 9324 9325 if (is_a64(env)) { 9326 qemu_log_mask(CPU_LOG_INT, 9327 "...handling as semihosting call 0x%" PRIx64 "\n", 9328 env->xregs[0]); 9329 env->xregs[0] = do_arm_semihosting(env); 9330 env->pc += 4; 9331 } else { 9332 qemu_log_mask(CPU_LOG_INT, 9333 "...handling as semihosting call 0x%x\n", 9334 env->regs[0]); 9335 env->regs[0] = do_arm_semihosting(env); 9336 env->regs[15] += env->thumb ? 2 : 4; 9337 } 9338 } 9339 #endif 9340 9341 /* Handle a CPU exception for A and R profile CPUs. 9342 * Do any appropriate logging, handle PSCI calls, and then hand off 9343 * to the AArch64-entry or AArch32-entry function depending on the 9344 * target exception level's register width. 9345 */ 9346 void arm_cpu_do_interrupt(CPUState *cs) 9347 { 9348 ARMCPU *cpu = ARM_CPU(cs); 9349 CPUARMState *env = &cpu->env; 9350 unsigned int new_el = env->exception.target_el; 9351 9352 assert(!arm_feature(env, ARM_FEATURE_M)); 9353 9354 arm_log_exception(cs->exception_index); 9355 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 9356 new_el); 9357 if (qemu_loglevel_mask(CPU_LOG_INT) 9358 && !excp_is_internal(cs->exception_index)) { 9359 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 9360 syn_get_ec(env->exception.syndrome), 9361 env->exception.syndrome); 9362 } 9363 9364 if (arm_is_psci_call(cpu, cs->exception_index)) { 9365 arm_handle_psci_call(cpu); 9366 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 9367 return; 9368 } 9369 9370 /* 9371 * Semihosting semantics depend on the register width of the code 9372 * that caused the exception, not the target exception level, so 9373 * must be handled here. 9374 */ 9375 #ifdef CONFIG_TCG 9376 if (cs->exception_index == EXCP_SEMIHOST) { 9377 handle_semihosting(cs); 9378 return; 9379 } 9380 #endif 9381 9382 /* Hooks may change global state so BQL should be held, also the 9383 * BQL needs to be held for any modification of 9384 * cs->interrupt_request. 9385 */ 9386 g_assert(qemu_mutex_iothread_locked()); 9387 9388 arm_call_pre_el_change_hook(cpu); 9389 9390 assert(!excp_is_internal(cs->exception_index)); 9391 if (arm_el_is_aa64(env, new_el)) { 9392 arm_cpu_do_interrupt_aarch64(cs); 9393 } else { 9394 arm_cpu_do_interrupt_aarch32(cs); 9395 } 9396 9397 arm_call_el_change_hook(cpu); 9398 9399 if (!kvm_enabled()) { 9400 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 9401 } 9402 } 9403 #endif /* !CONFIG_USER_ONLY */ 9404 9405 /* Return the exception level which controls this address translation regime */ 9406 static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 9407 { 9408 switch (mmu_idx) { 9409 case ARMMMUIdx_E20_0: 9410 case ARMMMUIdx_E20_2: 9411 case ARMMMUIdx_E20_2_PAN: 9412 case ARMMMUIdx_Stage2: 9413 case ARMMMUIdx_E2: 9414 return 2; 9415 case ARMMMUIdx_SE3: 9416 return 3; 9417 case ARMMMUIdx_SE10_0: 9418 return arm_el_is_aa64(env, 3) ? 
1 : 3; 9419 case ARMMMUIdx_SE10_1: 9420 case ARMMMUIdx_SE10_1_PAN: 9421 case ARMMMUIdx_Stage1_E0: 9422 case ARMMMUIdx_Stage1_E1: 9423 case ARMMMUIdx_Stage1_E1_PAN: 9424 case ARMMMUIdx_E10_0: 9425 case ARMMMUIdx_E10_1: 9426 case ARMMMUIdx_E10_1_PAN: 9427 case ARMMMUIdx_MPrivNegPri: 9428 case ARMMMUIdx_MUserNegPri: 9429 case ARMMMUIdx_MPriv: 9430 case ARMMMUIdx_MUser: 9431 case ARMMMUIdx_MSPrivNegPri: 9432 case ARMMMUIdx_MSUserNegPri: 9433 case ARMMMUIdx_MSPriv: 9434 case ARMMMUIdx_MSUser: 9435 return 1; 9436 default: 9437 g_assert_not_reached(); 9438 } 9439 } 9440 9441 uint64_t arm_sctlr(CPUARMState *env, int el) 9442 { 9443 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ 9444 if (el == 0) { 9445 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); 9446 el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1); 9447 } 9448 return env->cp15.sctlr_el[el]; 9449 } 9450 9451 /* Return the SCTLR value which controls this address translation regime */ 9452 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 9453 { 9454 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 9455 } 9456 9457 #ifndef CONFIG_USER_ONLY 9458 9459 /* Return true if the specified stage of address translation is disabled */ 9460 static inline bool regime_translation_disabled(CPUARMState *env, 9461 ARMMMUIdx mmu_idx) 9462 { 9463 if (arm_feature(env, ARM_FEATURE_M)) { 9464 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 9465 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 9466 case R_V7M_MPU_CTRL_ENABLE_MASK: 9467 /* Enabled, but not for HardFault and NMI */ 9468 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 9469 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 9470 /* Enabled for all cases */ 9471 return false; 9472 case 0: 9473 default: 9474 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 9475 * we warned about that in armv7m_nvic.c when the guest set it. 
9476 */ 9477 return true; 9478 } 9479 } 9480 9481 if (mmu_idx == ARMMMUIdx_Stage2) { 9482 /* HCR.DC means HCR.VM behaves as 1 */ 9483 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; 9484 } 9485 9486 if (env->cp15.hcr_el2 & HCR_TGE) { 9487 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 9488 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 9489 return true; 9490 } 9491 } 9492 9493 if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 9494 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 9495 return true; 9496 } 9497 9498 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 9499 } 9500 9501 static inline bool regime_translation_big_endian(CPUARMState *env, 9502 ARMMMUIdx mmu_idx) 9503 { 9504 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 9505 } 9506 9507 /* Return the TTBR associated with this translation regime */ 9508 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 9509 int ttbrn) 9510 { 9511 if (mmu_idx == ARMMMUIdx_Stage2) { 9512 return env->cp15.vttbr_el2; 9513 } 9514 if (ttbrn == 0) { 9515 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 9516 } else { 9517 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 9518 } 9519 } 9520 9521 #endif /* !CONFIG_USER_ONLY */ 9522 9523 /* Return the TCR controlling this translation regime */ 9524 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 9525 { 9526 if (mmu_idx == ARMMMUIdx_Stage2) { 9527 return &env->cp15.vtcr_el2; 9528 } 9529 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 9530 } 9531 9532 /* Convert a possible stage1+2 MMU index into the appropriate 9533 * stage 1 MMU index 9534 */ 9535 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 9536 { 9537 switch (mmu_idx) { 9538 case ARMMMUIdx_E10_0: 9539 return ARMMMUIdx_Stage1_E0; 9540 case ARMMMUIdx_E10_1: 9541 return ARMMMUIdx_Stage1_E1; 9542 case ARMMMUIdx_E10_1_PAN: 9543 return ARMMMUIdx_Stage1_E1_PAN; 9544 default: 9545 return mmu_idx; 9546 } 9547 } 9548 9549 /* Return true if the translation regime is using LPAE format page tables */ 9550 static inline bool regime_using_lpae_format(CPUARMState *env, 9551 ARMMMUIdx mmu_idx) 9552 { 9553 int el = regime_el(env, mmu_idx); 9554 if (el == 2 || arm_el_is_aa64(env, el)) { 9555 return true; 9556 } 9557 if (arm_feature(env, ARM_FEATURE_LPAE) 9558 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 9559 return true; 9560 } 9561 return false; 9562 } 9563 9564 /* Returns true if the stage 1 translation regime is using LPAE format page 9565 * tables. Used when raising alignment exceptions, whose FSR changes depending 9566 * on whether the long or short descriptor format is in use. 
*/ 9567 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 9568 { 9569 mmu_idx = stage_1_mmu_idx(mmu_idx); 9570 9571 return regime_using_lpae_format(env, mmu_idx); 9572 } 9573 9574 #ifndef CONFIG_USER_ONLY 9575 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 9576 { 9577 switch (mmu_idx) { 9578 case ARMMMUIdx_SE10_0: 9579 case ARMMMUIdx_E20_0: 9580 case ARMMMUIdx_Stage1_E0: 9581 case ARMMMUIdx_MUser: 9582 case ARMMMUIdx_MSUser: 9583 case ARMMMUIdx_MUserNegPri: 9584 case ARMMMUIdx_MSUserNegPri: 9585 return true; 9586 default: 9587 return false; 9588 case ARMMMUIdx_E10_0: 9589 case ARMMMUIdx_E10_1: 9590 case ARMMMUIdx_E10_1_PAN: 9591 g_assert_not_reached(); 9592 } 9593 } 9594 9595 /* Translate section/page access permissions to page 9596 * R/W protection flags 9597 * 9598 * @env: CPUARMState 9599 * @mmu_idx: MMU index indicating required translation regime 9600 * @ap: The 3-bit access permissions (AP[2:0]) 9601 * @domain_prot: The 2-bit domain access permissions 9602 */ 9603 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 9604 int ap, int domain_prot) 9605 { 9606 bool is_user = regime_is_user(env, mmu_idx); 9607 9608 if (domain_prot == 3) { 9609 return PAGE_READ | PAGE_WRITE; 9610 } 9611 9612 switch (ap) { 9613 case 0: 9614 if (arm_feature(env, ARM_FEATURE_V7)) { 9615 return 0; 9616 } 9617 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 9618 case SCTLR_S: 9619 return is_user ? 0 : PAGE_READ; 9620 case SCTLR_R: 9621 return PAGE_READ; 9622 default: 9623 return 0; 9624 } 9625 case 1: 9626 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9627 case 2: 9628 if (is_user) { 9629 return PAGE_READ; 9630 } else { 9631 return PAGE_READ | PAGE_WRITE; 9632 } 9633 case 3: 9634 return PAGE_READ | PAGE_WRITE; 9635 case 4: /* Reserved. */ 9636 return 0; 9637 case 5: 9638 return is_user ? 0 : PAGE_READ; 9639 case 6: 9640 return PAGE_READ; 9641 case 7: 9642 if (!arm_feature(env, ARM_FEATURE_V6K)) { 9643 return 0; 9644 } 9645 return PAGE_READ; 9646 default: 9647 g_assert_not_reached(); 9648 } 9649 } 9650 9651 /* Translate section/page access permissions to page 9652 * R/W protection flags. 9653 * 9654 * @ap: The 2-bit simple AP (AP[2:1]) 9655 * @is_user: TRUE if accessing from PL0 9656 */ 9657 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 9658 { 9659 switch (ap) { 9660 case 0: 9661 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9662 case 1: 9663 return PAGE_READ | PAGE_WRITE; 9664 case 2: 9665 return is_user ? 
0 : PAGE_READ; 9666 case 3: 9667 return PAGE_READ; 9668 default: 9669 g_assert_not_reached(); 9670 } 9671 } 9672 9673 static inline int 9674 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 9675 { 9676 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 9677 } 9678 9679 /* Translate S2 section/page access permissions to protection flags 9680 * 9681 * @env: CPUARMState 9682 * @s2ap: The 2-bit stage2 access permissions (S2AP) 9683 * @xn: XN (execute-never) bit 9684 */ 9685 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 9686 { 9687 int prot = 0; 9688 9689 if (s2ap & 1) { 9690 prot |= PAGE_READ; 9691 } 9692 if (s2ap & 2) { 9693 prot |= PAGE_WRITE; 9694 } 9695 if (!xn) { 9696 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 9697 prot |= PAGE_EXEC; 9698 } 9699 } 9700 return prot; 9701 } 9702 9703 /* Translate section/page access permissions to protection flags 9704 * 9705 * @env: CPUARMState 9706 * @mmu_idx: MMU index indicating required translation regime 9707 * @is_aa64: TRUE if AArch64 9708 * @ap: The 2-bit simple AP (AP[2:1]) 9709 * @ns: NS (non-secure) bit 9710 * @xn: XN (execute-never) bit 9711 * @pxn: PXN (privileged execute-never) bit 9712 */ 9713 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 9714 int ap, int ns, int xn, int pxn) 9715 { 9716 bool is_user = regime_is_user(env, mmu_idx); 9717 int prot_rw, user_rw; 9718 bool have_wxn; 9719 int wxn = 0; 9720 9721 assert(mmu_idx != ARMMMUIdx_Stage2); 9722 9723 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 9724 if (is_user) { 9725 prot_rw = user_rw; 9726 } else { 9727 if (user_rw && regime_is_pan(env, mmu_idx)) { 9728 return 0; 9729 } 9730 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 9731 } 9732 9733 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 9734 return prot_rw; 9735 } 9736 9737 /* TODO have_wxn should be replaced with 9738 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 9739 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 9740 * compatible processors have EL2, which is required for [U]WXN. 
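 *
 * The practical effect is that with SCTLR.WXN set, any mapping writable at
 * the current privilege level is treated as execute-never: the final check
 * below drops PAGE_EXEC whenever (wxn && (prot_rw & PAGE_WRITE)), and
 * SCTLR.UWXN similarly makes user-writable mappings non-executable for
 * privileged code.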
9741 */ 9742 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 9743 9744 if (have_wxn) { 9745 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 9746 } 9747 9748 if (is_aa64) { 9749 if (regime_has_2_ranges(mmu_idx) && !is_user) { 9750 xn = pxn || (user_rw & PAGE_WRITE); 9751 } 9752 } else if (arm_feature(env, ARM_FEATURE_V7)) { 9753 switch (regime_el(env, mmu_idx)) { 9754 case 1: 9755 case 3: 9756 if (is_user) { 9757 xn = xn || !(user_rw & PAGE_READ); 9758 } else { 9759 int uwxn = 0; 9760 if (have_wxn) { 9761 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 9762 } 9763 xn = xn || !(prot_rw & PAGE_READ) || pxn || 9764 (uwxn && (user_rw & PAGE_WRITE)); 9765 } 9766 break; 9767 case 2: 9768 break; 9769 } 9770 } else { 9771 xn = wxn = 0; 9772 } 9773 9774 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 9775 return prot_rw; 9776 } 9777 return prot_rw | PAGE_EXEC; 9778 } 9779 9780 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 9781 uint32_t *table, uint32_t address) 9782 { 9783 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 9784 TCR *tcr = regime_tcr(env, mmu_idx); 9785 9786 if (address & tcr->mask) { 9787 if (tcr->raw_tcr & TTBCR_PD1) { 9788 /* Translation table walk disabled for TTBR1 */ 9789 return false; 9790 } 9791 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 9792 } else { 9793 if (tcr->raw_tcr & TTBCR_PD0) { 9794 /* Translation table walk disabled for TTBR0 */ 9795 return false; 9796 } 9797 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 9798 } 9799 *table |= (address >> 18) & 0x3ffc; 9800 return true; 9801 } 9802 9803 /* Translate a S1 pagetable walk through S2 if needed. */ 9804 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 9805 hwaddr addr, MemTxAttrs txattrs, 9806 ARMMMUFaultInfo *fi) 9807 { 9808 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && 9809 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 9810 target_ulong s2size; 9811 hwaddr s2pa; 9812 int s2prot; 9813 int ret; 9814 ARMCacheAttrs cacheattrs = {}; 9815 ARMCacheAttrs *pcacheattrs = NULL; 9816 9817 if (env->cp15.hcr_el2 & HCR_PTW) { 9818 /* 9819 * PTW means we must fault if this S1 walk touches S2 Device 9820 * memory; otherwise we don't care about the attributes and can 9821 * save the S2 translation the effort of computing them. 9822 */ 9823 pcacheattrs = &cacheattrs; 9824 } 9825 9826 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa, 9827 &txattrs, &s2prot, &s2size, fi, pcacheattrs); 9828 if (ret) { 9829 assert(fi->type != ARMFault_None); 9830 fi->s2addr = addr; 9831 fi->stage2 = true; 9832 fi->s1ptw = true; 9833 return ~0; 9834 } 9835 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { 9836 /* Access was to Device memory: generate Permission fault */ 9837 fi->type = ARMFault_Permission; 9838 fi->s2addr = addr; 9839 fi->stage2 = true; 9840 fi->s1ptw = true; 9841 return ~0; 9842 } 9843 addr = s2pa; 9844 } 9845 return addr; 9846 } 9847 9848 /* All loads done in the course of a page table walk go through here. 
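 * Each helper picks the secure or non-secure address space for the walk,
 * runs the descriptor address through S1_ptw_translate() so that a stage 1
 * walk can itself be translated (and faulted) by stage 2, honours the
 * regime's SCTLR.EE endianness for the descriptor load, and reports any
 * external abort through fi as ARMFault_SyncExternalOnWalk.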
*/ 9849 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 9850 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 9851 { 9852 ARMCPU *cpu = ARM_CPU(cs); 9853 CPUARMState *env = &cpu->env; 9854 MemTxAttrs attrs = {}; 9855 MemTxResult result = MEMTX_OK; 9856 AddressSpace *as; 9857 uint32_t data; 9858 9859 attrs.secure = is_secure; 9860 as = arm_addressspace(cs, attrs); 9861 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 9862 if (fi->s1ptw) { 9863 return 0; 9864 } 9865 if (regime_translation_big_endian(env, mmu_idx)) { 9866 data = address_space_ldl_be(as, addr, attrs, &result); 9867 } else { 9868 data = address_space_ldl_le(as, addr, attrs, &result); 9869 } 9870 if (result == MEMTX_OK) { 9871 return data; 9872 } 9873 fi->type = ARMFault_SyncExternalOnWalk; 9874 fi->ea = arm_extabort_type(result); 9875 return 0; 9876 } 9877 9878 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 9879 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 9880 { 9881 ARMCPU *cpu = ARM_CPU(cs); 9882 CPUARMState *env = &cpu->env; 9883 MemTxAttrs attrs = {}; 9884 MemTxResult result = MEMTX_OK; 9885 AddressSpace *as; 9886 uint64_t data; 9887 9888 attrs.secure = is_secure; 9889 as = arm_addressspace(cs, attrs); 9890 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 9891 if (fi->s1ptw) { 9892 return 0; 9893 } 9894 if (regime_translation_big_endian(env, mmu_idx)) { 9895 data = address_space_ldq_be(as, addr, attrs, &result); 9896 } else { 9897 data = address_space_ldq_le(as, addr, attrs, &result); 9898 } 9899 if (result == MEMTX_OK) { 9900 return data; 9901 } 9902 fi->type = ARMFault_SyncExternalOnWalk; 9903 fi->ea = arm_extabort_type(result); 9904 return 0; 9905 } 9906 9907 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 9908 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9909 hwaddr *phys_ptr, int *prot, 9910 target_ulong *page_size, 9911 ARMMMUFaultInfo *fi) 9912 { 9913 CPUState *cs = env_cpu(env); 9914 int level = 1; 9915 uint32_t table; 9916 uint32_t desc; 9917 int type; 9918 int ap; 9919 int domain = 0; 9920 int domain_prot; 9921 hwaddr phys_addr; 9922 uint32_t dacr; 9923 9924 /* Pagetable walk. */ 9925 /* Lookup l1 descriptor. */ 9926 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 9927 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 9928 fi->type = ARMFault_Translation; 9929 goto do_fault; 9930 } 9931 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9932 mmu_idx, fi); 9933 if (fi->type != ARMFault_None) { 9934 goto do_fault; 9935 } 9936 type = (desc & 3); 9937 domain = (desc >> 5) & 0x0f; 9938 if (regime_el(env, mmu_idx) == 1) { 9939 dacr = env->cp15.dacr_ns; 9940 } else { 9941 dacr = env->cp15.dacr_s; 9942 } 9943 domain_prot = (dacr >> (domain * 2)) & 3; 9944 if (type == 0) { 9945 /* Section translation fault. */ 9946 fi->type = ARMFault_Translation; 9947 goto do_fault; 9948 } 9949 if (type != 2) { 9950 level = 2; 9951 } 9952 if (domain_prot == 0 || domain_prot == 2) { 9953 fi->type = ARMFault_Domain; 9954 goto do_fault; 9955 } 9956 if (type == 2) { 9957 /* 1Mb section. */ 9958 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 9959 ap = (desc >> 10) & 3; 9960 *page_size = 1024 * 1024; 9961 } else { 9962 /* Lookup l2 entry. */ 9963 if (type == 1) { 9964 /* Coarse pagetable. */ 9965 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9966 } else { 9967 /* Fine pagetable. 
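 * (An ARMv5 fine second-level table has 1024 entries and can describe
 * 1KB "tiny" pages; support for it was dropped in VMSAv6, which is why
 * the extended-small-page case below is only accepted for coarse tables.)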
*/ 9968 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 9969 } 9970 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9971 mmu_idx, fi); 9972 if (fi->type != ARMFault_None) { 9973 goto do_fault; 9974 } 9975 switch (desc & 3) { 9976 case 0: /* Page translation fault. */ 9977 fi->type = ARMFault_Translation; 9978 goto do_fault; 9979 case 1: /* 64k page. */ 9980 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9981 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 9982 *page_size = 0x10000; 9983 break; 9984 case 2: /* 4k page. */ 9985 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9986 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 9987 *page_size = 0x1000; 9988 break; 9989 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 9990 if (type == 1) { 9991 /* ARMv6/XScale extended small page format */ 9992 if (arm_feature(env, ARM_FEATURE_XSCALE) 9993 || arm_feature(env, ARM_FEATURE_V6)) { 9994 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9995 *page_size = 0x1000; 9996 } else { 9997 /* UNPREDICTABLE in ARMv5; we choose to take a 9998 * page translation fault. 9999 */ 10000 fi->type = ARMFault_Translation; 10001 goto do_fault; 10002 } 10003 } else { 10004 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 10005 *page_size = 0x400; 10006 } 10007 ap = (desc >> 4) & 3; 10008 break; 10009 default: 10010 /* Never happens, but compiler isn't smart enough to tell. */ 10011 abort(); 10012 } 10013 } 10014 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10015 *prot |= *prot ? PAGE_EXEC : 0; 10016 if (!(*prot & (1 << access_type))) { 10017 /* Access permission fault. */ 10018 fi->type = ARMFault_Permission; 10019 goto do_fault; 10020 } 10021 *phys_ptr = phys_addr; 10022 return false; 10023 do_fault: 10024 fi->domain = domain; 10025 fi->level = level; 10026 return true; 10027 } 10028 10029 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 10030 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10031 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10032 target_ulong *page_size, ARMMMUFaultInfo *fi) 10033 { 10034 CPUState *cs = env_cpu(env); 10035 int level = 1; 10036 uint32_t table; 10037 uint32_t desc; 10038 uint32_t xn; 10039 uint32_t pxn = 0; 10040 int type; 10041 int ap; 10042 int domain = 0; 10043 int domain_prot; 10044 hwaddr phys_addr; 10045 uint32_t dacr; 10046 bool ns; 10047 10048 /* Pagetable walk. */ 10049 /* Lookup l1 descriptor. */ 10050 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10051 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10052 fi->type = ARMFault_Translation; 10053 goto do_fault; 10054 } 10055 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10056 mmu_idx, fi); 10057 if (fi->type != ARMFault_None) { 10058 goto do_fault; 10059 } 10060 type = (desc & 3); 10061 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 10062 /* Section translation fault, or attempt to use the encoding 10063 * which is Reserved on implementations without PXN. 10064 */ 10065 fi->type = ARMFault_Translation; 10066 goto do_fault; 10067 } 10068 if ((type == 1) || !(desc & (1 << 18))) { 10069 /* Page or Section. 
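 * (Supersection descriptors have no domain field: bits [8:5] hold
 * extended physical address bits instead, so 'domain' stays 0 for them.)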
*/ 10070 domain = (desc >> 5) & 0x0f; 10071 } 10072 if (regime_el(env, mmu_idx) == 1) { 10073 dacr = env->cp15.dacr_ns; 10074 } else { 10075 dacr = env->cp15.dacr_s; 10076 } 10077 if (type == 1) { 10078 level = 2; 10079 } 10080 domain_prot = (dacr >> (domain * 2)) & 3; 10081 if (domain_prot == 0 || domain_prot == 2) { 10082 /* Section or Page domain fault */ 10083 fi->type = ARMFault_Domain; 10084 goto do_fault; 10085 } 10086 if (type != 1) { 10087 if (desc & (1 << 18)) { 10088 /* Supersection. */ 10089 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 10090 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 10091 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 10092 *page_size = 0x1000000; 10093 } else { 10094 /* Section. */ 10095 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10096 *page_size = 0x100000; 10097 } 10098 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 10099 xn = desc & (1 << 4); 10100 pxn = desc & 1; 10101 ns = extract32(desc, 19, 1); 10102 } else { 10103 if (arm_feature(env, ARM_FEATURE_PXN)) { 10104 pxn = (desc >> 2) & 1; 10105 } 10106 ns = extract32(desc, 3, 1); 10107 /* Lookup l2 entry. */ 10108 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10109 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10110 mmu_idx, fi); 10111 if (fi->type != ARMFault_None) { 10112 goto do_fault; 10113 } 10114 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 10115 switch (desc & 3) { 10116 case 0: /* Page translation fault. */ 10117 fi->type = ARMFault_Translation; 10118 goto do_fault; 10119 case 1: /* 64k page. */ 10120 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10121 xn = desc & (1 << 15); 10122 *page_size = 0x10000; 10123 break; 10124 case 2: case 3: /* 4k page. */ 10125 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10126 xn = desc & 1; 10127 *page_size = 0x1000; 10128 break; 10129 default: 10130 /* Never happens, but compiler isn't smart enough to tell. */ 10131 abort(); 10132 } 10133 } 10134 if (domain_prot == 3) { 10135 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10136 } else { 10137 if (pxn && !regime_is_user(env, mmu_idx)) { 10138 xn = 1; 10139 } 10140 if (xn && access_type == MMU_INST_FETCH) { 10141 fi->type = ARMFault_Permission; 10142 goto do_fault; 10143 } 10144 10145 if (arm_feature(env, ARM_FEATURE_V6K) && 10146 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 10147 /* The simplified model uses AP[0] as an access control bit. */ 10148 if ((ap & 1) == 0) { 10149 /* Access flag fault. */ 10150 fi->type = ARMFault_AccessFlag; 10151 goto do_fault; 10152 } 10153 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 10154 } else { 10155 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10156 } 10157 if (*prot && !xn) { 10158 *prot |= PAGE_EXEC; 10159 } 10160 if (!(*prot & (1 << access_type))) { 10161 /* Access permission fault. */ 10162 fi->type = ARMFault_Permission; 10163 goto do_fault; 10164 } 10165 } 10166 if (ns) { 10167 /* The NS bit will (as required by the architecture) have no effect if 10168 * the CPU doesn't support TZ or this is a non-secure translation 10169 * regime, because the attribute will already be non-secure. 
10170 */ 10171 attrs->secure = false; 10172 } 10173 *phys_ptr = phys_addr; 10174 return false; 10175 do_fault: 10176 fi->domain = domain; 10177 fi->level = level; 10178 return true; 10179 } 10180 10181 /* 10182 * check_s2_mmu_setup 10183 * @cpu: ARMCPU 10184 * @is_aa64: True if the translation regime is in AArch64 state 10185 * @startlevel: Suggested starting level 10186 * @inputsize: Bitsize of IPAs 10187 * @stride: Page-table stride (See the ARM ARM) 10188 * 10189 * Returns true if the suggested S2 translation parameters are OK and 10190 * false otherwise. 10191 */ 10192 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 10193 int inputsize, int stride) 10194 { 10195 const int grainsize = stride + 3; 10196 int startsizecheck; 10197 10198 /* Negative levels are never allowed. */ 10199 if (level < 0) { 10200 return false; 10201 } 10202 10203 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 10204 if (startsizecheck < 1 || startsizecheck > stride + 4) { 10205 return false; 10206 } 10207 10208 if (is_aa64) { 10209 CPUARMState *env = &cpu->env; 10210 unsigned int pamax = arm_pamax(cpu); 10211 10212 switch (stride) { 10213 case 13: /* 64KB Pages. */ 10214 if (level == 0 || (level == 1 && pamax <= 42)) { 10215 return false; 10216 } 10217 break; 10218 case 11: /* 16KB Pages. */ 10219 if (level == 0 || (level == 1 && pamax <= 40)) { 10220 return false; 10221 } 10222 break; 10223 case 9: /* 4KB Pages. */ 10224 if (level == 0 && pamax <= 42) { 10225 return false; 10226 } 10227 break; 10228 default: 10229 g_assert_not_reached(); 10230 } 10231 10232 /* Inputsize checks. */ 10233 if (inputsize > pamax && 10234 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 10235 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 10236 return false; 10237 } 10238 } else { 10239 /* AArch32 only supports 4KB pages. Assert on that. */ 10240 assert(stride == 9); 10241 10242 if (level == 0) { 10243 return false; 10244 } 10245 } 10246 return true; 10247 } 10248 10249 /* Translate from the 4-bit stage 2 representation of 10250 * memory attributes (without cache-allocation hints) to 10251 * the 8-bit representation of the stage 1 MAIR registers 10252 * (which includes allocation hints). 
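 * For example, S2 attrs 0b1111 (Outer and Inner Write-Back) become 0xff
 * (Write-Back with R/W-allocate hints), unless HCR_EL2.CD is set, in
 * which case the result is downgraded to 0x44 (Non-cacheable).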
10253 * 10254 * ref: shared/translation/attrs/S2AttrDecode() 10255 * .../S2ConvertAttrsHints() 10256 */ 10257 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 10258 { 10259 uint8_t hiattr = extract32(s2attrs, 2, 2); 10260 uint8_t loattr = extract32(s2attrs, 0, 2); 10261 uint8_t hihint = 0, lohint = 0; 10262 10263 if (hiattr != 0) { /* normal memory */ 10264 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 10265 hiattr = loattr = 1; /* non-cacheable */ 10266 } else { 10267 if (hiattr != 1) { /* Write-through or write-back */ 10268 hihint = 3; /* RW allocate */ 10269 } 10270 if (loattr != 1) { /* Write-through or write-back */ 10271 lohint = 3; /* RW allocate */ 10272 } 10273 } 10274 } 10275 10276 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 10277 } 10278 #endif /* !CONFIG_USER_ONLY */ 10279 10280 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 10281 { 10282 if (regime_has_2_ranges(mmu_idx)) { 10283 return extract64(tcr, 37, 2); 10284 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10285 return 0; /* VTCR_EL2 */ 10286 } else { 10287 return extract32(tcr, 20, 1); 10288 } 10289 } 10290 10291 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 10292 { 10293 if (regime_has_2_ranges(mmu_idx)) { 10294 return extract64(tcr, 51, 2); 10295 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10296 return 0; /* VTCR_EL2 */ 10297 } else { 10298 return extract32(tcr, 29, 1); 10299 } 10300 } 10301 10302 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10303 ARMMMUIdx mmu_idx, bool data) 10304 { 10305 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10306 bool epd, hpd, using16k, using64k; 10307 int select, tsz, tbi; 10308 10309 if (!regime_has_2_ranges(mmu_idx)) { 10310 select = 0; 10311 tsz = extract32(tcr, 0, 6); 10312 using64k = extract32(tcr, 14, 1); 10313 using16k = extract32(tcr, 15, 1); 10314 if (mmu_idx == ARMMMUIdx_Stage2) { 10315 /* VTCR_EL2 */ 10316 hpd = false; 10317 } else { 10318 hpd = extract32(tcr, 24, 1); 10319 } 10320 epd = false; 10321 } else { 10322 /* 10323 * Bit 55 is always between the two regions, and is canonical for 10324 * determining if address tagging is enabled. 10325 */ 10326 select = extract64(va, 55, 1); 10327 if (!select) { 10328 tsz = extract32(tcr, 0, 6); 10329 epd = extract32(tcr, 7, 1); 10330 using64k = extract32(tcr, 14, 1); 10331 using16k = extract32(tcr, 15, 1); 10332 hpd = extract64(tcr, 41, 1); 10333 } else { 10334 int tg = extract32(tcr, 30, 2); 10335 using16k = tg == 1; 10336 using64k = tg == 3; 10337 tsz = extract32(tcr, 16, 6); 10338 epd = extract32(tcr, 23, 1); 10339 hpd = extract64(tcr, 42, 1); 10340 } 10341 } 10342 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ 10343 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ 10344 10345 /* Present TBI as a composite with TBID. 
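 * That is, for instruction fetches TBI is only reported if TBID does not
 * exclude them; bit 0 of the composite covers the lower VA range and
 * bit 1 the upper range, and the relevant bit is picked with 'select'.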
*/ 10346 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 10347 if (!data) { 10348 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 10349 } 10350 tbi = (tbi >> select) & 1; 10351 10352 return (ARMVAParameters) { 10353 .tsz = tsz, 10354 .select = select, 10355 .tbi = tbi, 10356 .epd = epd, 10357 .hpd = hpd, 10358 .using16k = using16k, 10359 .using64k = using64k, 10360 }; 10361 } 10362 10363 #ifndef CONFIG_USER_ONLY 10364 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 10365 ARMMMUIdx mmu_idx) 10366 { 10367 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10368 uint32_t el = regime_el(env, mmu_idx); 10369 int select, tsz; 10370 bool epd, hpd; 10371 10372 if (mmu_idx == ARMMMUIdx_Stage2) { 10373 /* VTCR */ 10374 bool sext = extract32(tcr, 4, 1); 10375 bool sign = extract32(tcr, 3, 1); 10376 10377 /* 10378 * If the sign-extend bit is not the same as t0sz[3], the result 10379 * is unpredictable. Flag this as a guest error. 10380 */ 10381 if (sign != sext) { 10382 qemu_log_mask(LOG_GUEST_ERROR, 10383 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 10384 } 10385 tsz = sextract32(tcr, 0, 4) + 8; 10386 select = 0; 10387 hpd = false; 10388 epd = false; 10389 } else if (el == 2) { 10390 /* HTCR */ 10391 tsz = extract32(tcr, 0, 3); 10392 select = 0; 10393 hpd = extract64(tcr, 24, 1); 10394 epd = false; 10395 } else { 10396 int t0sz = extract32(tcr, 0, 3); 10397 int t1sz = extract32(tcr, 16, 3); 10398 10399 if (t1sz == 0) { 10400 select = va > (0xffffffffu >> t0sz); 10401 } else { 10402 /* Note that we will detect errors later. */ 10403 select = va >= ~(0xffffffffu >> t1sz); 10404 } 10405 if (!select) { 10406 tsz = t0sz; 10407 epd = extract32(tcr, 7, 1); 10408 hpd = extract64(tcr, 41, 1); 10409 } else { 10410 tsz = t1sz; 10411 epd = extract32(tcr, 23, 1); 10412 hpd = extract64(tcr, 42, 1); 10413 } 10414 /* For aarch32, hpd0 is not enabled without t2e as well. */ 10415 hpd &= extract32(tcr, 6, 1); 10416 } 10417 10418 return (ARMVAParameters) { 10419 .tsz = tsz, 10420 .select = select, 10421 .epd = epd, 10422 .hpd = hpd, 10423 }; 10424 } 10425 10426 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 10427 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10428 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 10429 target_ulong *page_size_ptr, 10430 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10431 { 10432 ARMCPU *cpu = env_archcpu(env); 10433 CPUState *cs = CPU(cpu); 10434 /* Read an LPAE long-descriptor translation table. */ 10435 ARMFaultType fault_type = ARMFault_Translation; 10436 uint32_t level; 10437 ARMVAParameters param; 10438 uint64_t ttbr; 10439 hwaddr descaddr, indexmask, indexmask_grainsize; 10440 uint32_t tableattrs; 10441 target_ulong page_size; 10442 uint32_t attrs; 10443 int32_t stride; 10444 int addrsize, inputsize; 10445 TCR *tcr = regime_tcr(env, mmu_idx); 10446 int ap, ns, xn, pxn; 10447 uint32_t el = regime_el(env, mmu_idx); 10448 uint64_t descaddrmask; 10449 bool aarch64 = arm_el_is_aa64(env, el); 10450 bool guarded = false; 10451 10452 /* TODO: 10453 * This code does not handle the different format TCR for VTCR_EL2. 10454 * This code also does not support shareability levels. 10455 * Attribute and permission bit handling should also be checked when adding 10456 * support for those page table walks. 
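 * Rough outline of the walk below: determine the VA parameters and the
 * starting level, loop reading descriptors via arm_ldq_ptw() (which
 * handles stage 2 translation of the walk itself and endianness),
 * accumulate the hierarchical table attributes, then decode the final
 * block or page descriptor into *prot, *page_size_ptr and the cache
 * attributes.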
10457 */ 10458 if (aarch64) { 10459 param = aa64_va_parameters(env, address, mmu_idx, 10460 access_type != MMU_INST_FETCH); 10461 level = 0; 10462 addrsize = 64 - 8 * param.tbi; 10463 inputsize = 64 - param.tsz; 10464 } else { 10465 param = aa32_va_parameters(env, address, mmu_idx); 10466 level = 1; 10467 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 10468 inputsize = addrsize - param.tsz; 10469 } 10470 10471 /* 10472 * We determined the region when collecting the parameters, but we 10473 * have not yet validated that the address is valid for the region. 10474 * Extract the top bits and verify that they all match select. 10475 * 10476 * For aa32, if inputsize == addrsize, then we have selected the 10477 * region by exclusion in aa32_va_parameters and there is no more 10478 * validation to do here. 10479 */ 10480 if (inputsize < addrsize) { 10481 target_ulong top_bits = sextract64(address, inputsize, 10482 addrsize - inputsize); 10483 if (-top_bits != param.select) { 10484 /* The gap between the two regions is a Translation fault */ 10485 fault_type = ARMFault_Translation; 10486 goto do_fault; 10487 } 10488 } 10489 10490 if (param.using64k) { 10491 stride = 13; 10492 } else if (param.using16k) { 10493 stride = 11; 10494 } else { 10495 stride = 9; 10496 } 10497 10498 /* Note that QEMU ignores shareability and cacheability attributes, 10499 * so we don't need to do anything with the SH, ORGN, IRGN fields 10500 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 10501 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 10502 * implement any ASID-like capability so we can ignore it (instead 10503 * we will always flush the TLB any time the ASID is changed). 10504 */ 10505 ttbr = regime_ttbr(env, mmu_idx, param.select); 10506 10507 /* Here we should have set up all the parameters for the translation: 10508 * inputsize, ttbr, epd, stride, tbi 10509 */ 10510 10511 if (param.epd) { 10512 /* Translation table walk disabled => Translation fault on TLB miss 10513 * Note: This is always 0 on 64-bit EL2 and EL3. 10514 */ 10515 goto do_fault; 10516 } 10517 10518 if (mmu_idx != ARMMMUIdx_Stage2) { 10519 /* The starting level depends on the virtual address size (which can 10520 * be up to 48 bits) and the translation granule size. It indicates 10521 * the number of strides (stride bits at a time) needed to 10522 * consume the bits of the input address. In the pseudocode this is: 10523 * level = 4 - RoundUp((inputsize - grainsize) / stride) 10524 * where their 'inputsize' is our 'inputsize', 'grainsize' is 10525 * our 'stride + 3' and 'stride' is our 'stride'. 10526 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 10527 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 10528 * = 4 - (inputsize - 4) / stride; 10529 */ 10530 level = 4 - (inputsize - 4) / stride; 10531 } else { 10532 /* For stage 2 translations the starting level is specified by the 10533 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 10534 */ 10535 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 10536 uint32_t startlevel; 10537 bool ok; 10538 10539 if (!aarch64 || stride == 9) { 10540 /* AArch32 or 4KB pages */ 10541 startlevel = 2 - sl0; 10542 } else { 10543 /* 16KB or 64KB pages */ 10544 startlevel = 3 - sl0; 10545 } 10546 10547 /* Check that the starting level is valid. 
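 * For example, with a 4KB granule (stride 9) and a 40-bit IPA, SL0 = 1
 * gives startlevel 1; check_s2_mmu_setup() then requires
 * inputsize - ((3 - level) * stride + grainsize) = 40 - 30 = 10 to lie
 * in [1, stride + 4], which it does. (Compare the stage 1 case above,
 * where a 48-bit inputsize with a 4KB granule gives
 * level = 4 - (48 - 4) / 9 = 0.)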
*/ 10548 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 10549 inputsize, stride); 10550 if (!ok) { 10551 fault_type = ARMFault_Translation; 10552 goto do_fault; 10553 } 10554 level = startlevel; 10555 } 10556 10557 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 10558 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 10559 10560 /* Now we can extract the actual base address from the TTBR */ 10561 descaddr = extract64(ttbr, 0, 48); 10562 descaddr &= ~indexmask; 10563 10564 /* The address field in the descriptor goes up to bit 39 for ARMv7 10565 * but up to bit 47 for ARMv8, but we use the descaddrmask 10566 * up to bit 39 for AArch32, because we don't need other bits in that case 10567 * to construct next descriptor address (anyway they should be all zeroes). 10568 */ 10569 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 10570 ~indexmask_grainsize; 10571 10572 /* Secure accesses start with the page table in secure memory and 10573 * can be downgraded to non-secure at any step. Non-secure accesses 10574 * remain non-secure. We implement this by just ORing in the NSTable/NS 10575 * bits at each step. 10576 */ 10577 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 10578 for (;;) { 10579 uint64_t descriptor; 10580 bool nstable; 10581 10582 descaddr |= (address >> (stride * (4 - level))) & indexmask; 10583 descaddr &= ~7ULL; 10584 nstable = extract32(tableattrs, 4, 1); 10585 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 10586 if (fi->type != ARMFault_None) { 10587 goto do_fault; 10588 } 10589 10590 if (!(descriptor & 1) || 10591 (!(descriptor & 2) && (level == 3))) { 10592 /* Invalid, or the Reserved level 3 encoding */ 10593 goto do_fault; 10594 } 10595 descaddr = descriptor & descaddrmask; 10596 10597 if ((descriptor & 2) && (level < 3)) { 10598 /* Table entry. The top five bits are attributes which may 10599 * propagate down through lower levels of the table (and 10600 * which are all arranged so that 0 means "no effect", so 10601 * we can gather them up by ORing in the bits at each level). 10602 */ 10603 tableattrs |= extract64(descriptor, 59, 5); 10604 level++; 10605 indexmask = indexmask_grainsize; 10606 continue; 10607 } 10608 /* Block entry at level 1 or 2, or page entry at level 3. 10609 * These are basically the same thing, although the number 10610 * of bits we pull in from the vaddr varies. 10611 */ 10612 page_size = (1ULL << ((stride * (4 - level)) + 3)); 10613 descaddr |= (address & (page_size - 1)); 10614 /* Extract attributes from the descriptor */ 10615 attrs = extract64(descriptor, 2, 10) 10616 | (extract64(descriptor, 52, 12) << 10); 10617 10618 if (mmu_idx == ARMMMUIdx_Stage2) { 10619 /* Stage 2 table descriptors do not include any attribute fields */ 10620 break; 10621 } 10622 /* Merge in attributes from table descriptors */ 10623 attrs |= nstable << 3; /* NS */ 10624 guarded = extract64(descriptor, 50, 1); /* GP */ 10625 if (param.hpd) { 10626 /* HPD disables all the table attributes except NSTable. */ 10627 break; 10628 } 10629 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 10630 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 10631 * means "force PL1 access only", which means forcing AP[1] to 0. 10632 */ 10633 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 10634 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 10635 break; 10636 } 10637 /* Here descaddr is the final physical address, and attributes 10638 * are all in attrs. 
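 * As used below: for stage 1, attrs[2:0] is AttrIndx and attrs[3] is NS;
 * for stage 2, attrs[3:0] is MemAttr. attrs[5:4] is AP[2:1] (S2AP for
 * stage 2), attrs[7:6] is SH, attrs[8] is AF, attrs[11] is PXN (stage 1
 * only) and attrs[12] is XN; these come from descriptor bits [11:2] and
 * [63:52], plus the table bits merged in above.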
10639 */ 10640 fault_type = ARMFault_AccessFlag; 10641 if ((attrs & (1 << 8)) == 0) { 10642 /* Access flag */ 10643 goto do_fault; 10644 } 10645 10646 ap = extract32(attrs, 4, 2); 10647 xn = extract32(attrs, 12, 1); 10648 10649 if (mmu_idx == ARMMMUIdx_Stage2) { 10650 ns = true; 10651 *prot = get_S2prot(env, ap, xn); 10652 } else { 10653 ns = extract32(attrs, 3, 1); 10654 pxn = extract32(attrs, 11, 1); 10655 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 10656 } 10657 10658 fault_type = ARMFault_Permission; 10659 if (!(*prot & (1 << access_type))) { 10660 goto do_fault; 10661 } 10662 10663 if (ns) { 10664 /* The NS bit will (as required by the architecture) have no effect if 10665 * the CPU doesn't support TZ or this is a non-secure translation 10666 * regime, because the attribute will already be non-secure. 10667 */ 10668 txattrs->secure = false; 10669 } 10670 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ 10671 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 10672 txattrs->target_tlb_bit0 = true; 10673 } 10674 10675 if (cacheattrs != NULL) { 10676 if (mmu_idx == ARMMMUIdx_Stage2) { 10677 cacheattrs->attrs = convert_stage2_attrs(env, 10678 extract32(attrs, 0, 4)); 10679 } else { 10680 /* Index into MAIR registers for cache attributes */ 10681 uint8_t attrindx = extract32(attrs, 0, 3); 10682 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 10683 assert(attrindx <= 7); 10684 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 10685 } 10686 cacheattrs->shareability = extract32(attrs, 6, 2); 10687 } 10688 10689 *phys_ptr = descaddr; 10690 *page_size_ptr = page_size; 10691 return false; 10692 10693 do_fault: 10694 fi->type = fault_type; 10695 fi->level = level; 10696 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 10697 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2); 10698 return true; 10699 } 10700 10701 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 10702 ARMMMUIdx mmu_idx, 10703 int32_t address, int *prot) 10704 { 10705 if (!arm_feature(env, ARM_FEATURE_M)) { 10706 *prot = PAGE_READ | PAGE_WRITE; 10707 switch (address) { 10708 case 0xF0000000 ... 0xFFFFFFFF: 10709 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 10710 /* hivecs execing is ok */ 10711 *prot |= PAGE_EXEC; 10712 } 10713 break; 10714 case 0x00000000 ... 0x7FFFFFFF: 10715 *prot |= PAGE_EXEC; 10716 break; 10717 } 10718 } else { 10719 /* Default system address map for M profile cores. 10720 * The architecture specifies which regions are execute-never; 10721 * at the MPU level no other checks are defined. 10722 */ 10723 switch (address) { 10724 case 0x00000000 ... 0x1fffffff: /* ROM */ 10725 case 0x20000000 ... 0x3fffffff: /* SRAM */ 10726 case 0x60000000 ... 0x7fffffff: /* RAM */ 10727 case 0x80000000 ... 0x9fffffff: /* RAM */ 10728 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10729 break; 10730 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 10731 case 0xa0000000 ... 0xbfffffff: /* Device */ 10732 case 0xc0000000 ... 0xdfffffff: /* Device */ 10733 case 0xe0000000 ... 0xffffffff: /* System */ 10734 *prot = PAGE_READ | PAGE_WRITE; 10735 break; 10736 default: 10737 g_assert_not_reached(); 10738 } 10739 } 10740 } 10741 10742 static bool pmsav7_use_background_region(ARMCPU *cpu, 10743 ARMMMUIdx mmu_idx, bool is_user) 10744 { 10745 /* Return true if we should use the default memory map as a 10746 * "background" region if there are no hits against any MPU regions. 
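 * For M profile this is MPU_CTRL.PRIVDEFENA for the current security
 * state; for A/R profile it is SCTLR.BR. The background map is never
 * used for unprivileged accesses.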
10747 */ 10748 CPUARMState *env = &cpu->env; 10749 10750 if (is_user) { 10751 return false; 10752 } 10753 10754 if (arm_feature(env, ARM_FEATURE_M)) { 10755 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 10756 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 10757 } else { 10758 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 10759 } 10760 } 10761 10762 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 10763 { 10764 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 10765 return arm_feature(env, ARM_FEATURE_M) && 10766 extract32(address, 20, 12) == 0xe00; 10767 } 10768 10769 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 10770 { 10771 /* True if address is in the M profile system region 10772 * 0xe0000000 - 0xffffffff 10773 */ 10774 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 10775 } 10776 10777 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 10778 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10779 hwaddr *phys_ptr, int *prot, 10780 target_ulong *page_size, 10781 ARMMMUFaultInfo *fi) 10782 { 10783 ARMCPU *cpu = env_archcpu(env); 10784 int n; 10785 bool is_user = regime_is_user(env, mmu_idx); 10786 10787 *phys_ptr = address; 10788 *page_size = TARGET_PAGE_SIZE; 10789 *prot = 0; 10790 10791 if (regime_translation_disabled(env, mmu_idx) || 10792 m_is_ppb_region(env, address)) { 10793 /* MPU disabled or M profile PPB access: use default memory map. 10794 * The other case which uses the default memory map in the 10795 * v7M ARM ARM pseudocode is exception vector reads from the vector 10796 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 10797 * which always does a direct read using address_space_ldl(), rather 10798 * than going via this function, so we don't need to check that here. 10799 */ 10800 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10801 } else { /* MPU enabled */ 10802 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 10803 /* region search */ 10804 uint32_t base = env->pmsav7.drbar[n]; 10805 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 10806 uint32_t rmask; 10807 bool srdis = false; 10808 10809 if (!(env->pmsav7.drsr[n] & 0x1)) { 10810 continue; 10811 } 10812 10813 if (!rsize) { 10814 qemu_log_mask(LOG_GUEST_ERROR, 10815 "DRSR[%d]: Rsize field cannot be 0\n", n); 10816 continue; 10817 } 10818 rsize++; 10819 rmask = (1ull << rsize) - 1; 10820 10821 if (base & rmask) { 10822 qemu_log_mask(LOG_GUEST_ERROR, 10823 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 10824 "to DRSR region size, mask = 0x%" PRIx32 "\n", 10825 n, base, rmask); 10826 continue; 10827 } 10828 10829 if (address < base || address > base + rmask) { 10830 /* 10831 * Address not in this region. We must check whether the 10832 * region covers addresses in the same page as our address. 10833 * In that case we must not report a size that covers the 10834 * whole page for a subsequent hit against a different MPU 10835 * region or the background region, because it would result in 10836 * incorrect TLB hits for subsequent accesses to addresses that 10837 * are in this MPU region. 
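 * Reporting a 1-byte page size below means, in effect, that QEMU will
 * not install a full-page TLB entry for this translation, so later
 * accesses elsewhere in the page redo the MPU lookup instead of reusing
 * a stale result.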
10838 */ 10839 if (ranges_overlap(base, rmask, 10840 address & TARGET_PAGE_MASK, 10841 TARGET_PAGE_SIZE)) { 10842 *page_size = 1; 10843 } 10844 continue; 10845 } 10846 10847 /* Region matched */ 10848 10849 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 10850 int i, snd; 10851 uint32_t srdis_mask; 10852 10853 rsize -= 3; /* sub region size (power of 2) */ 10854 snd = ((address - base) >> rsize) & 0x7; 10855 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 10856 10857 srdis_mask = srdis ? 0x3 : 0x0; 10858 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 10859 /* This will check in groups of 2, 4 and then 8, whether 10860 * the subregion bits are consistent. rsize is incremented 10861 * back up to give the region size, considering consistent 10862 * adjacent subregions as one region. Stop testing if rsize 10863 * is already big enough for an entire QEMU page. 10864 */ 10865 int snd_rounded = snd & ~(i - 1); 10866 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 10867 snd_rounded + 8, i); 10868 if (srdis_mask ^ srdis_multi) { 10869 break; 10870 } 10871 srdis_mask = (srdis_mask << i) | srdis_mask; 10872 rsize++; 10873 } 10874 } 10875 if (srdis) { 10876 continue; 10877 } 10878 if (rsize < TARGET_PAGE_BITS) { 10879 *page_size = 1 << rsize; 10880 } 10881 break; 10882 } 10883 10884 if (n == -1) { /* no hits */ 10885 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 10886 /* background fault */ 10887 fi->type = ARMFault_Background; 10888 return true; 10889 } 10890 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10891 } else { /* a MPU hit! */ 10892 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 10893 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 10894 10895 if (m_is_system_region(env, address)) { 10896 /* System space is always execute never */ 10897 xn = 1; 10898 } 10899 10900 if (is_user) { /* User mode AP bit decoding */ 10901 switch (ap) { 10902 case 0: 10903 case 1: 10904 case 5: 10905 break; /* no access */ 10906 case 3: 10907 *prot |= PAGE_WRITE; 10908 /* fall through */ 10909 case 2: 10910 case 6: 10911 *prot |= PAGE_READ | PAGE_EXEC; 10912 break; 10913 case 7: 10914 /* for v7M, same as 6; for R profile a reserved value */ 10915 if (arm_feature(env, ARM_FEATURE_M)) { 10916 *prot |= PAGE_READ | PAGE_EXEC; 10917 break; 10918 } 10919 /* fall through */ 10920 default: 10921 qemu_log_mask(LOG_GUEST_ERROR, 10922 "DRACR[%d]: Bad value for AP bits: 0x%" 10923 PRIx32 "\n", n, ap); 10924 } 10925 } else { /* Priv. 
mode AP bits decoding */ 10926 switch (ap) { 10927 case 0: 10928 break; /* no access */ 10929 case 1: 10930 case 2: 10931 case 3: 10932 *prot |= PAGE_WRITE; 10933 /* fall through */ 10934 case 5: 10935 case 6: 10936 *prot |= PAGE_READ | PAGE_EXEC; 10937 break; 10938 case 7: 10939 /* for v7M, same as 6; for R profile a reserved value */ 10940 if (arm_feature(env, ARM_FEATURE_M)) { 10941 *prot |= PAGE_READ | PAGE_EXEC; 10942 break; 10943 } 10944 /* fall through */ 10945 default: 10946 qemu_log_mask(LOG_GUEST_ERROR, 10947 "DRACR[%d]: Bad value for AP bits: 0x%" 10948 PRIx32 "\n", n, ap); 10949 } 10950 } 10951 10952 /* execute never */ 10953 if (xn) { 10954 *prot &= ~PAGE_EXEC; 10955 } 10956 } 10957 } 10958 10959 fi->type = ARMFault_Permission; 10960 fi->level = 1; 10961 return !(*prot & (1 << access_type)); 10962 } 10963 10964 static bool v8m_is_sau_exempt(CPUARMState *env, 10965 uint32_t address, MMUAccessType access_type) 10966 { 10967 /* The architecture specifies that certain address ranges are 10968 * exempt from v8M SAU/IDAU checks. 10969 */ 10970 return 10971 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 10972 (address >= 0xe0000000 && address <= 0xe0002fff) || 10973 (address >= 0xe000e000 && address <= 0xe000efff) || 10974 (address >= 0xe002e000 && address <= 0xe002efff) || 10975 (address >= 0xe0040000 && address <= 0xe0041fff) || 10976 (address >= 0xe00ff000 && address <= 0xe00fffff); 10977 } 10978 10979 void v8m_security_lookup(CPUARMState *env, uint32_t address, 10980 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10981 V8M_SAttributes *sattrs) 10982 { 10983 /* Look up the security attributes for this address. Compare the 10984 * pseudocode SecurityCheck() function. 10985 * We assume the caller has zero-initialized *sattrs. 10986 */ 10987 ARMCPU *cpu = env_archcpu(env); 10988 int r; 10989 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 10990 int idau_region = IREGION_NOTVALID; 10991 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 10992 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 10993 10994 if (cpu->idau) { 10995 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 10996 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 10997 10998 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 10999 &idau_nsc); 11000 } 11001 11002 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 11003 /* 0xf0000000..0xffffffff is always S for insn fetches */ 11004 return; 11005 } 11006 11007 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 11008 sattrs->ns = !regime_is_secure(env, mmu_idx); 11009 return; 11010 } 11011 11012 if (idau_region != IREGION_NOTVALID) { 11013 sattrs->irvalid = true; 11014 sattrs->iregion = idau_region; 11015 } 11016 11017 switch (env->sau.ctrl & 3) { 11018 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 11019 break; 11020 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 11021 sattrs->ns = true; 11022 break; 11023 default: /* SAU.ENABLE == 1 */ 11024 for (r = 0; r < cpu->sau_sregion; r++) { 11025 if (env->sau.rlar[r] & 1) { 11026 uint32_t base = env->sau.rbar[r] & ~0x1f; 11027 uint32_t limit = env->sau.rlar[r] | 0x1f; 11028 11029 if (base <= address && limit >= address) { 11030 if (base > addr_page_base || limit < addr_page_limit) { 11031 sattrs->subpage = true; 11032 } 11033 if (sattrs->srvalid) { 11034 /* If we hit in more than one region then we must report 11035 * as Secure, not NS-Callable, with no valid region 11036 * number info. 
11037 */ 11038 sattrs->ns = false; 11039 sattrs->nsc = false; 11040 sattrs->sregion = 0; 11041 sattrs->srvalid = false; 11042 break; 11043 } else { 11044 if (env->sau.rlar[r] & 2) { 11045 sattrs->nsc = true; 11046 } else { 11047 sattrs->ns = true; 11048 } 11049 sattrs->srvalid = true; 11050 sattrs->sregion = r; 11051 } 11052 } else { 11053 /* 11054 * Address not in this region. We must check whether the 11055 * region covers addresses in the same page as our address. 11056 * In that case we must not report a size that covers the 11057 * whole page for a subsequent hit against a different MPU 11058 * region or the background region, because it would result 11059 * in incorrect TLB hits for subsequent accesses to 11060 * addresses that are in this MPU region. 11061 */ 11062 if (limit >= base && 11063 ranges_overlap(base, limit - base + 1, 11064 addr_page_base, 11065 TARGET_PAGE_SIZE)) { 11066 sattrs->subpage = true; 11067 } 11068 } 11069 } 11070 } 11071 break; 11072 } 11073 11074 /* 11075 * The IDAU will override the SAU lookup results if it specifies 11076 * higher security than the SAU does. 11077 */ 11078 if (!idau_ns) { 11079 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 11080 sattrs->ns = false; 11081 sattrs->nsc = idau_nsc; 11082 } 11083 } 11084 } 11085 11086 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 11087 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11088 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11089 int *prot, bool *is_subpage, 11090 ARMMMUFaultInfo *fi, uint32_t *mregion) 11091 { 11092 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 11093 * that a full phys-to-virt translation does). 11094 * mregion is (if not NULL) set to the region number which matched, 11095 * or -1 if no region number is returned (MPU off, address did not 11096 * hit a region, address hit in multiple regions). 11097 * We set is_subpage to true if the region hit doesn't cover the 11098 * entire TARGET_PAGE the address is within. 11099 */ 11100 ARMCPU *cpu = env_archcpu(env); 11101 bool is_user = regime_is_user(env, mmu_idx); 11102 uint32_t secure = regime_is_secure(env, mmu_idx); 11103 int n; 11104 int matchregion = -1; 11105 bool hit = false; 11106 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11107 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11108 11109 *is_subpage = false; 11110 *phys_ptr = address; 11111 *prot = 0; 11112 if (mregion) { 11113 *mregion = -1; 11114 } 11115 11116 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 11117 * was an exception vector read from the vector table (which is always 11118 * done using the default system address map), because those accesses 11119 * are done in arm_v7m_load_vector(), which always does a direct 11120 * read using address_space_ldl(), rather than going via this function. 11121 */ 11122 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 11123 hit = true; 11124 } else if (m_is_ppb_region(env, address)) { 11125 hit = true; 11126 } else { 11127 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11128 hit = true; 11129 } 11130 11131 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11132 /* region search */ 11133 /* Note that the base address is bits [31:5] from the register 11134 * with bits [4:0] all zeroes, but the limit address is bits 11135 * [31:5] from the register with bits [4:0] all ones. 
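 * For example, RBAR = 0x20000040 and RLAR = 0x200000ff (enable bit set)
 * describe the 192-byte region 0x20000040..0x200000ff.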
11136 */ 11137 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 11138 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 11139 11140 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 11141 /* Region disabled */ 11142 continue; 11143 } 11144 11145 if (address < base || address > limit) { 11146 /* 11147 * Address not in this region. We must check whether the 11148 * region covers addresses in the same page as our address. 11149 * In that case we must not report a size that covers the 11150 * whole page for a subsequent hit against a different MPU 11151 * region or the background region, because it would result in 11152 * incorrect TLB hits for subsequent accesses to addresses that 11153 * are in this MPU region. 11154 */ 11155 if (limit >= base && 11156 ranges_overlap(base, limit - base + 1, 11157 addr_page_base, 11158 TARGET_PAGE_SIZE)) { 11159 *is_subpage = true; 11160 } 11161 continue; 11162 } 11163 11164 if (base > addr_page_base || limit < addr_page_limit) { 11165 *is_subpage = true; 11166 } 11167 11168 if (matchregion != -1) { 11169 /* Multiple regions match -- always a failure (unlike 11170 * PMSAv7 where highest-numbered-region wins) 11171 */ 11172 fi->type = ARMFault_Permission; 11173 fi->level = 1; 11174 return true; 11175 } 11176 11177 matchregion = n; 11178 hit = true; 11179 } 11180 } 11181 11182 if (!hit) { 11183 /* background fault */ 11184 fi->type = ARMFault_Background; 11185 return true; 11186 } 11187 11188 if (matchregion == -1) { 11189 /* hit using the background region */ 11190 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11191 } else { 11192 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 11193 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 11194 11195 if (m_is_system_region(env, address)) { 11196 /* System space is always execute never */ 11197 xn = 1; 11198 } 11199 11200 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 11201 if (*prot && !xn) { 11202 *prot |= PAGE_EXEC; 11203 } 11204 /* We don't need to look the attribute up in the MAIR0/MAIR1 11205 * registers because that only tells us about cacheability. 11206 */ 11207 if (mregion) { 11208 *mregion = matchregion; 11209 } 11210 } 11211 11212 fi->type = ARMFault_Permission; 11213 fi->level = 1; 11214 return !(*prot & (1 << access_type)); 11215 } 11216 11217 11218 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 11219 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11220 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11221 int *prot, target_ulong *page_size, 11222 ARMMMUFaultInfo *fi) 11223 { 11224 uint32_t secure = regime_is_secure(env, mmu_idx); 11225 V8M_SAttributes sattrs = {}; 11226 bool ret; 11227 bool mpu_is_subpage; 11228 11229 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 11230 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 11231 if (access_type == MMU_INST_FETCH) { 11232 /* Instruction fetches always use the MMU bank and the 11233 * transaction attribute determined by the fetch address, 11234 * regardless of CPU state. This is painful for QEMU 11235 * to handle, because it would mean we need to encode 11236 * into the mmu_idx not just the (user, negpri) information 11237 * for the current security state but also that for the 11238 * other security state, which would balloon the number 11239 * of mmu_idx values needed alarmingly. 
11240 * Fortunately we can avoid this because it's not actually 11241 * possible to arbitrarily execute code from memory with 11242 * the wrong security attribute: it will always generate 11243 * an exception of some kind or another, apart from the 11244 * special case of an NS CPU executing an SG instruction 11245 * in S&NSC memory. So we always just fail the translation 11246 * here and sort things out in the exception handler 11247 * (including possibly emulating an SG instruction). 11248 */ 11249 if (sattrs.ns != !secure) { 11250 if (sattrs.nsc) { 11251 fi->type = ARMFault_QEMU_NSCExec; 11252 } else { 11253 fi->type = ARMFault_QEMU_SFault; 11254 } 11255 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11256 *phys_ptr = address; 11257 *prot = 0; 11258 return true; 11259 } 11260 } else { 11261 /* For data accesses we always use the MMU bank indicated 11262 * by the current CPU state, but the security attributes 11263 * might downgrade a secure access to nonsecure. 11264 */ 11265 if (sattrs.ns) { 11266 txattrs->secure = false; 11267 } else if (!secure) { 11268 /* NS access to S memory must fault. 11269 * Architecturally we should first check whether the 11270 * MPU information for this address indicates that we 11271 * are doing an unaligned access to Device memory, which 11272 * should generate a UsageFault instead. QEMU does not 11273 * currently check for that kind of unaligned access though. 11274 * If we added it we would need to do so as a special case 11275 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 11276 */ 11277 fi->type = ARMFault_QEMU_SFault; 11278 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11279 *phys_ptr = address; 11280 *prot = 0; 11281 return true; 11282 } 11283 } 11284 } 11285 11286 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 11287 txattrs, prot, &mpu_is_subpage, fi, NULL); 11288 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 11289 return ret; 11290 } 11291 11292 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 11293 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11294 hwaddr *phys_ptr, int *prot, 11295 ARMMMUFaultInfo *fi) 11296 { 11297 int n; 11298 uint32_t mask; 11299 uint32_t base; 11300 bool is_user = regime_is_user(env, mmu_idx); 11301 11302 if (regime_translation_disabled(env, mmu_idx)) { 11303 /* MPU disabled. */ 11304 *phys_ptr = address; 11305 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11306 return false; 11307 } 11308 11309 *phys_ptr = address; 11310 for (n = 7; n >= 0; n--) { 11311 base = env->cp15.c6_region[n]; 11312 if ((base & 1) == 0) { 11313 continue; 11314 } 11315 mask = 1 << ((base >> 1) & 0x1f); 11316 /* Keep this shift separate from the above to avoid an 11317 (undefined) << 32. 
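 * The size field encodes a region of 2^(N+1) bytes; computing 1 << N
 * first and then (mask << 1) - 1 yields the 2^(N+1) - 1 byte mask even
 * for the maximum N of 31, where a direct 1 << 32 would be undefined.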
*/ 11318 mask = (mask << 1) - 1; 11319 if (((base ^ address) & ~mask) == 0) { 11320 break; 11321 } 11322 } 11323 if (n < 0) { 11324 fi->type = ARMFault_Background; 11325 return true; 11326 } 11327 11328 if (access_type == MMU_INST_FETCH) { 11329 mask = env->cp15.pmsav5_insn_ap; 11330 } else { 11331 mask = env->cp15.pmsav5_data_ap; 11332 } 11333 mask = (mask >> (n * 4)) & 0xf; 11334 switch (mask) { 11335 case 0: 11336 fi->type = ARMFault_Permission; 11337 fi->level = 1; 11338 return true; 11339 case 1: 11340 if (is_user) { 11341 fi->type = ARMFault_Permission; 11342 fi->level = 1; 11343 return true; 11344 } 11345 *prot = PAGE_READ | PAGE_WRITE; 11346 break; 11347 case 2: 11348 *prot = PAGE_READ; 11349 if (!is_user) { 11350 *prot |= PAGE_WRITE; 11351 } 11352 break; 11353 case 3: 11354 *prot = PAGE_READ | PAGE_WRITE; 11355 break; 11356 case 5: 11357 if (is_user) { 11358 fi->type = ARMFault_Permission; 11359 fi->level = 1; 11360 return true; 11361 } 11362 *prot = PAGE_READ; 11363 break; 11364 case 6: 11365 *prot = PAGE_READ; 11366 break; 11367 default: 11368 /* Bad permission. */ 11369 fi->type = ARMFault_Permission; 11370 fi->level = 1; 11371 return true; 11372 } 11373 *prot |= PAGE_EXEC; 11374 return false; 11375 } 11376 11377 /* Combine either inner or outer cacheability attributes for normal 11378 * memory, according to table D4-42 and pseudocode procedure 11379 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 11380 * 11381 * NB: only stage 1 includes allocation hints (RW bits), leading to 11382 * some asymmetry. 11383 */ 11384 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 11385 { 11386 if (s1 == 4 || s2 == 4) { 11387 /* non-cacheable has precedence */ 11388 return 4; 11389 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 11390 /* stage 1 write-through takes precedence */ 11391 return s1; 11392 } else if (extract32(s2, 2, 2) == 2) { 11393 /* stage 2 write-through takes precedence, but the allocation hint 11394 * is still taken from stage 1 11395 */ 11396 return (2 << 2) | extract32(s1, 0, 2); 11397 } else { /* write-back */ 11398 return s1; 11399 } 11400 } 11401 11402 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 11403 * and CombineS1S2Desc() 11404 * 11405 * @s1: Attributes from stage 1 walk 11406 * @s2: Attributes from stage 2 walk 11407 */ 11408 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 11409 { 11410 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 11411 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 11412 ARMCacheAttrs ret; 11413 11414 /* Combine shareability attributes (table D4-43) */ 11415 if (s1.shareability == 2 || s2.shareability == 2) { 11416 /* if either are outer-shareable, the result is outer-shareable */ 11417 ret.shareability = 2; 11418 } else if (s1.shareability == 3 || s2.shareability == 3) { 11419 /* if either are inner-shareable, the result is inner-shareable */ 11420 ret.shareability = 3; 11421 } else { 11422 /* both non-shareable */ 11423 ret.shareability = 0; 11424 } 11425 11426 /* Combine memory type and cacheability attributes */ 11427 if (s1hi == 0 || s2hi == 0) { 11428 /* Device has precedence over normal */ 11429 if (s1lo == 0 || s2lo == 0) { 11430 /* nGnRnE has precedence over anything */ 11431 ret.attrs = 0; 11432 } else if (s1lo == 4 || s2lo == 4) { 11433 /* non-Reordering has precedence over Reordering */ 11434 ret.attrs = 4; /* nGnRE */ 11435 } else if (s1lo == 8 || s2lo == 8) { 11436 /* 
non-Gathering has precedence over Gathering */ 11437 ret.attrs = 8; /* nGRE */ 11438 } else { 11439 ret.attrs = 0xc; /* GRE */ 11440 } 11441 11442 /* Any location for which the resultant memory type is any 11443 * type of Device memory is always treated as Outer Shareable. 11444 */ 11445 ret.shareability = 2; 11446 } else { /* Normal memory */ 11447 /* Outer/inner cacheability combine independently */ 11448 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 11449 | combine_cacheattr_nibble(s1lo, s2lo); 11450 11451 if (ret.attrs == 0x44) { 11452 /* Any location for which the resultant memory type is Normal 11453 * Inner Non-cacheable, Outer Non-cacheable is always treated 11454 * as Outer Shareable. 11455 */ 11456 ret.shareability = 2; 11457 } 11458 } 11459 11460 return ret; 11461 } 11462 11463 11464 /* get_phys_addr - get the physical address for this virtual address 11465 * 11466 * Find the physical address corresponding to the given virtual address, 11467 * by doing a translation table walk on MMU based systems or using the 11468 * MPU state on MPU based systems. 11469 * 11470 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 11471 * prot and page_size may not be filled in, and the populated fsr value provides 11472 * information on why the translation aborted, in the format of a 11473 * DFSR/IFSR fault register, with the following caveats: 11474 * * we honour the short vs long DFSR format differences. 11475 * * the WnR bit is never set (the caller must do this). 11476 * * for PSMAv5 based systems we don't bother to return a full FSR format 11477 * value. 11478 * 11479 * @env: CPUARMState 11480 * @address: virtual address to get physical address for 11481 * @access_type: 0 for read, 1 for write, 2 for execute 11482 * @mmu_idx: MMU index indicating required translation regime 11483 * @phys_ptr: set to the physical address corresponding to the virtual address 11484 * @attrs: set to the memory transaction attributes to use 11485 * @prot: set to the permissions for the page containing phys_ptr 11486 * @page_size: set to the size of the page containing phys_ptr 11487 * @fi: set to fault info if the translation fails 11488 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11489 */ 11490 bool get_phys_addr(CPUARMState *env, target_ulong address, 11491 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11492 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11493 target_ulong *page_size, 11494 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11495 { 11496 if (mmu_idx == ARMMMUIdx_E10_0 || 11497 mmu_idx == ARMMMUIdx_E10_1 || 11498 mmu_idx == ARMMMUIdx_E10_1_PAN) { 11499 /* Call ourselves recursively to do the stage 1 and then stage 2 11500 * translations. 11501 */ 11502 if (arm_feature(env, ARM_FEATURE_EL2)) { 11503 hwaddr ipa; 11504 int s2_prot; 11505 int ret; 11506 ARMCacheAttrs cacheattrs2 = {}; 11507 11508 ret = get_phys_addr(env, address, access_type, 11509 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 11510 prot, page_size, fi, cacheattrs); 11511 11512 /* If S1 fails or S2 is disabled, return early. */ 11513 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 11514 *phys_ptr = ipa; 11515 return ret; 11516 } 11517 11518 /* S1 is done. Now do S2 translation. */ 11519 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, 11520 phys_ptr, attrs, &s2_prot, 11521 page_size, fi, 11522 cacheattrs != NULL ? &cacheattrs2 : NULL); 11523 fi->s2addr = ipa; 11524 /* Combine the S1 and S2 perms. 
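 * Stage 2 can only further restrict what stage 1 grants, hence the
 * bitwise AND; the cache attributes are merged just below with
 * combine_cacheattrs() (e.g. stage 1 Write-Back 0xff combined with
 * stage 2 Non-cacheable 0x44 yields 0x44).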
*/ 11525 *prot &= s2_prot; 11526 11527 /* Combine the S1 and S2 cache attributes, if needed */ 11528 if (!ret && cacheattrs != NULL) { 11529 if (env->cp15.hcr_el2 & HCR_DC) { 11530 /* 11531 * HCR.DC forces the first stage attributes to 11532 * Normal Non-Shareable, 11533 * Inner Write-Back Read-Allocate Write-Allocate, 11534 * Outer Write-Back Read-Allocate Write-Allocate. 11535 */ 11536 cacheattrs->attrs = 0xff; 11537 cacheattrs->shareability = 0; 11538 } 11539 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11540 } 11541 11542 return ret; 11543 } else { 11544 /* 11545 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 11546 */ 11547 mmu_idx = stage_1_mmu_idx(mmu_idx); 11548 } 11549 } 11550 11551 /* The page table entries may downgrade secure to non-secure, but 11552 * cannot upgrade an non-secure translation regime's attributes 11553 * to secure. 11554 */ 11555 attrs->secure = regime_is_secure(env, mmu_idx); 11556 attrs->user = regime_is_user(env, mmu_idx); 11557 11558 /* Fast Context Switch Extension. This doesn't exist at all in v8. 11559 * In v7 and earlier it affects all stage 1 translations. 11560 */ 11561 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 11562 && !arm_feature(env, ARM_FEATURE_V8)) { 11563 if (regime_el(env, mmu_idx) == 3) { 11564 address += env->cp15.fcseidr_s; 11565 } else { 11566 address += env->cp15.fcseidr_ns; 11567 } 11568 } 11569 11570 if (arm_feature(env, ARM_FEATURE_PMSA)) { 11571 bool ret; 11572 *page_size = TARGET_PAGE_SIZE; 11573 11574 if (arm_feature(env, ARM_FEATURE_V8)) { 11575 /* PMSAv8 */ 11576 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 11577 phys_ptr, attrs, prot, page_size, fi); 11578 } else if (arm_feature(env, ARM_FEATURE_V7)) { 11579 /* PMSAv7 */ 11580 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 11581 phys_ptr, prot, page_size, fi); 11582 } else { 11583 /* Pre-v7 MPU */ 11584 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 11585 phys_ptr, prot, fi); 11586 } 11587 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 11588 " mmu_idx %u -> %s (prot %c%c%c)\n", 11589 access_type == MMU_DATA_LOAD ? "reading" : 11590 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 11591 (uint32_t)address, mmu_idx, 11592 ret ? "Miss" : "Hit", 11593 *prot & PAGE_READ ? 'r' : '-', 11594 *prot & PAGE_WRITE ? 'w' : '-', 11595 *prot & PAGE_EXEC ? 'x' : '-'); 11596 11597 return ret; 11598 } 11599 11600 /* Definitely a real MMU, not an MPU */ 11601 11602 if (regime_translation_disabled(env, mmu_idx)) { 11603 /* MMU disabled. 
*/ 11604 *phys_ptr = address; 11605 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11606 *page_size = TARGET_PAGE_SIZE; 11607 return 0; 11608 } 11609 11610 if (regime_using_lpae_format(env, mmu_idx)) { 11611 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 11612 phys_ptr, attrs, prot, page_size, 11613 fi, cacheattrs); 11614 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 11615 return get_phys_addr_v6(env, address, access_type, mmu_idx, 11616 phys_ptr, attrs, prot, page_size, fi); 11617 } else { 11618 return get_phys_addr_v5(env, address, access_type, mmu_idx, 11619 phys_ptr, prot, page_size, fi); 11620 } 11621 } 11622 11623 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 11624 MemTxAttrs *attrs) 11625 { 11626 ARMCPU *cpu = ARM_CPU(cs); 11627 CPUARMState *env = &cpu->env; 11628 hwaddr phys_addr; 11629 target_ulong page_size; 11630 int prot; 11631 bool ret; 11632 ARMMMUFaultInfo fi = {}; 11633 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 11634 11635 *attrs = (MemTxAttrs) {}; 11636 11637 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 11638 attrs, &prot, &page_size, &fi, NULL); 11639 11640 if (ret) { 11641 return -1; 11642 } 11643 return phys_addr; 11644 } 11645 11646 #endif 11647 11648 /* Note that signed overflow is undefined in C. The following routines are 11649 careful to use unsigned types where modulo arithmetic is required. 11650 Failure to do so _will_ break on newer gcc. */ 11651 11652 /* Signed saturating arithmetic. */ 11653 11654 /* Perform 16-bit signed saturating addition. */ 11655 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11656 { 11657 uint16_t res; 11658 11659 res = a + b; 11660 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11661 if (a & 0x8000) 11662 res = 0x8000; 11663 else 11664 res = 0x7fff; 11665 } 11666 return res; 11667 } 11668 11669 /* Perform 8-bit signed saturating addition. */ 11670 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11671 { 11672 uint8_t res; 11673 11674 res = a + b; 11675 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11676 if (a & 0x80) 11677 res = 0x80; 11678 else 11679 res = 0x7f; 11680 } 11681 return res; 11682 } 11683 11684 /* Perform 16-bit signed saturating subtraction. */ 11685 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 11686 { 11687 uint16_t res; 11688 11689 res = a - b; 11690 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 11691 if (a & 0x8000) 11692 res = 0x8000; 11693 else 11694 res = 0x7fff; 11695 } 11696 return res; 11697 } 11698 11699 /* Perform 8-bit signed saturating subtraction. */ 11700 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11701 { 11702 uint8_t res; 11703 11704 res = a - b; 11705 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11706 if (a & 0x80) 11707 res = 0x80; 11708 else 11709 res = 0x7f; 11710 } 11711 return res; 11712 } 11713 11714 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11715 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11716 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11717 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11718 #define PFX q 11719 11720 #include "op_addsub.h" 11721 11722 /* Unsigned saturating arithmetic. 
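 * e.g. add16_usat(0xfff0, 0x0020) saturates to 0xffff and
 * sub16_usat(3, 5) clamps to 0.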
*/ 11723 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11724 { 11725 uint16_t res; 11726 res = a + b; 11727 if (res < a) 11728 res = 0xffff; 11729 return res; 11730 } 11731 11732 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11733 { 11734 if (a > b) 11735 return a - b; 11736 else 11737 return 0; 11738 } 11739 11740 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11741 { 11742 uint8_t res; 11743 res = a + b; 11744 if (res < a) 11745 res = 0xff; 11746 return res; 11747 } 11748 11749 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11750 { 11751 if (a > b) 11752 return a - b; 11753 else 11754 return 0; 11755 } 11756 11757 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11758 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11759 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11760 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11761 #define PFX uq 11762 11763 #include "op_addsub.h" 11764 11765 /* Signed modulo arithmetic. */ 11766 #define SARITH16(a, b, n, op) do { \ 11767 int32_t sum; \ 11768 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11769 RESULT(sum, n, 16); \ 11770 if (sum >= 0) \ 11771 ge |= 3 << (n * 2); \ 11772 } while(0) 11773 11774 #define SARITH8(a, b, n, op) do { \ 11775 int32_t sum; \ 11776 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11777 RESULT(sum, n, 8); \ 11778 if (sum >= 0) \ 11779 ge |= 1 << n; \ 11780 } while(0) 11781 11782 11783 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11784 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11785 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11786 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11787 #define PFX s 11788 #define ARITH_GE 11789 11790 #include "op_addsub.h" 11791 11792 /* Unsigned modulo arithmetic. */ 11793 #define ADD16(a, b, n) do { \ 11794 uint32_t sum; \ 11795 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 11796 RESULT(sum, n, 16); \ 11797 if ((sum >> 16) == 1) \ 11798 ge |= 3 << (n * 2); \ 11799 } while(0) 11800 11801 #define ADD8(a, b, n) do { \ 11802 uint32_t sum; \ 11803 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 11804 RESULT(sum, n, 8); \ 11805 if ((sum >> 8) == 1) \ 11806 ge |= 1 << n; \ 11807 } while(0) 11808 11809 #define SUB16(a, b, n) do { \ 11810 uint32_t sum; \ 11811 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 11812 RESULT(sum, n, 16); \ 11813 if ((sum >> 16) == 0) \ 11814 ge |= 3 << (n * 2); \ 11815 } while(0) 11816 11817 #define SUB8(a, b, n) do { \ 11818 uint32_t sum; \ 11819 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 11820 RESULT(sum, n, 8); \ 11821 if ((sum >> 8) == 0) \ 11822 ge |= 1 << n; \ 11823 } while(0) 11824 11825 #define PFX u 11826 #define ARITH_GE 11827 11828 #include "op_addsub.h" 11829 11830 /* Halved signed arithmetic. */ 11831 #define ADD16(a, b, n) \ 11832 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 11833 #define SUB16(a, b, n) \ 11834 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 11835 #define ADD8(a, b, n) \ 11836 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 11837 #define SUB8(a, b, n) \ 11838 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 11839 #define PFX sh 11840 11841 #include "op_addsub.h" 11842 11843 /* Halved unsigned arithmetic. 
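 * The full-precision sum or difference is computed in 32 bits and then
 * shifted right by one, so the result always fits the lane: e.g. for bytes,
 * (0xff + 0xff) >> 1 == 0xff.  With PFX uh these become the halving parallel
 * helpers (the UHADD8/UHSUB16 family).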
*/ 11844 #define ADD16(a, b, n) \ 11845 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11846 #define SUB16(a, b, n) \ 11847 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 11848 #define ADD8(a, b, n) \ 11849 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11850 #define SUB8(a, b, n) \ 11851 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 11852 #define PFX uh 11853 11854 #include "op_addsub.h" 11855 11856 static inline uint8_t do_usad(uint8_t a, uint8_t b) 11857 { 11858 if (a > b) 11859 return a - b; 11860 else 11861 return b - a; 11862 } 11863 11864 /* Unsigned sum of absolute byte differences. */ 11865 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 11866 { 11867 uint32_t sum; 11868 sum = do_usad(a, b); 11869 sum += do_usad(a >> 8, b >> 8); 11870 sum += do_usad(a >> 16, b >>16); 11871 sum += do_usad(a >> 24, b >> 24); 11872 return sum; 11873 } 11874 11875 /* For ARMv6 SEL instruction. */ 11876 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 11877 { 11878 uint32_t mask; 11879 11880 mask = 0; 11881 if (flags & 1) 11882 mask |= 0xff; 11883 if (flags & 2) 11884 mask |= 0xff00; 11885 if (flags & 4) 11886 mask |= 0xff0000; 11887 if (flags & 8) 11888 mask |= 0xff000000; 11889 return (a & mask) | (b & ~mask); 11890 } 11891 11892 /* CRC helpers. 11893 * The upper bytes of val (above the number specified by 'bytes') must have 11894 * been zeroed out by the caller. 11895 */ 11896 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 11897 { 11898 uint8_t buf[4]; 11899 11900 stl_le_p(buf, val); 11901 11902 /* zlib crc32 converts the accumulator and output to one's complement. */ 11903 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 11904 } 11905 11906 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 11907 { 11908 uint8_t buf[4]; 11909 11910 stl_le_p(buf, val); 11911 11912 /* Linux crc32c converts the output to one's complement. */ 11913 return crc32c(acc, buf, bytes) ^ 0xffffffff; 11914 } 11915 11916 /* Return the exception level to which FP-disabled exceptions should 11917 * be taken, or 0 if FP is enabled. 11918 */ 11919 int fp_exception_el(CPUARMState *env, int cur_el) 11920 { 11921 #ifndef CONFIG_USER_ONLY 11922 /* CPACR and the CPTR registers don't exist before v6, so FP is 11923 * always accessible 11924 */ 11925 if (!arm_feature(env, ARM_FEATURE_V6)) { 11926 return 0; 11927 } 11928 11929 if (arm_feature(env, ARM_FEATURE_M)) { 11930 /* CPACR can cause a NOCP UsageFault taken to current security state */ 11931 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { 11932 return 1; 11933 } 11934 11935 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { 11936 if (!extract32(env->v7m.nsacr, 10, 1)) { 11937 /* FP insns cause a NOCP UsageFault taken to Secure */ 11938 return 3; 11939 } 11940 } 11941 11942 return 0; 11943 } 11944 11945 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 11946 * 0, 2 : trap EL0 and EL1/PL1 accesses 11947 * 1 : trap only EL0 accesses 11948 * 3 : trap no accesses 11949 * This register is ignored if E2H+TGE are both set. 
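     * (fpen below is CPACR_EL1.FPEN, bits [21:20]; the switch statement maps
     * each encoding to the EL that should take the trap, and falls through
     * when FP accesses are not trapped.)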
11950 */ 11951 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 11952 int fpen = extract32(env->cp15.cpacr_el1, 20, 2); 11953 11954 switch (fpen) { 11955 case 0: 11956 case 2: 11957 if (cur_el == 0 || cur_el == 1) { 11958 /* Trap to PL1, which might be EL1 or EL3 */ 11959 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 11960 return 3; 11961 } 11962 return 1; 11963 } 11964 if (cur_el == 3 && !is_a64(env)) { 11965 /* Secure PL1 running at EL3 */ 11966 return 3; 11967 } 11968 break; 11969 case 1: 11970 if (cur_el == 0) { 11971 return 1; 11972 } 11973 break; 11974 case 3: 11975 break; 11976 } 11977 } 11978 11979 /* 11980 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode 11981 * to control non-secure access to the FPU. It doesn't have any 11982 * effect if EL3 is AArch64 or if EL3 doesn't exist at all. 11983 */ 11984 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 11985 cur_el <= 2 && !arm_is_secure_below_el3(env))) { 11986 if (!extract32(env->cp15.nsacr, 10, 1)) { 11987 /* FP insns act as UNDEF */ 11988 return cur_el == 2 ? 2 : 1; 11989 } 11990 } 11991 11992 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 11993 * check because zero bits in the registers mean "don't trap". 11994 */ 11995 11996 /* CPTR_EL2 : present in v7VE or v8 */ 11997 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 11998 && !arm_is_secure_below_el3(env)) { 11999 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 12000 return 2; 12001 } 12002 12003 /* CPTR_EL3 : present in v8 */ 12004 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 12005 /* Trap all FP ops to EL3 */ 12006 return 3; 12007 } 12008 #endif 12009 return 0; 12010 } 12011 12012 /* Return the exception level we're running at if this is our mmu_idx */ 12013 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) 12014 { 12015 if (mmu_idx & ARM_MMU_IDX_M) { 12016 return mmu_idx & ARM_MMU_IDX_M_PRIV; 12017 } 12018 12019 switch (mmu_idx) { 12020 case ARMMMUIdx_E10_0: 12021 case ARMMMUIdx_E20_0: 12022 case ARMMMUIdx_SE10_0: 12023 return 0; 12024 case ARMMMUIdx_E10_1: 12025 case ARMMMUIdx_E10_1_PAN: 12026 case ARMMMUIdx_SE10_1: 12027 case ARMMMUIdx_SE10_1_PAN: 12028 return 1; 12029 case ARMMMUIdx_E2: 12030 case ARMMMUIdx_E20_2: 12031 case ARMMMUIdx_E20_2_PAN: 12032 return 2; 12033 case ARMMMUIdx_SE3: 12034 return 3; 12035 default: 12036 g_assert_not_reached(); 12037 } 12038 } 12039 12040 #ifndef CONFIG_TCG 12041 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 12042 { 12043 g_assert_not_reached(); 12044 } 12045 #endif 12046 12047 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) 12048 { 12049 if (arm_feature(env, ARM_FEATURE_M)) { 12050 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 12051 } 12052 12053 /* See ARM pseudo-function ELIsInHost. */ 12054 switch (el) { 12055 case 0: 12056 if (arm_is_secure_below_el3(env)) { 12057 return ARMMMUIdx_SE10_0; 12058 } 12059 if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE) 12060 && arm_el_is_aa64(env, 2)) { 12061 return ARMMMUIdx_E20_0; 12062 } 12063 return ARMMMUIdx_E10_0; 12064 case 1: 12065 if (arm_is_secure_below_el3(env)) { 12066 if (env->pstate & PSTATE_PAN) { 12067 return ARMMMUIdx_SE10_1_PAN; 12068 } 12069 return ARMMMUIdx_SE10_1; 12070 } 12071 if (env->pstate & PSTATE_PAN) { 12072 return ARMMMUIdx_E10_1_PAN; 12073 } 12074 return ARMMMUIdx_E10_1; 12075 case 2: 12076 /* TODO: ARMv8.4-SecEL2 */ 12077 /* Note that TGE does not apply at EL2. 
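         * Only HCR_EL2.E2H (with EL2 itself being AArch64) selects the
         * EL2&0 (E20_2) regime below, with PSTATE.PAN choosing its _PAN
         * variant; otherwise the plain E2 regime is used.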
*/ 12078 if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) { 12079 if (env->pstate & PSTATE_PAN) { 12080 return ARMMMUIdx_E20_2_PAN; 12081 } 12082 return ARMMMUIdx_E20_2; 12083 } 12084 return ARMMMUIdx_E2; 12085 case 3: 12086 return ARMMMUIdx_SE3; 12087 default: 12088 g_assert_not_reached(); 12089 } 12090 } 12091 12092 ARMMMUIdx arm_mmu_idx(CPUARMState *env) 12093 { 12094 return arm_mmu_idx_el(env, arm_current_el(env)); 12095 } 12096 12097 int cpu_mmu_index(CPUARMState *env, bool ifetch) 12098 { 12099 return arm_to_core_mmu_idx(arm_mmu_idx(env)); 12100 } 12101 12102 #ifndef CONFIG_USER_ONLY 12103 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 12104 { 12105 return stage_1_mmu_idx(arm_mmu_idx(env)); 12106 } 12107 #endif 12108 12109 static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, 12110 ARMMMUIdx mmu_idx, uint32_t flags) 12111 { 12112 flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el); 12113 flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, 12114 arm_to_core_mmu_idx(mmu_idx)); 12115 12116 if (arm_singlestep_active(env)) { 12117 flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1); 12118 } 12119 return flags; 12120 } 12121 12122 static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, 12123 ARMMMUIdx mmu_idx, uint32_t flags) 12124 { 12125 bool sctlr_b = arm_sctlr_b(env); 12126 12127 if (sctlr_b) { 12128 flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1); 12129 } 12130 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { 12131 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 12132 } 12133 flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env)); 12134 12135 return rebuild_hflags_common(env, fp_el, mmu_idx, flags); 12136 } 12137 12138 static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, 12139 ARMMMUIdx mmu_idx) 12140 { 12141 uint32_t flags = 0; 12142 12143 if (arm_v7m_is_handler_mode(env)) { 12144 flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1); 12145 } 12146 12147 /* 12148 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN 12149 * is suppressing them because the requested execution priority 12150 * is less than 0. 
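     * (A negative execution priority is encoded as ARM_MMU_IDX_M_NEGPRI in
     * the mmu_idx, so for v8M STACKCHECK is set unless both that bit and the
     * current security state's CCR.STKOFHFNMIGN are set.)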
12151 */ 12152 if (arm_feature(env, ARM_FEATURE_V8) && 12153 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && 12154 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { 12155 flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1); 12156 } 12157 12158 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 12159 } 12160 12161 static uint32_t rebuild_hflags_aprofile(CPUARMState *env) 12162 { 12163 int flags = 0; 12164 12165 flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, 12166 arm_debug_target_el(env)); 12167 return flags; 12168 } 12169 12170 static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, 12171 ARMMMUIdx mmu_idx) 12172 { 12173 uint32_t flags = rebuild_hflags_aprofile(env); 12174 12175 if (arm_el_is_aa64(env, 1)) { 12176 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 12177 } 12178 12179 if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && 12180 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 12181 flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1); 12182 } 12183 12184 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 12185 } 12186 12187 static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, 12188 ARMMMUIdx mmu_idx) 12189 { 12190 uint32_t flags = rebuild_hflags_aprofile(env); 12191 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); 12192 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 12193 uint64_t sctlr; 12194 int tbii, tbid; 12195 12196 flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1); 12197 12198 /* Get control bits for tagged addresses. */ 12199 tbid = aa64_va_parameter_tbi(tcr, mmu_idx); 12200 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); 12201 12202 flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii); 12203 flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid); 12204 12205 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 12206 int sve_el = sve_exception_el(env, el); 12207 uint32_t zcr_len; 12208 12209 /* 12210 * If SVE is disabled, but FP is enabled, 12211 * then the effective len is 0. 12212 */ 12213 if (sve_el != 0 && fp_el == 0) { 12214 zcr_len = 0; 12215 } else { 12216 zcr_len = sve_zcr_len_for_el(env, el); 12217 } 12218 flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el); 12219 flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len); 12220 } 12221 12222 sctlr = regime_sctlr(env, stage1); 12223 12224 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { 12225 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 12226 } 12227 12228 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { 12229 /* 12230 * In order to save space in flags, we record only whether 12231 * pauth is "inactive", meaning all insns are implemented as 12232 * a nop, or "active" when some action must be performed. 12233 * The decision of which action to take is left to a helper. 12234 */ 12235 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { 12236 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1); 12237 } 12238 } 12239 12240 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 12241 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ 12242 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { 12243 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1); 12244 } 12245 } 12246 12247 /* Compute the condition for using AccType_UNPRIV for LDTR et al. 
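     * The UNPRIV flag is set when LDTR/STTR and friends should behave as
     * unprivileged accesses: always for the EL1 regimes, and for the EL2&0
     * regimes only when HCR_EL2.TGE is set; PSTATE.UAO suppresses it
     * entirely.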
*/ 12248 if (!(env->pstate & PSTATE_UAO)) { 12249 switch (mmu_idx) { 12250 case ARMMMUIdx_E10_1: 12251 case ARMMMUIdx_E10_1_PAN: 12252 case ARMMMUIdx_SE10_1: 12253 case ARMMMUIdx_SE10_1_PAN: 12254 /* TODO: ARMv8.3-NV */ 12255 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); 12256 break; 12257 case ARMMMUIdx_E20_2: 12258 case ARMMMUIdx_E20_2_PAN: 12259 /* TODO: ARMv8.4-SecEL2 */ 12260 /* 12261 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is 12262 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR. 12263 */ 12264 if (env->cp15.hcr_el2 & HCR_TGE) { 12265 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); 12266 } 12267 break; 12268 default: 12269 break; 12270 } 12271 } 12272 12273 return rebuild_hflags_common(env, fp_el, mmu_idx, flags); 12274 } 12275 12276 static uint32_t rebuild_hflags_internal(CPUARMState *env) 12277 { 12278 int el = arm_current_el(env); 12279 int fp_el = fp_exception_el(env, el); 12280 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12281 12282 if (is_a64(env)) { 12283 return rebuild_hflags_a64(env, el, fp_el, mmu_idx); 12284 } else if (arm_feature(env, ARM_FEATURE_M)) { 12285 return rebuild_hflags_m32(env, fp_el, mmu_idx); 12286 } else { 12287 return rebuild_hflags_a32(env, fp_el, mmu_idx); 12288 } 12289 } 12290 12291 void arm_rebuild_hflags(CPUARMState *env) 12292 { 12293 env->hflags = rebuild_hflags_internal(env); 12294 } 12295 12296 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el) 12297 { 12298 int fp_el = fp_exception_el(env, el); 12299 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12300 12301 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); 12302 } 12303 12304 /* 12305 * If we have triggered a EL state change we can't rely on the 12306 * translator having passed it too us, we need to recompute. 12307 */ 12308 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env) 12309 { 12310 int el = arm_current_el(env); 12311 int fp_el = fp_exception_el(env, el); 12312 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12313 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 12314 } 12315 12316 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el) 12317 { 12318 int fp_el = fp_exception_el(env, el); 12319 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12320 12321 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 12322 } 12323 12324 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) 12325 { 12326 int fp_el = fp_exception_el(env, el); 12327 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12328 12329 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); 12330 } 12331 12332 static inline void assert_hflags_rebuild_correctly(CPUARMState *env) 12333 { 12334 #ifdef CONFIG_DEBUG_TCG 12335 uint32_t env_flags_current = env->hflags; 12336 uint32_t env_flags_rebuilt = rebuild_hflags_internal(env); 12337 12338 if (unlikely(env_flags_current != env_flags_rebuilt)) { 12339 fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", 12340 env_flags_current, env_flags_rebuilt); 12341 abort(); 12342 } 12343 #endif 12344 } 12345 12346 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 12347 target_ulong *cs_base, uint32_t *pflags) 12348 { 12349 uint32_t flags = env->hflags; 12350 uint32_t pstate_for_ss; 12351 12352 *cs_base = 0; 12353 assert_hflags_rebuild_correctly(env); 12354 12355 if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) { 12356 *pc = env->pc; 12357 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 12358 flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype); 12359 } 12360 pstate_for_ss = env->pstate; 12361 } else { 
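        /*
         * AArch32 (A/R and M profiles): the PC is regs[15] and the
         * software single-step state machine below uses the uncached CPSR.
         */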
12362 *pc = env->regs[15]; 12363 12364 if (arm_feature(env, ARM_FEATURE_M)) { 12365 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 12366 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) 12367 != env->v7m.secure) { 12368 flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1); 12369 } 12370 12371 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && 12372 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || 12373 (env->v7m.secure && 12374 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { 12375 /* 12376 * ASPEN is set, but FPCA/SFPA indicate that there is no 12377 * active FP context; we must create a new FP context before 12378 * executing any FP insn. 12379 */ 12380 flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1); 12381 } 12382 12383 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 12384 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { 12385 flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1); 12386 } 12387 } else { 12388 /* 12389 * Note that XSCALE_CPAR shares bits with VECSTRIDE. 12390 * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 12391 */ 12392 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 12393 flags = FIELD_DP32(flags, TBFLAG_A32, 12394 XSCALE_CPAR, env->cp15.c15_cpar); 12395 } else { 12396 flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, 12397 env->vfp.vec_len); 12398 flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, 12399 env->vfp.vec_stride); 12400 } 12401 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { 12402 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 12403 } 12404 } 12405 12406 flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb); 12407 flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits); 12408 pstate_for_ss = env->uncached_cpsr; 12409 } 12410 12411 /* 12412 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 12413 * states defined in the ARM ARM for software singlestep: 12414 * SS_ACTIVE PSTATE.SS State 12415 * 0 x Inactive (the TB flag for SS is always 0) 12416 * 1 0 Active-pending 12417 * 1 1 Active-not-pending 12418 * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB. 12419 */ 12420 if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && 12421 (pstate_for_ss & PSTATE_SS)) { 12422 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1); 12423 } 12424 12425 *pflags = flags; 12426 } 12427 12428 #ifdef TARGET_AARCH64 12429 /* 12430 * The manual says that when SVE is enabled and VQ is widened the 12431 * implementation is allowed to zero the previously inaccessible 12432 * portion of the registers. The corollary to that is that when 12433 * SVE is enabled and VQ is narrowed we are also allowed to zero 12434 * the now inaccessible portion of the registers. 12435 * 12436 * The intent of this is that no predicate bit beyond VQ is ever set. 12437 * Which means that some operations on predicate registers themselves 12438 * may operate on full uint64_t or even unrolled across the maximum 12439 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 12440 * may well be cheaper than conditionals to restrict the operation 12441 * to the relevant portion of a uint16_t[16]. 12442 */ 12443 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 12444 { 12445 int i, j; 12446 uint64_t pmask; 12447 12448 assert(vq >= 1 && vq <= ARM_MAX_VQ); 12449 assert(vq <= env_archcpu(env)->sve_max_vq); 12450 12451 /* Zap the high bits of the zregs. 
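     * Each vector register holds ARM_MAX_VQ 128-bit quadwords; the memset
     * below clears every quadword from index vq upwards, i.e. 16 bytes per
     * excess quadword starting at uint64_t element 2 * vq.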
*/ 12452 for (i = 0; i < 32; i++) { 12453 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); 12454 } 12455 12456 /* Zap the high bits of the pregs and ffr. */ 12457 pmask = 0; 12458 if (vq & 3) { 12459 pmask = ~(-1ULL << (16 * (vq & 3))); 12460 } 12461 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { 12462 for (i = 0; i < 17; ++i) { 12463 env->vfp.pregs[i].p[j] &= pmask; 12464 } 12465 pmask = 0; 12466 } 12467 } 12468 12469 /* 12470 * Notice a change in SVE vector size when changing EL. 12471 */ 12472 void aarch64_sve_change_el(CPUARMState *env, int old_el, 12473 int new_el, bool el0_a64) 12474 { 12475 ARMCPU *cpu = env_archcpu(env); 12476 int old_len, new_len; 12477 bool old_a64, new_a64; 12478 12479 /* Nothing to do if no SVE. */ 12480 if (!cpu_isar_feature(aa64_sve, cpu)) { 12481 return; 12482 } 12483 12484 /* Nothing to do if FP is disabled in either EL. */ 12485 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { 12486 return; 12487 } 12488 12489 /* 12490 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped 12491 * at ELx, or not available because the EL is in AArch32 state, then 12492 * for all purposes other than a direct read, the ZCR_ELx.LEN field 12493 * has an effective value of 0". 12494 * 12495 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). 12496 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition 12497 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that 12498 * we already have the correct register contents when encountering the 12499 * vq0->vq0 transition between EL0->EL1. 12500 */ 12501 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; 12502 old_len = (old_a64 && !sve_exception_el(env, old_el) 12503 ? sve_zcr_len_for_el(env, old_el) : 0); 12504 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; 12505 new_len = (new_a64 && !sve_exception_el(env, new_el) 12506 ? sve_zcr_len_for_el(env, new_el) : 0); 12507 12508 /* When changing vector length, clear inaccessible state. */ 12509 if (new_len < old_len) { 12510 aarch64_sve_narrow_vq(env, new_len + 1); 12511 } 12512 } 12513 #endif 12514