1 /* 2 * ARM generic helpers. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/units.h" 11 #include "target/arm/idau.h" 12 #include "trace.h" 13 #include "cpu.h" 14 #include "internals.h" 15 #include "exec/gdbstub.h" 16 #include "exec/helper-proto.h" 17 #include "qemu/host-utils.h" 18 #include "qemu/main-loop.h" 19 #include "qemu/bitops.h" 20 #include "qemu/crc32c.h" 21 #include "qemu/qemu-print.h" 22 #include "exec/exec-all.h" 23 #include <zlib.h> /* For crc32 */ 24 #include "hw/irq.h" 25 #include "hw/semihosting/semihost.h" 26 #include "sysemu/cpus.h" 27 #include "sysemu/kvm.h" 28 #include "sysemu/tcg.h" 29 #include "qemu/range.h" 30 #include "qapi/qapi-commands-machine-target.h" 31 #include "qapi/error.h" 32 #include "qemu/guest-random.h" 33 #ifdef CONFIG_TCG 34 #include "arm_ldst.h" 35 #include "exec/cpu_ldst.h" 36 #endif 37 38 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 39 40 #ifndef CONFIG_USER_ONLY 41 42 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 43 MMUAccessType access_type, ARMMMUIdx mmu_idx, 44 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 45 target_ulong *page_size_ptr, 46 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs); 47 #endif 48 49 static void switch_mode(CPUARMState *env, int mode); 50 51 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 52 { 53 int nregs; 54 55 /* VFP data registers are always little-endian. */ 56 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 57 if (reg < nregs) { 58 stq_le_p(buf, *aa32_vfp_dreg(env, reg)); 59 return 8; 60 } 61 if (arm_feature(env, ARM_FEATURE_NEON)) { 62 /* Aliases for Q regs. */ 63 nregs += 16; 64 if (reg < nregs) { 65 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 66 stq_le_p(buf, q[0]); 67 stq_le_p(buf + 8, q[1]); 68 return 16; 69 } 70 } 71 switch (reg - nregs) { 72 case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4; 73 case 1: stl_p(buf, vfp_get_fpscr(env)); return 4; 74 case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4; 75 } 76 return 0; 77 } 78 79 static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 80 { 81 int nregs; 82 83 nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; 84 if (reg < nregs) { 85 *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); 86 return 8; 87 } 88 if (arm_feature(env, ARM_FEATURE_NEON)) { 89 nregs += 16; 90 if (reg < nregs) { 91 uint64_t *q = aa32_vfp_qreg(env, reg - 32); 92 q[0] = ldq_le_p(buf); 93 q[1] = ldq_le_p(buf + 8); 94 return 16; 95 } 96 } 97 switch (reg - nregs) { 98 case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; 99 case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4; 100 case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; 101 } 102 return 0; 103 } 104 105 static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) 106 { 107 switch (reg) { 108 case 0 ... 31: 109 /* 128 bit FP register */ 110 { 111 uint64_t *q = aa64_vfp_qreg(env, reg); 112 stq_le_p(buf, q[0]); 113 stq_le_p(buf + 8, q[1]); 114 return 16; 115 } 116 case 32: 117 /* FPSR */ 118 stl_p(buf, vfp_get_fpsr(env)); 119 return 4; 120 case 33: 121 /* FPCR */ 122 stl_p(buf, vfp_get_fpcr(env)); 123 return 4; 124 default: 125 return 0; 126 } 127 } 128 129 static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) 130 { 131 switch (reg) { 132 case 0 ... 
31: 133 /* 128 bit FP register */ 134 { 135 uint64_t *q = aa64_vfp_qreg(env, reg); 136 q[0] = ldq_le_p(buf); 137 q[1] = ldq_le_p(buf + 8); 138 return 16; 139 } 140 case 32: 141 /* FPSR */ 142 vfp_set_fpsr(env, ldl_p(buf)); 143 return 4; 144 case 33: 145 /* FPCR */ 146 vfp_set_fpcr(env, ldl_p(buf)); 147 return 4; 148 default: 149 return 0; 150 } 151 } 152 153 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 154 { 155 assert(ri->fieldoffset); 156 if (cpreg_field_is_64bit(ri)) { 157 return CPREG_FIELD64(env, ri); 158 } else { 159 return CPREG_FIELD32(env, ri); 160 } 161 } 162 163 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 164 uint64_t value) 165 { 166 assert(ri->fieldoffset); 167 if (cpreg_field_is_64bit(ri)) { 168 CPREG_FIELD64(env, ri) = value; 169 } else { 170 CPREG_FIELD32(env, ri) = value; 171 } 172 } 173 174 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 175 { 176 return (char *)env + ri->fieldoffset; 177 } 178 179 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 180 { 181 /* Raw read of a coprocessor register (as needed for migration, etc). */ 182 if (ri->type & ARM_CP_CONST) { 183 return ri->resetvalue; 184 } else if (ri->raw_readfn) { 185 return ri->raw_readfn(env, ri); 186 } else if (ri->readfn) { 187 return ri->readfn(env, ri); 188 } else { 189 return raw_read(env, ri); 190 } 191 } 192 193 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 194 uint64_t v) 195 { 196 /* Raw write of a coprocessor register (as needed for migration, etc). 197 * Note that constant registers are treated as write-ignored; the 198 * caller should check for success by whether a readback gives the 199 * value written. 200 */ 201 if (ri->type & ARM_CP_CONST) { 202 return; 203 } else if (ri->raw_writefn) { 204 ri->raw_writefn(env, ri, v); 205 } else if (ri->writefn) { 206 ri->writefn(env, ri, v); 207 } else { 208 raw_write(env, ri, v); 209 } 210 } 211 212 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg) 213 { 214 ARMCPU *cpu = env_archcpu(env); 215 const ARMCPRegInfo *ri; 216 uint32_t key; 217 218 key = cpu->dyn_xml.cpregs_keys[reg]; 219 ri = get_arm_cp_reginfo(cpu->cp_regs, key); 220 if (ri) { 221 if (cpreg_field_is_64bit(ri)) { 222 return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); 223 } else { 224 return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); 225 } 226 } 227 return 0; 228 } 229 230 static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) 231 { 232 return 0; 233 } 234 235 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 236 { 237 /* Return true if the regdef would cause an assertion if you called 238 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 239 * program bug for it not to have the NO_RAW flag). 240 * NB that returning false here doesn't necessarily mean that calling 241 * read/write_raw_cp_reg() is safe, because we can't distinguish "has 242 * read/write access functions which are safe for raw use" from "has 243 * read/write access functions which have side effects but has forgotten 244 * to provide raw access functions". 245 * The tests here line up with the conditions in read/write_raw_cp_reg() 246 * and assertions in raw_read()/raw_write(). 
247 */ 248 if ((ri->type & ARM_CP_CONST) || 249 ri->fieldoffset || 250 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { 251 return false; 252 } 253 return true; 254 } 255 256 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) 257 { 258 /* Write the coprocessor state from cpu->env to the (index,value) list. */ 259 int i; 260 bool ok = true; 261 262 for (i = 0; i < cpu->cpreg_array_len; i++) { 263 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 264 const ARMCPRegInfo *ri; 265 uint64_t newval; 266 267 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 268 if (!ri) { 269 ok = false; 270 continue; 271 } 272 if (ri->type & ARM_CP_NO_RAW) { 273 continue; 274 } 275 276 newval = read_raw_cp_reg(&cpu->env, ri); 277 if (kvm_sync) { 278 /* 279 * Only sync if the previous list->cpustate sync succeeded. 280 * Rather than tracking the success/failure state for every 281 * item in the list, we just recheck "does the raw write we must 282 * have made in write_list_to_cpustate() read back OK" here. 283 */ 284 uint64_t oldval = cpu->cpreg_values[i]; 285 286 if (oldval == newval) { 287 continue; 288 } 289 290 write_raw_cp_reg(&cpu->env, ri, oldval); 291 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { 292 continue; 293 } 294 295 write_raw_cp_reg(&cpu->env, ri, newval); 296 } 297 cpu->cpreg_values[i] = newval; 298 } 299 return ok; 300 } 301 302 bool write_list_to_cpustate(ARMCPU *cpu) 303 { 304 int i; 305 bool ok = true; 306 307 for (i = 0; i < cpu->cpreg_array_len; i++) { 308 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 309 uint64_t v = cpu->cpreg_values[i]; 310 const ARMCPRegInfo *ri; 311 312 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 313 if (!ri) { 314 ok = false; 315 continue; 316 } 317 if (ri->type & ARM_CP_NO_RAW) { 318 continue; 319 } 320 /* Write value and confirm it reads back as written 321 * (to catch read-only registers and partially read-only 322 * registers where the incoming migration value doesn't match) 323 */ 324 write_raw_cp_reg(&cpu->env, ri, v); 325 if (read_raw_cp_reg(&cpu->env, ri) != v) { 326 ok = false; 327 } 328 } 329 return ok; 330 } 331 332 static void add_cpreg_to_list(gpointer key, gpointer opaque) 333 { 334 ARMCPU *cpu = opaque; 335 uint64_t regidx; 336 const ARMCPRegInfo *ri; 337 338 regidx = *(uint32_t *)key; 339 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 340 341 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 342 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); 343 /* The value array need not be initialized at this point */ 344 cpu->cpreg_array_len++; 345 } 346 } 347 348 static void count_cpreg(gpointer key, gpointer opaque) 349 { 350 ARMCPU *cpu = opaque; 351 uint64_t regidx; 352 const ARMCPRegInfo *ri; 353 354 regidx = *(uint32_t *)key; 355 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 356 357 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 358 cpu->cpreg_array_len++; 359 } 360 } 361 362 static gint cpreg_key_compare(gconstpointer a, gconstpointer b) 363 { 364 uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a); 365 uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b); 366 367 if (aidx > bidx) { 368 return 1; 369 } 370 if (aidx < bidx) { 371 return -1; 372 } 373 return 0; 374 } 375 376 void init_cpreg_list(ARMCPU *cpu) 377 { 378 /* Initialise the cpreg_tuples[] array based on the cp_regs hash. 379 * Note that we require cpreg_tuples[] to be sorted by key ID. 
380 */ 381 GList *keys; 382 int arraylen; 383 384 keys = g_hash_table_get_keys(cpu->cp_regs); 385 keys = g_list_sort(keys, cpreg_key_compare); 386 387 cpu->cpreg_array_len = 0; 388 389 g_list_foreach(keys, count_cpreg, cpu); 390 391 arraylen = cpu->cpreg_array_len; 392 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 393 cpu->cpreg_values = g_new(uint64_t, arraylen); 394 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 395 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 396 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 397 cpu->cpreg_array_len = 0; 398 399 g_list_foreach(keys, add_cpreg_to_list, cpu); 400 401 assert(cpu->cpreg_array_len == arraylen); 402 403 g_list_free(keys); 404 } 405 406 /* 407 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but 408 * they are accessible when EL3 is using AArch64 regardless of EL3.NS. 409 * 410 * access_el3_aa32ns: Used to check AArch32 register views. 411 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views. 412 */ 413 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 414 const ARMCPRegInfo *ri, 415 bool isread) 416 { 417 bool secure = arm_is_secure_below_el3(env); 418 419 assert(!arm_el_is_aa64(env, 3)); 420 if (secure) { 421 return CP_ACCESS_TRAP_UNCATEGORIZED; 422 } 423 return CP_ACCESS_OK; 424 } 425 426 static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env, 427 const ARMCPRegInfo *ri, 428 bool isread) 429 { 430 if (!arm_el_is_aa64(env, 3)) { 431 return access_el3_aa32ns(env, ri, isread); 432 } 433 return CP_ACCESS_OK; 434 } 435 436 /* Some secure-only AArch32 registers trap to EL3 if used from 437 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 438 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 439 * We assume that the .access field is set to PL1_RW. 440 */ 441 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 442 const ARMCPRegInfo *ri, 443 bool isread) 444 { 445 if (arm_current_el(env) == 3) { 446 return CP_ACCESS_OK; 447 } 448 if (arm_is_secure_below_el3(env)) { 449 return CP_ACCESS_TRAP_EL3; 450 } 451 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 452 return CP_ACCESS_TRAP_UNCATEGORIZED; 453 } 454 455 /* Check for traps to "powerdown debug" registers, which are controlled 456 * by MDCR.TDOSA 457 */ 458 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 459 bool isread) 460 { 461 int el = arm_current_el(env); 462 bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) || 463 (env->cp15.mdcr_el2 & MDCR_TDE) || 464 (arm_hcr_el2_eff(env) & HCR_TGE); 465 466 if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) { 467 return CP_ACCESS_TRAP_EL2; 468 } 469 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 470 return CP_ACCESS_TRAP_EL3; 471 } 472 return CP_ACCESS_OK; 473 } 474 475 /* Check for traps to "debug ROM" registers, which are controlled 476 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 
477 */ 478 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 479 bool isread) 480 { 481 int el = arm_current_el(env); 482 bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) || 483 (env->cp15.mdcr_el2 & MDCR_TDE) || 484 (arm_hcr_el2_eff(env) & HCR_TGE); 485 486 if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) { 487 return CP_ACCESS_TRAP_EL2; 488 } 489 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 490 return CP_ACCESS_TRAP_EL3; 491 } 492 return CP_ACCESS_OK; 493 } 494 495 /* Check for traps to general debug registers, which are controlled 496 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 497 */ 498 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 499 bool isread) 500 { 501 int el = arm_current_el(env); 502 bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) || 503 (env->cp15.mdcr_el2 & MDCR_TDE) || 504 (arm_hcr_el2_eff(env) & HCR_TGE); 505 506 if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) { 507 return CP_ACCESS_TRAP_EL2; 508 } 509 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 510 return CP_ACCESS_TRAP_EL3; 511 } 512 return CP_ACCESS_OK; 513 } 514 515 /* Check for traps to performance monitor registers, which are controlled 516 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 517 */ 518 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 519 bool isread) 520 { 521 int el = arm_current_el(env); 522 523 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 524 && !arm_is_secure_below_el3(env)) { 525 return CP_ACCESS_TRAP_EL2; 526 } 527 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 528 return CP_ACCESS_TRAP_EL3; 529 } 530 return CP_ACCESS_OK; 531 } 532 533 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 534 { 535 ARMCPU *cpu = env_archcpu(env); 536 537 raw_write(env, ri, value); 538 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 539 } 540 541 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 542 { 543 ARMCPU *cpu = env_archcpu(env); 544 545 if (raw_read(env, ri) != value) { 546 /* Unlike real hardware the qemu TLB uses virtual addresses, 547 * not modified virtual addresses, so this causes a TLB flush. 548 */ 549 tlb_flush(CPU(cpu)); 550 raw_write(env, ri, value); 551 } 552 } 553 554 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 555 uint64_t value) 556 { 557 ARMCPU *cpu = env_archcpu(env); 558 559 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 560 && !extended_addresses_enabled(env)) { 561 /* For VMSA (when not using the LPAE long descriptor page table 562 * format) this register includes the ASID, so do a TLB flush. 563 * For PMSA it is purely a process ID and no action is needed. 
564 */ 565 tlb_flush(CPU(cpu)); 566 } 567 raw_write(env, ri, value); 568 } 569 570 /* IS variants of TLB operations must affect all cores */ 571 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 572 uint64_t value) 573 { 574 CPUState *cs = env_cpu(env); 575 576 tlb_flush_all_cpus_synced(cs); 577 } 578 579 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 580 uint64_t value) 581 { 582 CPUState *cs = env_cpu(env); 583 584 tlb_flush_all_cpus_synced(cs); 585 } 586 587 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 588 uint64_t value) 589 { 590 CPUState *cs = env_cpu(env); 591 592 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 593 } 594 595 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 596 uint64_t value) 597 { 598 CPUState *cs = env_cpu(env); 599 600 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 601 } 602 603 /* 604 * Non-IS variants of TLB operations are upgraded to 605 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to 606 * force broadcast of these operations. 607 */ 608 static bool tlb_force_broadcast(CPUARMState *env) 609 { 610 return (env->cp15.hcr_el2 & HCR_FB) && 611 arm_current_el(env) == 1 && arm_is_secure_below_el3(env); 612 } 613 614 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, 615 uint64_t value) 616 { 617 /* Invalidate all (TLBIALL) */ 618 CPUState *cs = env_cpu(env); 619 620 if (tlb_force_broadcast(env)) { 621 tlb_flush_all_cpus_synced(cs); 622 } else { 623 tlb_flush(cs); 624 } 625 } 626 627 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, 628 uint64_t value) 629 { 630 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 631 CPUState *cs = env_cpu(env); 632 633 value &= TARGET_PAGE_MASK; 634 if (tlb_force_broadcast(env)) { 635 tlb_flush_page_all_cpus_synced(cs, value); 636 } else { 637 tlb_flush_page(cs, value); 638 } 639 } 640 641 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, 642 uint64_t value) 643 { 644 /* Invalidate by ASID (TLBIASID) */ 645 CPUState *cs = env_cpu(env); 646 647 if (tlb_force_broadcast(env)) { 648 tlb_flush_all_cpus_synced(cs); 649 } else { 650 tlb_flush(cs); 651 } 652 } 653 654 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, 655 uint64_t value) 656 { 657 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 658 CPUState *cs = env_cpu(env); 659 660 value &= TARGET_PAGE_MASK; 661 if (tlb_force_broadcast(env)) { 662 tlb_flush_page_all_cpus_synced(cs, value); 663 } else { 664 tlb_flush_page(cs, value); 665 } 666 } 667 668 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 669 uint64_t value) 670 { 671 CPUState *cs = env_cpu(env); 672 673 tlb_flush_by_mmuidx(cs, 674 ARMMMUIdxBit_E10_1 | 675 ARMMMUIdxBit_E10_1_PAN | 676 ARMMMUIdxBit_E10_0 | 677 ARMMMUIdxBit_Stage2); 678 } 679 680 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 681 uint64_t value) 682 { 683 CPUState *cs = env_cpu(env); 684 685 tlb_flush_by_mmuidx_all_cpus_synced(cs, 686 ARMMMUIdxBit_E10_1 | 687 ARMMMUIdxBit_E10_1_PAN | 688 ARMMMUIdxBit_E10_0 | 689 ARMMMUIdxBit_Stage2); 690 } 691 692 static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, 693 uint64_t value) 694 { 695 /* Invalidate by IPA. This has to invalidate any structures that 696 * contain only stage 2 translation information, but does not need 697 * to apply to structures that contain combined stage 1 and stage 2 698 * translation information. 
699 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 700 */ 701 CPUState *cs = env_cpu(env); 702 uint64_t pageaddr; 703 704 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 705 return; 706 } 707 708 pageaddr = sextract64(value << 12, 0, 40); 709 710 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); 711 } 712 713 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 714 uint64_t value) 715 { 716 CPUState *cs = env_cpu(env); 717 uint64_t pageaddr; 718 719 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 720 return; 721 } 722 723 pageaddr = sextract64(value << 12, 0, 40); 724 725 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 726 ARMMMUIdxBit_Stage2); 727 } 728 729 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 730 uint64_t value) 731 { 732 CPUState *cs = env_cpu(env); 733 734 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); 735 } 736 737 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 738 uint64_t value) 739 { 740 CPUState *cs = env_cpu(env); 741 742 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); 743 } 744 745 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 746 uint64_t value) 747 { 748 CPUState *cs = env_cpu(env); 749 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 750 751 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); 752 } 753 754 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 755 uint64_t value) 756 { 757 CPUState *cs = env_cpu(env); 758 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 759 760 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 761 ARMMMUIdxBit_E2); 762 } 763 764 static const ARMCPRegInfo cp_reginfo[] = { 765 /* Define the secure and non-secure FCSE identifier CP registers 766 * separately because there is no secure bank in V8 (no _EL3). This allows 767 * the secure register to be properly reset and migrated. There is also no 768 * v8 EL1 version of the register so the non-secure instance stands alone. 769 */ 770 { .name = "FCSEIDR", 771 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 772 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 773 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), 774 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 775 { .name = "FCSEIDR_S", 776 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 777 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 778 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), 779 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 780 /* Define the secure and non-secure context identifier CP registers 781 * separately because there is no secure bank in V8 (no _EL3). This allows 782 * the secure register to be properly reset and migrated. In the 783 * non-secure case, the 32-bit register will have reset and migration 784 * disabled during registration as it is handled by the 64-bit instance. 
785 */ 786 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 787 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 788 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 789 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 790 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 791 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, 792 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 793 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 794 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 795 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 796 REGINFO_SENTINEL 797 }; 798 799 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 800 /* NB: Some of these registers exist in v8 but with more precise 801 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 802 */ 803 /* MMU Domain access control / MPU write buffer control */ 804 { .name = "DACR", 805 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 806 .access = PL1_RW, .resetvalue = 0, 807 .writefn = dacr_write, .raw_writefn = raw_write, 808 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 809 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 810 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 811 * For v6 and v5, these mappings are overly broad. 812 */ 813 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 814 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 815 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 816 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 817 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 818 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 819 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 820 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 821 /* Cache maintenance ops; some of this space may be overridden later. */ 822 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 823 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 824 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 825 REGINFO_SENTINEL 826 }; 827 828 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 829 /* Not all pre-v6 cores implemented this WFI, so this is slightly 830 * over-broad. 831 */ 832 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 833 .access = PL1_W, .type = ARM_CP_WFI }, 834 REGINFO_SENTINEL 835 }; 836 837 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 838 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 839 * is UNPREDICTABLE; we choose to NOP as most implementations do). 840 */ 841 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 842 .access = PL1_W, .type = ARM_CP_WFI }, 843 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 844 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 845 * OMAPCP will override this space. 
846 */ 847 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 848 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 849 .resetvalue = 0 }, 850 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 851 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 852 .resetvalue = 0 }, 853 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 854 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 855 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 856 .resetvalue = 0 }, 857 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 858 * implementing it as RAZ means the "debug architecture version" bits 859 * will read as a reserved value, which should cause Linux to not try 860 * to use the debug hardware. 861 */ 862 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 863 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 864 /* MMU TLB control. Note that the wildcarding means we cover not just 865 * the unified TLB ops but also the dside/iside/inner-shareable variants. 866 */ 867 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 868 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 869 .type = ARM_CP_NO_RAW }, 870 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 871 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 872 .type = ARM_CP_NO_RAW }, 873 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 874 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 875 .type = ARM_CP_NO_RAW }, 876 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 877 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 878 .type = ARM_CP_NO_RAW }, 879 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 880 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 881 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 882 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 883 REGINFO_SENTINEL 884 }; 885 886 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 887 uint64_t value) 888 { 889 uint32_t mask = 0; 890 891 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 892 if (!arm_feature(env, ARM_FEATURE_V8)) { 893 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 894 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 895 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 896 */ 897 if (arm_feature(env, ARM_FEATURE_VFP)) { 898 /* VFP coprocessor: cp10 & cp11 [23:20] */ 899 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 900 901 if (!arm_feature(env, ARM_FEATURE_NEON)) { 902 /* ASEDIS [31] bit is RAO/WI */ 903 value |= (1 << 31); 904 } 905 906 /* VFPv3 and upwards with NEON implement 32 double precision 907 * registers (D0-D31). 908 */ 909 if (!arm_feature(env, ARM_FEATURE_NEON) || 910 !arm_feature(env, ARM_FEATURE_VFP3)) { 911 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 912 value |= (1 << 30); 913 } 914 } 915 value &= mask; 916 } 917 918 /* 919 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 920 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 
921 */ 922 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 923 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 924 value &= ~(0xf << 20); 925 value |= env->cp15.cpacr_el1 & (0xf << 20); 926 } 927 928 env->cp15.cpacr_el1 = value; 929 } 930 931 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) 932 { 933 /* 934 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 935 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 936 */ 937 uint64_t value = env->cp15.cpacr_el1; 938 939 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 940 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 941 value &= ~(0xf << 20); 942 } 943 return value; 944 } 945 946 947 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 948 { 949 /* Call cpacr_write() so that we reset with the correct RAO bits set 950 * for our CPU features. 951 */ 952 cpacr_write(env, ri, 0); 953 } 954 955 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 956 bool isread) 957 { 958 if (arm_feature(env, ARM_FEATURE_V8)) { 959 /* Check if CPACR accesses are to be trapped to EL2 */ 960 if (arm_current_el(env) == 1 && 961 (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) { 962 return CP_ACCESS_TRAP_EL2; 963 /* Check if CPACR accesses are to be trapped to EL3 */ 964 } else if (arm_current_el(env) < 3 && 965 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 966 return CP_ACCESS_TRAP_EL3; 967 } 968 } 969 970 return CP_ACCESS_OK; 971 } 972 973 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 974 bool isread) 975 { 976 /* Check if CPTR accesses are set to trap to EL3 */ 977 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 978 return CP_ACCESS_TRAP_EL3; 979 } 980 981 return CP_ACCESS_OK; 982 } 983 984 static const ARMCPRegInfo v6_cp_reginfo[] = { 985 /* prefetch by MVA in v6, NOP in v7 */ 986 { .name = "MVA_prefetch", 987 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 988 .access = PL1_W, .type = ARM_CP_NOP }, 989 /* We need to break the TB after ISB to execute self-modifying code 990 * correctly and also to take any pending interrupts immediately. 991 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 992 */ 993 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 994 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 995 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 996 .access = PL0_W, .type = ARM_CP_NOP }, 997 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 998 .access = PL0_W, .type = ARM_CP_NOP }, 999 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 1000 .access = PL1_RW, 1001 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 1002 offsetof(CPUARMState, cp15.ifar_ns) }, 1003 .resetvalue = 0, }, 1004 /* Watchpoint Fault Address Register : should actually only be present 1005 * for 1136, 1176, 11MPCore. 
1006 */ 1007 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 1008 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 1009 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 1010 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 1011 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 1012 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, 1013 REGINFO_SENTINEL 1014 }; 1015 1016 /* Definitions for the PMU registers */ 1017 #define PMCRN_MASK 0xf800 1018 #define PMCRN_SHIFT 11 1019 #define PMCRLC 0x40 1020 #define PMCRDP 0x10 1021 #define PMCRD 0x8 1022 #define PMCRC 0x4 1023 #define PMCRP 0x2 1024 #define PMCRE 0x1 1025 1026 #define PMXEVTYPER_P 0x80000000 1027 #define PMXEVTYPER_U 0x40000000 1028 #define PMXEVTYPER_NSK 0x20000000 1029 #define PMXEVTYPER_NSU 0x10000000 1030 #define PMXEVTYPER_NSH 0x08000000 1031 #define PMXEVTYPER_M 0x04000000 1032 #define PMXEVTYPER_MT 0x02000000 1033 #define PMXEVTYPER_EVTCOUNT 0x0000ffff 1034 #define PMXEVTYPER_MASK (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \ 1035 PMXEVTYPER_NSU | PMXEVTYPER_NSH | \ 1036 PMXEVTYPER_M | PMXEVTYPER_MT | \ 1037 PMXEVTYPER_EVTCOUNT) 1038 1039 #define PMCCFILTR 0xf8000000 1040 #define PMCCFILTR_M PMXEVTYPER_M 1041 #define PMCCFILTR_EL0 (PMCCFILTR | PMCCFILTR_M) 1042 1043 static inline uint32_t pmu_num_counters(CPUARMState *env) 1044 { 1045 return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT; 1046 } 1047 1048 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */ 1049 static inline uint64_t pmu_counter_mask(CPUARMState *env) 1050 { 1051 return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); 1052 } 1053 1054 typedef struct pm_event { 1055 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ 1056 /* If the event is supported on this CPU (used to generate PMCEID[01]) */ 1057 bool (*supported)(CPUARMState *); 1058 /* 1059 * Retrieve the current count of the underlying event. The programmed 1060 * counters hold a difference from the return value from this function 1061 */ 1062 uint64_t (*get_count)(CPUARMState *); 1063 /* 1064 * Return how many nanoseconds it will take (at a minimum) for count events 1065 * to occur. A negative value indicates the counter will never overflow, or 1066 * that the counter has otherwise arranged for the overflow bit to be set 1067 * and the PMU interrupt to be raised on overflow. 1068 */ 1069 int64_t (*ns_per_count)(uint64_t); 1070 } pm_event; 1071 1072 static bool event_always_supported(CPUARMState *env) 1073 { 1074 return true; 1075 } 1076 1077 static uint64_t swinc_get_count(CPUARMState *env) 1078 { 1079 /* 1080 * SW_INCR events are written directly to the pmevcntr's by writes to 1081 * PMSWINC, so there is no underlying count maintained by the PMU itself 1082 */ 1083 return 0; 1084 } 1085 1086 static int64_t swinc_ns_per(uint64_t ignored) 1087 { 1088 return -1; 1089 } 1090 1091 /* 1092 * Return the underlying cycle count for the PMU cycle counters. If we're in 1093 * usermode, simply return 0. 
1094 */ 1095 static uint64_t cycles_get_count(CPUARMState *env) 1096 { 1097 #ifndef CONFIG_USER_ONLY 1098 return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), 1099 ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); 1100 #else 1101 return cpu_get_host_ticks(); 1102 #endif 1103 } 1104 1105 #ifndef CONFIG_USER_ONLY 1106 static int64_t cycles_ns_per(uint64_t cycles) 1107 { 1108 return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles; 1109 } 1110 1111 static bool instructions_supported(CPUARMState *env) 1112 { 1113 return use_icount == 1 /* Precise instruction counting */; 1114 } 1115 1116 static uint64_t instructions_get_count(CPUARMState *env) 1117 { 1118 return (uint64_t)cpu_get_icount_raw(); 1119 } 1120 1121 static int64_t instructions_ns_per(uint64_t icount) 1122 { 1123 return cpu_icount_to_ns((int64_t)icount); 1124 } 1125 #endif 1126 1127 static const pm_event pm_events[] = { 1128 { .number = 0x000, /* SW_INCR */ 1129 .supported = event_always_supported, 1130 .get_count = swinc_get_count, 1131 .ns_per_count = swinc_ns_per, 1132 }, 1133 #ifndef CONFIG_USER_ONLY 1134 { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */ 1135 .supported = instructions_supported, 1136 .get_count = instructions_get_count, 1137 .ns_per_count = instructions_ns_per, 1138 }, 1139 { .number = 0x011, /* CPU_CYCLES, Cycle */ 1140 .supported = event_always_supported, 1141 .get_count = cycles_get_count, 1142 .ns_per_count = cycles_ns_per, 1143 } 1144 #endif 1145 }; 1146 1147 /* 1148 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of 1149 * events (i.e. the statistical profiling extension), this implementation 1150 * should first be updated to something sparse instead of the current 1151 * supported_event_map[] array. 1152 */ 1153 #define MAX_EVENT_ID 0x11 1154 #define UNSUPPORTED_EVENT UINT16_MAX 1155 static uint16_t supported_event_map[MAX_EVENT_ID + 1]; 1156 1157 /* 1158 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map 1159 * of ARM event numbers to indices in our pm_events array. 1160 * 1161 * Note: Events in the 0x40XX range are not currently supported. 1162 */ 1163 void pmu_init(ARMCPU *cpu) 1164 { 1165 unsigned int i; 1166 1167 /* 1168 * Empty supported_event_map and cpu->pmceid[01] before adding supported 1169 * events to them 1170 */ 1171 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { 1172 supported_event_map[i] = UNSUPPORTED_EVENT; 1173 } 1174 cpu->pmceid0 = 0; 1175 cpu->pmceid1 = 0; 1176 1177 for (i = 0; i < ARRAY_SIZE(pm_events); i++) { 1178 const pm_event *cnt = &pm_events[i]; 1179 assert(cnt->number <= MAX_EVENT_ID); 1180 /* We do not currently support events in the 0x40xx range */ 1181 assert(cnt->number <= 0x3f); 1182 1183 if (cnt->supported(&cpu->env)) { 1184 supported_event_map[cnt->number] = i; 1185 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); 1186 if (cnt->number & 0x20) { 1187 cpu->pmceid1 |= event_mask; 1188 } else { 1189 cpu->pmceid0 |= event_mask; 1190 } 1191 } 1192 } 1193 } 1194 1195 /* 1196 * Check at runtime whether a PMU event is supported for the current machine 1197 */ 1198 static bool event_supported(uint16_t number) 1199 { 1200 if (number > MAX_EVENT_ID) { 1201 return false; 1202 } 1203 return supported_event_map[number] != UNSUPPORTED_EVENT; 1204 } 1205 1206 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 1207 bool isread) 1208 { 1209 /* Performance monitor registers user accessibility is controlled 1210 * by PMUSERENR. 
MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 1211 * trapping to EL2 or EL3 for other accesses. 1212 */ 1213 int el = arm_current_el(env); 1214 1215 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 1216 return CP_ACCESS_TRAP; 1217 } 1218 if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM) 1219 && !arm_is_secure_below_el3(env)) { 1220 return CP_ACCESS_TRAP_EL2; 1221 } 1222 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 1223 return CP_ACCESS_TRAP_EL3; 1224 } 1225 1226 return CP_ACCESS_OK; 1227 } 1228 1229 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 1230 const ARMCPRegInfo *ri, 1231 bool isread) 1232 { 1233 /* ER: event counter read trap control */ 1234 if (arm_feature(env, ARM_FEATURE_V8) 1235 && arm_current_el(env) == 0 1236 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 1237 && isread) { 1238 return CP_ACCESS_OK; 1239 } 1240 1241 return pmreg_access(env, ri, isread); 1242 } 1243 1244 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 1245 const ARMCPRegInfo *ri, 1246 bool isread) 1247 { 1248 /* SW: software increment write trap control */ 1249 if (arm_feature(env, ARM_FEATURE_V8) 1250 && arm_current_el(env) == 0 1251 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 1252 && !isread) { 1253 return CP_ACCESS_OK; 1254 } 1255 1256 return pmreg_access(env, ri, isread); 1257 } 1258 1259 static CPAccessResult pmreg_access_selr(CPUARMState *env, 1260 const ARMCPRegInfo *ri, 1261 bool isread) 1262 { 1263 /* ER: event counter read trap control */ 1264 if (arm_feature(env, ARM_FEATURE_V8) 1265 && arm_current_el(env) == 0 1266 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 1267 return CP_ACCESS_OK; 1268 } 1269 1270 return pmreg_access(env, ri, isread); 1271 } 1272 1273 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 1274 const ARMCPRegInfo *ri, 1275 bool isread) 1276 { 1277 /* CR: cycle counter read trap control */ 1278 if (arm_feature(env, ARM_FEATURE_V8) 1279 && arm_current_el(env) == 0 1280 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 1281 && isread) { 1282 return CP_ACCESS_OK; 1283 } 1284 1285 return pmreg_access(env, ri, isread); 1286 } 1287 1288 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using 1289 * the current EL, security state, and register configuration. 
1290 */ 1291 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) 1292 { 1293 uint64_t filter; 1294 bool e, p, u, nsk, nsu, nsh, m; 1295 bool enabled, prohibited, filtered; 1296 bool secure = arm_is_secure(env); 1297 int el = arm_current_el(env); 1298 uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; 1299 1300 if (!arm_feature(env, ARM_FEATURE_PMU)) { 1301 return false; 1302 } 1303 1304 if (!arm_feature(env, ARM_FEATURE_EL2) || 1305 (counter < hpmn || counter == 31)) { 1306 e = env->cp15.c9_pmcr & PMCRE; 1307 } else { 1308 e = env->cp15.mdcr_el2 & MDCR_HPME; 1309 } 1310 enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); 1311 1312 if (!secure) { 1313 if (el == 2 && (counter < hpmn || counter == 31)) { 1314 prohibited = env->cp15.mdcr_el2 & MDCR_HPMD; 1315 } else { 1316 prohibited = false; 1317 } 1318 } else { 1319 prohibited = arm_feature(env, ARM_FEATURE_EL3) && 1320 (env->cp15.mdcr_el3 & MDCR_SPME); 1321 } 1322 1323 if (prohibited && counter == 31) { 1324 prohibited = env->cp15.c9_pmcr & PMCRDP; 1325 } 1326 1327 if (counter == 31) { 1328 filter = env->cp15.pmccfiltr_el0; 1329 } else { 1330 filter = env->cp15.c14_pmevtyper[counter]; 1331 } 1332 1333 p = filter & PMXEVTYPER_P; 1334 u = filter & PMXEVTYPER_U; 1335 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); 1336 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); 1337 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); 1338 m = arm_el_is_aa64(env, 1) && 1339 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); 1340 1341 if (el == 0) { 1342 filtered = secure ? u : u != nsu; 1343 } else if (el == 1) { 1344 filtered = secure ? p : p != nsk; 1345 } else if (el == 2) { 1346 filtered = !nsh; 1347 } else { /* EL3 */ 1348 filtered = m != p; 1349 } 1350 1351 if (counter != 31) { 1352 /* 1353 * If not checking PMCCNTR, ensure the counter is setup to an event we 1354 * support 1355 */ 1356 uint16_t event = filter & PMXEVTYPER_EVTCOUNT; 1357 if (!event_supported(event)) { 1358 return false; 1359 } 1360 } 1361 1362 return enabled && !prohibited && !filtered; 1363 } 1364 1365 static void pmu_update_irq(CPUARMState *env) 1366 { 1367 ARMCPU *cpu = env_archcpu(env); 1368 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && 1369 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); 1370 } 1371 1372 /* 1373 * Ensure c15_ccnt is the guest-visible count so that operations such as 1374 * enabling/disabling the counter or filtering, modifying the count itself, 1375 * etc. can be done logically. This is essentially a no-op if the counter is 1376 * not enabled at the time of the call. 1377 */ 1378 static void pmccntr_op_start(CPUARMState *env) 1379 { 1380 uint64_t cycles = cycles_get_count(env); 1381 1382 if (pmu_counter_enabled(env, 31)) { 1383 uint64_t eff_cycles = cycles; 1384 if (env->cp15.c9_pmcr & PMCRD) { 1385 /* Increment once every 64 processor clock cycles */ 1386 eff_cycles /= 64; 1387 } 1388 1389 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; 1390 1391 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ 1392 1ull << 63 : 1ull << 31; 1393 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { 1394 env->cp15.c9_pmovsr |= (1 << 31); 1395 pmu_update_irq(env); 1396 } 1397 1398 env->cp15.c15_ccnt = new_pmccntr; 1399 } 1400 env->cp15.c15_ccnt_delta = cycles; 1401 } 1402 1403 /* 1404 * If PMCCNTR is enabled, recalculate the delta between the clock and the 1405 * guest-visible count. 
A call to pmccntr_op_finish should follow every call to 1406 * pmccntr_op_start. 1407 */ 1408 static void pmccntr_op_finish(CPUARMState *env) 1409 { 1410 if (pmu_counter_enabled(env, 31)) { 1411 #ifndef CONFIG_USER_ONLY 1412 /* Calculate when the counter will next overflow */ 1413 uint64_t remaining_cycles = -env->cp15.c15_ccnt; 1414 if (!(env->cp15.c9_pmcr & PMCRLC)) { 1415 remaining_cycles = (uint32_t)remaining_cycles; 1416 } 1417 int64_t overflow_in = cycles_ns_per(remaining_cycles); 1418 1419 if (overflow_in > 0) { 1420 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1421 overflow_in; 1422 ARMCPU *cpu = env_archcpu(env); 1423 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1424 } 1425 #endif 1426 1427 uint64_t prev_cycles = env->cp15.c15_ccnt_delta; 1428 if (env->cp15.c9_pmcr & PMCRD) { 1429 /* Increment once every 64 processor clock cycles */ 1430 prev_cycles /= 64; 1431 } 1432 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; 1433 } 1434 } 1435 1436 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) 1437 { 1438 1439 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1440 uint64_t count = 0; 1441 if (event_supported(event)) { 1442 uint16_t event_idx = supported_event_map[event]; 1443 count = pm_events[event_idx].get_count(env); 1444 } 1445 1446 if (pmu_counter_enabled(env, counter)) { 1447 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; 1448 1449 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { 1450 env->cp15.c9_pmovsr |= (1 << counter); 1451 pmu_update_irq(env); 1452 } 1453 env->cp15.c14_pmevcntr[counter] = new_pmevcntr; 1454 } 1455 env->cp15.c14_pmevcntr_delta[counter] = count; 1456 } 1457 1458 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) 1459 { 1460 if (pmu_counter_enabled(env, counter)) { 1461 #ifndef CONFIG_USER_ONLY 1462 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1463 uint16_t event_idx = supported_event_map[event]; 1464 uint64_t delta = UINT32_MAX - 1465 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; 1466 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); 1467 1468 if (overflow_in > 0) { 1469 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1470 overflow_in; 1471 ARMCPU *cpu = env_archcpu(env); 1472 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1473 } 1474 #endif 1475 1476 env->cp15.c14_pmevcntr_delta[counter] -= 1477 env->cp15.c14_pmevcntr[counter]; 1478 } 1479 } 1480 1481 void pmu_op_start(CPUARMState *env) 1482 { 1483 unsigned int i; 1484 pmccntr_op_start(env); 1485 for (i = 0; i < pmu_num_counters(env); i++) { 1486 pmevcntr_op_start(env, i); 1487 } 1488 } 1489 1490 void pmu_op_finish(CPUARMState *env) 1491 { 1492 unsigned int i; 1493 pmccntr_op_finish(env); 1494 for (i = 0; i < pmu_num_counters(env); i++) { 1495 pmevcntr_op_finish(env, i); 1496 } 1497 } 1498 1499 void pmu_pre_el_change(ARMCPU *cpu, void *ignored) 1500 { 1501 pmu_op_start(&cpu->env); 1502 } 1503 1504 void pmu_post_el_change(ARMCPU *cpu, void *ignored) 1505 { 1506 pmu_op_finish(&cpu->env); 1507 } 1508 1509 void arm_pmu_timer_cb(void *opaque) 1510 { 1511 ARMCPU *cpu = opaque; 1512 1513 /* 1514 * Update all the counter values based on the current underlying counts, 1515 * triggering interrupts to be raised, if necessary. pmu_op_finish() also 1516 * has the effect of setting the cpu->pmu_timer to the next earliest time a 1517 * counter may expire. 
1518 */ 1519 pmu_op_start(&cpu->env); 1520 pmu_op_finish(&cpu->env); 1521 } 1522 1523 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1524 uint64_t value) 1525 { 1526 pmu_op_start(env); 1527 1528 if (value & PMCRC) { 1529 /* The counter has been reset */ 1530 env->cp15.c15_ccnt = 0; 1531 } 1532 1533 if (value & PMCRP) { 1534 unsigned int i; 1535 for (i = 0; i < pmu_num_counters(env); i++) { 1536 env->cp15.c14_pmevcntr[i] = 0; 1537 } 1538 } 1539 1540 /* only the DP, X, D and E bits are writable */ 1541 env->cp15.c9_pmcr &= ~0x39; 1542 env->cp15.c9_pmcr |= (value & 0x39); 1543 1544 pmu_op_finish(env); 1545 } 1546 1547 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, 1548 uint64_t value) 1549 { 1550 unsigned int i; 1551 for (i = 0; i < pmu_num_counters(env); i++) { 1552 /* Increment a counter's count iff: */ 1553 if ((value & (1 << i)) && /* counter's bit is set */ 1554 /* counter is enabled and not filtered */ 1555 pmu_counter_enabled(env, i) && 1556 /* counter is SW_INCR */ 1557 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { 1558 pmevcntr_op_start(env, i); 1559 1560 /* 1561 * Detect if this write causes an overflow since we can't predict 1562 * PMSWINC overflows like we can for other events 1563 */ 1564 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; 1565 1566 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { 1567 env->cp15.c9_pmovsr |= (1 << i); 1568 pmu_update_irq(env); 1569 } 1570 1571 env->cp15.c14_pmevcntr[i] = new_pmswinc; 1572 1573 pmevcntr_op_finish(env, i); 1574 } 1575 } 1576 } 1577 1578 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1579 { 1580 uint64_t ret; 1581 pmccntr_op_start(env); 1582 ret = env->cp15.c15_ccnt; 1583 pmccntr_op_finish(env); 1584 return ret; 1585 } 1586 1587 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1588 uint64_t value) 1589 { 1590 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and 1591 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the 1592 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are 1593 * accessed. 
1594 */ 1595 env->cp15.c9_pmselr = value & 0x1f; 1596 } 1597 1598 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1599 uint64_t value) 1600 { 1601 pmccntr_op_start(env); 1602 env->cp15.c15_ccnt = value; 1603 pmccntr_op_finish(env); 1604 } 1605 1606 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1607 uint64_t value) 1608 { 1609 uint64_t cur_val = pmccntr_read(env, NULL); 1610 1611 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1612 } 1613 1614 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1615 uint64_t value) 1616 { 1617 pmccntr_op_start(env); 1618 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; 1619 pmccntr_op_finish(env); 1620 } 1621 1622 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, 1623 uint64_t value) 1624 { 1625 pmccntr_op_start(env); 1626 /* M is not accessible from AArch32 */ 1627 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | 1628 (value & PMCCFILTR); 1629 pmccntr_op_finish(env); 1630 } 1631 1632 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) 1633 { 1634 /* M is not visible in AArch32 */ 1635 return env->cp15.pmccfiltr_el0 & PMCCFILTR; 1636 } 1637 1638 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1639 uint64_t value) 1640 { 1641 value &= pmu_counter_mask(env); 1642 env->cp15.c9_pmcnten |= value; 1643 } 1644 1645 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1646 uint64_t value) 1647 { 1648 value &= pmu_counter_mask(env); 1649 env->cp15.c9_pmcnten &= ~value; 1650 } 1651 1652 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1653 uint64_t value) 1654 { 1655 value &= pmu_counter_mask(env); 1656 env->cp15.c9_pmovsr &= ~value; 1657 pmu_update_irq(env); 1658 } 1659 1660 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1661 uint64_t value) 1662 { 1663 value &= pmu_counter_mask(env); 1664 env->cp15.c9_pmovsr |= value; 1665 pmu_update_irq(env); 1666 } 1667 1668 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1669 uint64_t value, const uint8_t counter) 1670 { 1671 if (counter == 31) { 1672 pmccfiltr_write(env, ri, value); 1673 } else if (counter < pmu_num_counters(env)) { 1674 pmevcntr_op_start(env, counter); 1675 1676 /* 1677 * If this counter's event type is changing, store the current 1678 * underlying count for the new type in c14_pmevcntr_delta[counter] so 1679 * pmevcntr_op_finish has the correct baseline when it converts back to 1680 * a delta. 1681 */ 1682 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & 1683 PMXEVTYPER_EVTCOUNT; 1684 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; 1685 if (old_event != new_event) { 1686 uint64_t count = 0; 1687 if (event_supported(new_event)) { 1688 uint16_t event_idx = supported_event_map[new_event]; 1689 count = pm_events[event_idx].get_count(env); 1690 } 1691 env->cp15.c14_pmevcntr_delta[counter] = count; 1692 } 1693 1694 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; 1695 pmevcntr_op_finish(env, counter); 1696 } 1697 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1698 * PMSELR value is equal to or greater than the number of implemented 1699 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 
1700 */ 1701 } 1702 1703 static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, 1704 const uint8_t counter) 1705 { 1706 if (counter == 31) { 1707 return env->cp15.pmccfiltr_el0; 1708 } else if (counter < pmu_num_counters(env)) { 1709 return env->cp15.c14_pmevtyper[counter]; 1710 } else { 1711 /* 1712 * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER 1713 * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). 1714 */ 1715 return 0; 1716 } 1717 } 1718 1719 static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1720 uint64_t value) 1721 { 1722 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1723 pmevtyper_write(env, ri, value, counter); 1724 } 1725 1726 static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1727 uint64_t value) 1728 { 1729 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1730 env->cp15.c14_pmevtyper[counter] = value; 1731 1732 /* 1733 * pmevtyper_rawwrite is called between a pair of pmu_op_start and 1734 * pmu_op_finish calls when loading saved state for a migration. Because 1735 * we're potentially updating the type of event here, the value written to 1736 * c14_pmevcntr_delta by the preceeding pmu_op_start call may be for a 1737 * different counter type. Therefore, we need to set this value to the 1738 * current count for the counter type we're writing so that pmu_op_finish 1739 * has the correct count for its calculation. 1740 */ 1741 uint16_t event = value & PMXEVTYPER_EVTCOUNT; 1742 if (event_supported(event)) { 1743 uint16_t event_idx = supported_event_map[event]; 1744 env->cp15.c14_pmevcntr_delta[counter] = 1745 pm_events[event_idx].get_count(env); 1746 } 1747 } 1748 1749 static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1750 { 1751 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1752 return pmevtyper_read(env, ri, counter); 1753 } 1754 1755 static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1756 uint64_t value) 1757 { 1758 pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); 1759 } 1760 1761 static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) 1762 { 1763 return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); 1764 } 1765 1766 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1767 uint64_t value, uint8_t counter) 1768 { 1769 if (counter < pmu_num_counters(env)) { 1770 pmevcntr_op_start(env, counter); 1771 env->cp15.c14_pmevcntr[counter] = value; 1772 pmevcntr_op_finish(env, counter); 1773 } 1774 /* 1775 * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR 1776 * are CONSTRAINED UNPREDICTABLE. 1777 */ 1778 } 1779 1780 static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, 1781 uint8_t counter) 1782 { 1783 if (counter < pmu_num_counters(env)) { 1784 uint64_t ret; 1785 pmevcntr_op_start(env, counter); 1786 ret = env->cp15.c14_pmevcntr[counter]; 1787 pmevcntr_op_finish(env, counter); 1788 return ret; 1789 } else { 1790 /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR 1791 * are CONSTRAINED UNPREDICTABLE. 
*/ 1792 return 0; 1793 } 1794 } 1795 1796 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1797 uint64_t value) 1798 { 1799 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1800 pmevcntr_write(env, ri, value, counter); 1801 } 1802 1803 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1804 { 1805 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1806 return pmevcntr_read(env, ri, counter); 1807 } 1808 1809 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1810 uint64_t value) 1811 { 1812 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1813 assert(counter < pmu_num_counters(env)); 1814 env->cp15.c14_pmevcntr[counter] = value; 1815 pmevcntr_write(env, ri, value, counter); 1816 } 1817 1818 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) 1819 { 1820 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1821 assert(counter < pmu_num_counters(env)); 1822 return env->cp15.c14_pmevcntr[counter]; 1823 } 1824 1825 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1826 uint64_t value) 1827 { 1828 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); 1829 } 1830 1831 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1832 { 1833 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); 1834 } 1835 1836 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1837 uint64_t value) 1838 { 1839 if (arm_feature(env, ARM_FEATURE_V8)) { 1840 env->cp15.c9_pmuserenr = value & 0xf; 1841 } else { 1842 env->cp15.c9_pmuserenr = value & 1; 1843 } 1844 } 1845 1846 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1847 uint64_t value) 1848 { 1849 /* We have no event counters so only the C bit can be changed */ 1850 value &= pmu_counter_mask(env); 1851 env->cp15.c9_pminten |= value; 1852 pmu_update_irq(env); 1853 } 1854 1855 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1856 uint64_t value) 1857 { 1858 value &= pmu_counter_mask(env); 1859 env->cp15.c9_pminten &= ~value; 1860 pmu_update_irq(env); 1861 } 1862 1863 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 1864 uint64_t value) 1865 { 1866 /* Note that even though the AArch64 view of this register has bits 1867 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 1868 * architectural requirements for bits which are RES0 only in some 1869 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 1870 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 1871 */ 1872 raw_write(env, ri, value & ~0x1FULL); 1873 } 1874 1875 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 1876 { 1877 /* Begin with base v8.0 state. */ 1878 uint32_t valid_mask = 0x3fff; 1879 ARMCPU *cpu = env_archcpu(env); 1880 1881 if (arm_el_is_aa64(env, 3)) { 1882 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ 1883 valid_mask &= ~SCR_NET; 1884 } else { 1885 valid_mask &= ~(SCR_RW | SCR_ST); 1886 } 1887 1888 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1889 valid_mask &= ~SCR_HCE; 1890 1891 /* On ARMv7, SMD (or SCD as it is called in v7) is only 1892 * supported if EL2 exists. The bit is UNK/SBZP when 1893 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1894 * when EL2 is unavailable. 1895 * On ARMv8, this bit is always available. 
1896 */ 1897 if (arm_feature(env, ARM_FEATURE_V7) && 1898 !arm_feature(env, ARM_FEATURE_V8)) { 1899 valid_mask &= ~SCR_SMD; 1900 } 1901 } 1902 if (cpu_isar_feature(aa64_lor, cpu)) { 1903 valid_mask |= SCR_TLOR; 1904 } 1905 if (cpu_isar_feature(aa64_pauth, cpu)) { 1906 valid_mask |= SCR_API | SCR_APK; 1907 } 1908 1909 /* Clear all-context RES0 bits. */ 1910 value &= valid_mask; 1911 raw_write(env, ri, value); 1912 } 1913 1914 static CPAccessResult access_aa64_tid2(CPUARMState *env, 1915 const ARMCPRegInfo *ri, 1916 bool isread) 1917 { 1918 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { 1919 return CP_ACCESS_TRAP_EL2; 1920 } 1921 1922 return CP_ACCESS_OK; 1923 } 1924 1925 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1926 { 1927 ARMCPU *cpu = env_archcpu(env); 1928 1929 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1930 * bank 1931 */ 1932 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1933 ri->secure & ARM_CP_SECSTATE_S); 1934 1935 return cpu->ccsidr[index]; 1936 } 1937 1938 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1939 uint64_t value) 1940 { 1941 raw_write(env, ri, value & 0xf); 1942 } 1943 1944 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1945 { 1946 CPUState *cs = env_cpu(env); 1947 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 1948 uint64_t ret = 0; 1949 bool allow_virt = (arm_current_el(env) == 1 && 1950 (!arm_is_secure_below_el3(env) || 1951 (env->cp15.scr_el3 & SCR_EEL2))); 1952 1953 if (allow_virt && (hcr_el2 & HCR_IMO)) { 1954 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 1955 ret |= CPSR_I; 1956 } 1957 } else { 1958 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 1959 ret |= CPSR_I; 1960 } 1961 } 1962 1963 if (allow_virt && (hcr_el2 & HCR_FMO)) { 1964 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 1965 ret |= CPSR_F; 1966 } 1967 } else { 1968 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 1969 ret |= CPSR_F; 1970 } 1971 } 1972 1973 /* External aborts are not possible in QEMU so A bit is always clear */ 1974 return ret; 1975 } 1976 1977 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 1978 bool isread) 1979 { 1980 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { 1981 return CP_ACCESS_TRAP_EL2; 1982 } 1983 1984 return CP_ACCESS_OK; 1985 } 1986 1987 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 1988 bool isread) 1989 { 1990 if (arm_feature(env, ARM_FEATURE_V8)) { 1991 return access_aa64_tid1(env, ri, isread); 1992 } 1993 1994 return CP_ACCESS_OK; 1995 } 1996 1997 static const ARMCPRegInfo v7_cp_reginfo[] = { 1998 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 1999 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 2000 .access = PL1_W, .type = ARM_CP_NOP }, 2001 /* Performance monitors are implementation defined in v7, 2002 * but with an ARM recommended set of registers, which we 2003 * follow. 2004 * 2005 * Performance registers fall into three categories: 2006 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 2007 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 2008 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 2009 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 2010 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
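 * (The pmreg_access* accessfns attached to these registers are the helper fns that perform that PMUSERENR check.)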
2011 */ 2012 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 2013 .access = PL0_RW, .type = ARM_CP_ALIAS, 2014 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2015 .writefn = pmcntenset_write, 2016 .accessfn = pmreg_access, 2017 .raw_writefn = raw_write }, 2018 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 2019 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 2020 .access = PL0_RW, .accessfn = pmreg_access, 2021 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 2022 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 2023 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 2024 .access = PL0_RW, 2025 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2026 .accessfn = pmreg_access, 2027 .writefn = pmcntenclr_write, 2028 .type = ARM_CP_ALIAS }, 2029 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 2030 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 2031 .access = PL0_RW, .accessfn = pmreg_access, 2032 .type = ARM_CP_ALIAS, 2033 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 2034 .writefn = pmcntenclr_write }, 2035 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 2036 .access = PL0_RW, .type = ARM_CP_IO, 2037 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2038 .accessfn = pmreg_access, 2039 .writefn = pmovsr_write, 2040 .raw_writefn = raw_write }, 2041 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 2042 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 2043 .access = PL0_RW, .accessfn = pmreg_access, 2044 .type = ARM_CP_ALIAS | ARM_CP_IO, 2045 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2046 .writefn = pmovsr_write, 2047 .raw_writefn = raw_write }, 2048 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 2049 .access = PL0_W, .accessfn = pmreg_access_swinc, 2050 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2051 .writefn = pmswinc_write }, 2052 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 2053 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2054 .access = PL0_W, .accessfn = pmreg_access_swinc, 2055 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2056 .writefn = pmswinc_write }, 2057 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2058 .access = PL0_RW, .type = ARM_CP_ALIAS, 2059 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2060 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2061 .raw_writefn = raw_write}, 2062 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2063 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2064 .access = PL0_RW, .accessfn = pmreg_access_selr, 2065 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2066 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2067 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2068 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2069 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2070 .accessfn = pmreg_access_ccntr }, 2071 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2072 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2073 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2074 .type = ARM_CP_IO, 2075 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2076 .readfn = pmccntr_read, .writefn = pmccntr_write, 2077 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2078 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2079 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2080 .access = PL0_RW, .accessfn = pmreg_access, 2081 .type = ARM_CP_ALIAS | ARM_CP_IO, 2082 .resetvalue = 0, }, 2083 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2084 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2085 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2086 .access = PL0_RW, .accessfn = pmreg_access, 2087 .type = ARM_CP_IO, 2088 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2089 .resetvalue = 0, }, 2090 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2091 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2092 .accessfn = pmreg_access, 2093 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2094 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2095 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2096 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2097 .accessfn = pmreg_access, 2098 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2099 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2100 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2101 .accessfn = pmreg_access_xevcntr, 2102 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2103 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2104 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2105 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2106 .accessfn = pmreg_access_xevcntr, 2107 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2108 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2109 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2110 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2111 .resetvalue = 0, 2112 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2113 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2114 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2115 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2116 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2117 .resetvalue = 0, 2118 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2119 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2120 .access = PL1_RW, .accessfn = access_tpm, 2121 .type = ARM_CP_ALIAS | ARM_CP_IO, 2122 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2123 .resetvalue = 0, 2124 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2125 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2126 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2127 .access = PL1_RW, .accessfn = access_tpm, 2128 .type = ARM_CP_IO, 2129 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2130 .writefn = pmintenset_write, .raw_writefn = raw_write, 2131 .resetvalue = 0x0 }, 2132 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2133 .access = PL1_RW, .accessfn = access_tpm, 2134 .type = ARM_CP_ALIAS | ARM_CP_IO, 2135 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2136 .writefn = pmintenclr_write, }, 2137 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2138 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2139 .access = PL1_RW, .accessfn = access_tpm, 2140 .type = ARM_CP_ALIAS | ARM_CP_IO, 2141 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2142 .writefn = pmintenclr_write }, 2143 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2144 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2145 .access = PL1_R, 2146 .accessfn = access_aa64_tid2, 2147 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2148 { 
.name = "CSSELR", .state = ARM_CP_STATE_BOTH, 2149 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2150 .access = PL1_RW, 2151 .accessfn = access_aa64_tid2, 2152 .writefn = csselr_write, .resetvalue = 0, 2153 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2154 offsetof(CPUARMState, cp15.csselr_ns) } }, 2155 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2156 * just RAZ for all cores: 2157 */ 2158 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2159 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2160 .access = PL1_R, .type = ARM_CP_CONST, 2161 .accessfn = access_aa64_tid1, 2162 .resetvalue = 0 }, 2163 /* Auxiliary fault status registers: these also are IMPDEF, and we 2164 * choose to RAZ/WI for all cores. 2165 */ 2166 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2167 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2168 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2169 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2170 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2171 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2172 /* MAIR can just read-as-written because we don't implement caches 2173 * and so don't need to care about memory attributes. 2174 */ 2175 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2176 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2177 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2178 .resetvalue = 0 }, 2179 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2180 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2181 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2182 .resetvalue = 0 }, 2183 /* For non-long-descriptor page tables these are PRRR and NMRR; 2184 * regardless they still act as reads-as-written for QEMU. 2185 */ 2186 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2187 * allows them to assign the correct fieldoffset based on the endianness 2188 * handled in the field definitions. 
2189 */ 2190 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2191 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, 2192 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2193 offsetof(CPUARMState, cp15.mair0_ns) }, 2194 .resetfn = arm_cp_reset_ignore }, 2195 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2196 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, 2197 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2198 offsetof(CPUARMState, cp15.mair1_ns) }, 2199 .resetfn = arm_cp_reset_ignore }, 2200 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2201 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2202 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2203 /* 32 bit ITLB invalidates */ 2204 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2205 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2206 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2207 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2208 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2209 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2210 /* 32 bit DTLB invalidates */ 2211 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2212 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2213 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2214 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2215 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2216 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2217 /* 32 bit TLB invalidates */ 2218 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2219 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2220 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2221 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2222 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2223 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2224 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2225 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 2226 REGINFO_SENTINEL 2227 }; 2228 2229 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2230 /* 32 bit TLB invalidates, Inner Shareable */ 2231 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2232 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write }, 2233 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2234 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 2235 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2236 .type = ARM_CP_NO_RAW, .access = PL1_W, 2237 .writefn = tlbiasid_is_write }, 2238 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2239 .type = ARM_CP_NO_RAW, .access = PL1_W, 2240 .writefn = tlbimvaa_is_write }, 2241 REGINFO_SENTINEL 2242 }; 2243 2244 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2245 /* PMOVSSET is not implemented in v7 before v7ve */ 2246 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2247 .access = PL0_RW, .accessfn = pmreg_access, 2248 .type = ARM_CP_ALIAS | ARM_CP_IO, 2249 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2250 .writefn = 
pmovsset_write, 2251 .raw_writefn = raw_write }, 2252 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2253 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2254 .access = PL0_RW, .accessfn = pmreg_access, 2255 .type = ARM_CP_ALIAS | ARM_CP_IO, 2256 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2257 .writefn = pmovsset_write, 2258 .raw_writefn = raw_write }, 2259 REGINFO_SENTINEL 2260 }; 2261 2262 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2263 uint64_t value) 2264 { 2265 value &= 1; 2266 env->teecr = value; 2267 } 2268 2269 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2270 bool isread) 2271 { 2272 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2273 return CP_ACCESS_TRAP; 2274 } 2275 return CP_ACCESS_OK; 2276 } 2277 2278 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2279 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2280 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2281 .resetvalue = 0, 2282 .writefn = teecr_write }, 2283 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2284 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2285 .accessfn = teehbr_access, .resetvalue = 0 }, 2286 REGINFO_SENTINEL 2287 }; 2288 2289 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2290 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2291 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2292 .access = PL0_RW, 2293 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2294 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2295 .access = PL0_RW, 2296 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2297 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2298 .resetfn = arm_cp_reset_ignore }, 2299 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2300 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2301 .access = PL0_R|PL1_W, 2302 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2303 .resetvalue = 0}, 2304 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2305 .access = PL0_R|PL1_W, 2306 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2307 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2308 .resetfn = arm_cp_reset_ignore }, 2309 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2310 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2311 .access = PL1_RW, 2312 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2313 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2314 .access = PL1_RW, 2315 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2316 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2317 .resetvalue = 0 }, 2318 REGINFO_SENTINEL 2319 }; 2320 2321 #ifndef CONFIG_USER_ONLY 2322 2323 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2324 bool isread) 2325 { 2326 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2327 * Writable only at the highest implemented exception level. 
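 * (For the PL0 visibility check below we consult CNTKCTL, or CNTHCTL_EL2 when HCR_EL2.<E2H,TGE> == '11'.)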
2328 */ 2329 int el = arm_current_el(env); 2330 uint64_t hcr; 2331 uint32_t cntkctl; 2332 2333 switch (el) { 2334 case 0: 2335 hcr = arm_hcr_el2_eff(env); 2336 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2337 cntkctl = env->cp15.cnthctl_el2; 2338 } else { 2339 cntkctl = env->cp15.c14_cntkctl; 2340 } 2341 if (!extract32(cntkctl, 0, 2)) { 2342 return CP_ACCESS_TRAP; 2343 } 2344 break; 2345 case 1: 2346 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2347 arm_is_secure_below_el3(env)) { 2348 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2349 return CP_ACCESS_TRAP_UNCATEGORIZED; 2350 } 2351 break; 2352 case 2: 2353 case 3: 2354 break; 2355 } 2356 2357 if (!isread && el < arm_highest_el(env)) { 2358 return CP_ACCESS_TRAP_UNCATEGORIZED; 2359 } 2360 2361 return CP_ACCESS_OK; 2362 } 2363 2364 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2365 bool isread) 2366 { 2367 unsigned int cur_el = arm_current_el(env); 2368 bool secure = arm_is_secure(env); 2369 uint64_t hcr = arm_hcr_el2_eff(env); 2370 2371 switch (cur_el) { 2372 case 0: 2373 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2374 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2375 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2376 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2377 } 2378 2379 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2380 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2381 return CP_ACCESS_TRAP; 2382 } 2383 2384 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ 2385 if (hcr & HCR_E2H) { 2386 if (timeridx == GTIMER_PHYS && 2387 !extract32(env->cp15.cnthctl_el2, 10, 1)) { 2388 return CP_ACCESS_TRAP_EL2; 2389 } 2390 } else { 2391 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2392 if (arm_feature(env, ARM_FEATURE_EL2) && 2393 timeridx == GTIMER_PHYS && !secure && 2394 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2395 return CP_ACCESS_TRAP_EL2; 2396 } 2397 } 2398 break; 2399 2400 case 1: 2401 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2402 if (arm_feature(env, ARM_FEATURE_EL2) && 2403 timeridx == GTIMER_PHYS && !secure && 2404 (hcr & HCR_E2H 2405 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2406 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2407 return CP_ACCESS_TRAP_EL2; 2408 } 2409 break; 2410 } 2411 return CP_ACCESS_OK; 2412 } 2413 2414 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2415 bool isread) 2416 { 2417 unsigned int cur_el = arm_current_el(env); 2418 bool secure = arm_is_secure(env); 2419 uint64_t hcr = arm_hcr_el2_eff(env); 2420 2421 switch (cur_el) { 2422 case 0: 2423 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2424 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2425 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2426 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2427 } 2428 2429 /* 2430 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2431 * EL0 if EL0[PV]TEN is zero. 2432 */ 2433 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2434 return CP_ACCESS_TRAP; 2435 } 2436 /* fall through */ 2437 2438 case 1: 2439 if (arm_feature(env, ARM_FEATURE_EL2) && 2440 timeridx == GTIMER_PHYS && !secure) { 2441 if (hcr & HCR_E2H) { 2442 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ 2443 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2444 return CP_ACCESS_TRAP_EL2; 2445 } 2446 } else { 2447 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. 
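 * (EL1PCEN is bit 1 of CNTHCTL_EL2, which is what the extract32() below tests.)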
*/ 2448 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2449 return CP_ACCESS_TRAP_EL2; 2450 } 2451 } 2452 } 2453 break; 2454 } 2455 return CP_ACCESS_OK; 2456 } 2457 2458 static CPAccessResult gt_pct_access(CPUARMState *env, 2459 const ARMCPRegInfo *ri, 2460 bool isread) 2461 { 2462 return gt_counter_access(env, GTIMER_PHYS, isread); 2463 } 2464 2465 static CPAccessResult gt_vct_access(CPUARMState *env, 2466 const ARMCPRegInfo *ri, 2467 bool isread) 2468 { 2469 return gt_counter_access(env, GTIMER_VIRT, isread); 2470 } 2471 2472 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2473 bool isread) 2474 { 2475 return gt_timer_access(env, GTIMER_PHYS, isread); 2476 } 2477 2478 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2479 bool isread) 2480 { 2481 return gt_timer_access(env, GTIMER_VIRT, isread); 2482 } 2483 2484 static CPAccessResult gt_stimer_access(CPUARMState *env, 2485 const ARMCPRegInfo *ri, 2486 bool isread) 2487 { 2488 /* The AArch64 register view of the secure physical timer is 2489 * always accessible from EL3, and configurably accessible from 2490 * Secure EL1. 2491 */ 2492 switch (arm_current_el(env)) { 2493 case 1: 2494 if (!arm_is_secure(env)) { 2495 return CP_ACCESS_TRAP; 2496 } 2497 if (!(env->cp15.scr_el3 & SCR_ST)) { 2498 return CP_ACCESS_TRAP_EL3; 2499 } 2500 return CP_ACCESS_OK; 2501 case 0: 2502 case 2: 2503 return CP_ACCESS_TRAP; 2504 case 3: 2505 return CP_ACCESS_OK; 2506 default: 2507 g_assert_not_reached(); 2508 } 2509 } 2510 2511 static uint64_t gt_get_countervalue(CPUARMState *env) 2512 { 2513 ARMCPU *cpu = env_archcpu(env); 2514 2515 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2516 } 2517 2518 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2519 { 2520 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2521 2522 if (gt->ctl & 1) { 2523 /* Timer enabled: calculate and set current ISTATUS, irq, and 2524 * reset timer to when ISTATUS next has to change 2525 */ 2526 uint64_t offset = timeridx == GTIMER_VIRT ? 2527 cpu->env.cp15.cntvoff_el2 : 0; 2528 uint64_t count = gt_get_countervalue(&cpu->env); 2529 /* Note that this must be unsigned 64 bit arithmetic: */ 2530 int istatus = count - offset >= gt->cval; 2531 uint64_t nexttick; 2532 int irqstate; 2533 2534 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2535 2536 irqstate = (istatus && !(gt->ctl & 2)); 2537 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2538 2539 if (istatus) { 2540 /* Next transition is when count rolls back over to zero */ 2541 nexttick = UINT64_MAX; 2542 } else { 2543 /* Next transition is when we hit cval */ 2544 nexttick = gt->cval + offset; 2545 } 2546 /* Note that the desired next expiry time might be beyond the 2547 * signed-64-bit range of a QEMUTimer -- in this case we just 2548 * set the timer for as far in the future as possible. When the 2549 * timer expires we will reset the timer for any remaining period. 
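 * (The comparison below checks whether nexttick, scaled to nanoseconds by the counter period, would exceed INT64_MAX.)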
2550 */ 2551 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2552 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2553 } else { 2554 timer_mod(cpu->gt_timer[timeridx], nexttick); 2555 } 2556 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2557 } else { 2558 /* Timer disabled: ISTATUS and timer output always clear */ 2559 gt->ctl &= ~4; 2560 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2561 timer_del(cpu->gt_timer[timeridx]); 2562 trace_arm_gt_recalc_disabled(timeridx); 2563 } 2564 } 2565 2566 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2567 int timeridx) 2568 { 2569 ARMCPU *cpu = env_archcpu(env); 2570 2571 timer_del(cpu->gt_timer[timeridx]); 2572 } 2573 2574 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2575 { 2576 return gt_get_countervalue(env); 2577 } 2578 2579 static uint64_t gt_virt_cnt_offset(CPUARMState *env) 2580 { 2581 uint64_t hcr; 2582 2583 switch (arm_current_el(env)) { 2584 case 2: 2585 hcr = arm_hcr_el2_eff(env); 2586 if (hcr & HCR_E2H) { 2587 return 0; 2588 } 2589 break; 2590 case 0: 2591 hcr = arm_hcr_el2_eff(env); 2592 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2593 return 0; 2594 } 2595 break; 2596 } 2597 2598 return env->cp15.cntvoff_el2; 2599 } 2600 2601 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2602 { 2603 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2604 } 2605 2606 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2607 int timeridx, 2608 uint64_t value) 2609 { 2610 trace_arm_gt_cval_write(timeridx, value); 2611 env->cp15.c14_timer[timeridx].cval = value; 2612 gt_recalc_timer(env_archcpu(env), timeridx); 2613 } 2614 2615 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2616 int timeridx) 2617 { 2618 uint64_t offset = 0; 2619 2620 switch (timeridx) { 2621 case GTIMER_VIRT: 2622 case GTIMER_HYPVIRT: 2623 offset = gt_virt_cnt_offset(env); 2624 break; 2625 } 2626 2627 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2628 (gt_get_countervalue(env) - offset)); 2629 } 2630 2631 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2632 int timeridx, 2633 uint64_t value) 2634 { 2635 uint64_t offset = 0; 2636 2637 switch (timeridx) { 2638 case GTIMER_VIRT: 2639 case GTIMER_HYPVIRT: 2640 offset = gt_virt_cnt_offset(env); 2641 break; 2642 } 2643 2644 trace_arm_gt_tval_write(timeridx, value); 2645 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2646 sextract64(value, 0, 32); 2647 gt_recalc_timer(env_archcpu(env), timeridx); 2648 } 2649 2650 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2651 int timeridx, 2652 uint64_t value) 2653 { 2654 ARMCPU *cpu = env_archcpu(env); 2655 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2656 2657 trace_arm_gt_ctl_write(timeridx, value); 2658 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2659 if ((oldval ^ value) & 1) { 2660 /* Enable toggled */ 2661 gt_recalc_timer(cpu, timeridx); 2662 } else if ((oldval ^ value) & 2) { 2663 /* IMASK toggled: don't need to recalculate, 2664 * just set the interrupt line based on ISTATUS 2665 */ 2666 int irqstate = (oldval & 4) && !(value & 2); 2667 2668 trace_arm_gt_imask_toggle(timeridx, irqstate); 2669 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2670 } 2671 } 2672 2673 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2674 { 2675 gt_timer_reset(env, ri, GTIMER_PHYS); 2676 } 2677 2678 static void 
gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2679 uint64_t value) 2680 { 2681 gt_cval_write(env, ri, GTIMER_PHYS, value); 2682 } 2683 2684 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2685 { 2686 return gt_tval_read(env, ri, GTIMER_PHYS); 2687 } 2688 2689 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2690 uint64_t value) 2691 { 2692 gt_tval_write(env, ri, GTIMER_PHYS, value); 2693 } 2694 2695 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2696 uint64_t value) 2697 { 2698 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2699 } 2700 2701 static int gt_phys_redir_timeridx(CPUARMState *env) 2702 { 2703 switch (arm_mmu_idx(env)) { 2704 case ARMMMUIdx_E20_0: 2705 case ARMMMUIdx_E20_2: 2706 case ARMMMUIdx_E20_2_PAN: 2707 return GTIMER_HYP; 2708 default: 2709 return GTIMER_PHYS; 2710 } 2711 } 2712 2713 static int gt_virt_redir_timeridx(CPUARMState *env) 2714 { 2715 switch (arm_mmu_idx(env)) { 2716 case ARMMMUIdx_E20_0: 2717 case ARMMMUIdx_E20_2: 2718 case ARMMMUIdx_E20_2_PAN: 2719 return GTIMER_HYPVIRT; 2720 default: 2721 return GTIMER_VIRT; 2722 } 2723 } 2724 2725 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2726 const ARMCPRegInfo *ri) 2727 { 2728 int timeridx = gt_phys_redir_timeridx(env); 2729 return env->cp15.c14_timer[timeridx].cval; 2730 } 2731 2732 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2733 uint64_t value) 2734 { 2735 int timeridx = gt_phys_redir_timeridx(env); 2736 gt_cval_write(env, ri, timeridx, value); 2737 } 2738 2739 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2740 const ARMCPRegInfo *ri) 2741 { 2742 int timeridx = gt_phys_redir_timeridx(env); 2743 return gt_tval_read(env, ri, timeridx); 2744 } 2745 2746 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2747 uint64_t value) 2748 { 2749 int timeridx = gt_phys_redir_timeridx(env); 2750 gt_tval_write(env, ri, timeridx, value); 2751 } 2752 2753 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2754 const ARMCPRegInfo *ri) 2755 { 2756 int timeridx = gt_phys_redir_timeridx(env); 2757 return env->cp15.c14_timer[timeridx].ctl; 2758 } 2759 2760 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2761 uint64_t value) 2762 { 2763 int timeridx = gt_phys_redir_timeridx(env); 2764 gt_ctl_write(env, ri, timeridx, value); 2765 } 2766 2767 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2768 { 2769 gt_timer_reset(env, ri, GTIMER_VIRT); 2770 } 2771 2772 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2773 uint64_t value) 2774 { 2775 gt_cval_write(env, ri, GTIMER_VIRT, value); 2776 } 2777 2778 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2779 { 2780 return gt_tval_read(env, ri, GTIMER_VIRT); 2781 } 2782 2783 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2784 uint64_t value) 2785 { 2786 gt_tval_write(env, ri, GTIMER_VIRT, value); 2787 } 2788 2789 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2790 uint64_t value) 2791 { 2792 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2793 } 2794 2795 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2796 uint64_t value) 2797 { 2798 ARMCPU *cpu = env_archcpu(env); 2799 2800 trace_arm_gt_cntvoff_write(value); 2801 raw_write(env, ri, value); 2802 gt_recalc_timer(cpu, GTIMER_VIRT); 2803 } 2804 2805 static uint64_t gt_virt_redir_cval_read(CPUARMState 
*env, 2806 const ARMCPRegInfo *ri) 2807 { 2808 int timeridx = gt_virt_redir_timeridx(env); 2809 return env->cp15.c14_timer[timeridx].cval; 2810 } 2811 2812 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2813 uint64_t value) 2814 { 2815 int timeridx = gt_virt_redir_timeridx(env); 2816 gt_cval_write(env, ri, timeridx, value); 2817 } 2818 2819 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 2820 const ARMCPRegInfo *ri) 2821 { 2822 int timeridx = gt_virt_redir_timeridx(env); 2823 return gt_tval_read(env, ri, timeridx); 2824 } 2825 2826 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2827 uint64_t value) 2828 { 2829 int timeridx = gt_virt_redir_timeridx(env); 2830 gt_tval_write(env, ri, timeridx, value); 2831 } 2832 2833 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 2834 const ARMCPRegInfo *ri) 2835 { 2836 int timeridx = gt_virt_redir_timeridx(env); 2837 return env->cp15.c14_timer[timeridx].ctl; 2838 } 2839 2840 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2841 uint64_t value) 2842 { 2843 int timeridx = gt_virt_redir_timeridx(env); 2844 gt_ctl_write(env, ri, timeridx, value); 2845 } 2846 2847 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2848 { 2849 gt_timer_reset(env, ri, GTIMER_HYP); 2850 } 2851 2852 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2853 uint64_t value) 2854 { 2855 gt_cval_write(env, ri, GTIMER_HYP, value); 2856 } 2857 2858 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2859 { 2860 return gt_tval_read(env, ri, GTIMER_HYP); 2861 } 2862 2863 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2864 uint64_t value) 2865 { 2866 gt_tval_write(env, ri, GTIMER_HYP, value); 2867 } 2868 2869 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2870 uint64_t value) 2871 { 2872 gt_ctl_write(env, ri, GTIMER_HYP, value); 2873 } 2874 2875 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2876 { 2877 gt_timer_reset(env, ri, GTIMER_SEC); 2878 } 2879 2880 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2881 uint64_t value) 2882 { 2883 gt_cval_write(env, ri, GTIMER_SEC, value); 2884 } 2885 2886 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2887 { 2888 return gt_tval_read(env, ri, GTIMER_SEC); 2889 } 2890 2891 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2892 uint64_t value) 2893 { 2894 gt_tval_write(env, ri, GTIMER_SEC, value); 2895 } 2896 2897 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2898 uint64_t value) 2899 { 2900 gt_ctl_write(env, ri, GTIMER_SEC, value); 2901 } 2902 2903 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2904 { 2905 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 2906 } 2907 2908 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2909 uint64_t value) 2910 { 2911 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 2912 } 2913 2914 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2915 { 2916 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 2917 } 2918 2919 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2920 uint64_t value) 2921 { 2922 gt_tval_write(env, ri, GTIMER_HYPVIRT, value); 2923 } 2924 2925 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2926 uint64_t value) 2927 { 2928 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 
2929 } 2930 2931 void arm_gt_ptimer_cb(void *opaque) 2932 { 2933 ARMCPU *cpu = opaque; 2934 2935 gt_recalc_timer(cpu, GTIMER_PHYS); 2936 } 2937 2938 void arm_gt_vtimer_cb(void *opaque) 2939 { 2940 ARMCPU *cpu = opaque; 2941 2942 gt_recalc_timer(cpu, GTIMER_VIRT); 2943 } 2944 2945 void arm_gt_htimer_cb(void *opaque) 2946 { 2947 ARMCPU *cpu = opaque; 2948 2949 gt_recalc_timer(cpu, GTIMER_HYP); 2950 } 2951 2952 void arm_gt_stimer_cb(void *opaque) 2953 { 2954 ARMCPU *cpu = opaque; 2955 2956 gt_recalc_timer(cpu, GTIMER_SEC); 2957 } 2958 2959 void arm_gt_hvtimer_cb(void *opaque) 2960 { 2961 ARMCPU *cpu = opaque; 2962 2963 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 2964 } 2965 2966 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 2967 { 2968 ARMCPU *cpu = env_archcpu(env); 2969 2970 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 2971 } 2972 2973 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2974 /* Note that CNTFRQ is purely reads-as-written for the benefit 2975 * of software; writing it doesn't actually change the timer frequency. 2976 * Our reset value matches the fixed frequency we implement the timer at. 2977 */ 2978 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 2979 .type = ARM_CP_ALIAS, 2980 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2981 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2982 }, 2983 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2984 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2985 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2986 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2987 .resetfn = arm_gt_cntfrq_reset, 2988 }, 2989 /* overall control: mostly access permissions */ 2990 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2991 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2992 .access = PL1_RW, 2993 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2994 .resetvalue = 0, 2995 }, 2996 /* per-timer control */ 2997 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2998 .secure = ARM_CP_SECSTATE_NS, 2999 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3000 .accessfn = gt_ptimer_access, 3001 .fieldoffset = offsetoflow32(CPUARMState, 3002 cp15.c14_timer[GTIMER_PHYS].ctl), 3003 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3004 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3005 }, 3006 { .name = "CNTP_CTL_S", 3007 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3008 .secure = ARM_CP_SECSTATE_S, 3009 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3010 .accessfn = gt_ptimer_access, 3011 .fieldoffset = offsetoflow32(CPUARMState, 3012 cp15.c14_timer[GTIMER_SEC].ctl), 3013 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3014 }, 3015 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 3016 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 3017 .type = ARM_CP_IO, .access = PL0_RW, 3018 .accessfn = gt_ptimer_access, 3019 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 3020 .resetvalue = 0, 3021 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3022 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3023 }, 3024 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 3025 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3026 .accessfn = gt_vtimer_access, 3027 .fieldoffset = offsetoflow32(CPUARMState, 3028 cp15.c14_timer[GTIMER_VIRT].ctl), 3029 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3030 .writefn 
= gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3031 }, 3032 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 3033 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 3034 .type = ARM_CP_IO, .access = PL0_RW, 3035 .accessfn = gt_vtimer_access, 3036 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 3037 .resetvalue = 0, 3038 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3039 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3040 }, 3041 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 3042 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3043 .secure = ARM_CP_SECSTATE_NS, 3044 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3045 .accessfn = gt_ptimer_access, 3046 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3047 }, 3048 { .name = "CNTP_TVAL_S", 3049 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3050 .secure = ARM_CP_SECSTATE_S, 3051 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3052 .accessfn = gt_ptimer_access, 3053 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 3054 }, 3055 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3056 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 3057 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3058 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 3059 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3060 }, 3061 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 3062 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3063 .accessfn = gt_vtimer_access, 3064 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3065 }, 3066 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3067 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 3068 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3069 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 3070 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3071 }, 3072 /* The counter itself */ 3073 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 3074 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3075 .accessfn = gt_pct_access, 3076 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3077 }, 3078 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 3079 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 3080 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3081 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3082 }, 3083 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3084 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3085 .accessfn = gt_vct_access, 3086 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3087 }, 3088 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3089 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3090 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3091 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3092 }, 3093 /* Comparison value, indicating when the timer goes off */ 3094 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3095 .secure = ARM_CP_SECSTATE_NS, 3096 .access = PL0_RW, 3097 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3098 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3099 .accessfn = gt_ptimer_access, 3100 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3101 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3102 }, 3103 { .name = 
"CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3104 .secure = ARM_CP_SECSTATE_S, 3105 .access = PL0_RW, 3106 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3107 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3108 .accessfn = gt_ptimer_access, 3109 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3110 }, 3111 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3112 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3113 .access = PL0_RW, 3114 .type = ARM_CP_IO, 3115 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3116 .resetvalue = 0, .accessfn = gt_ptimer_access, 3117 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3118 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3119 }, 3120 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3121 .access = PL0_RW, 3122 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3123 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3124 .accessfn = gt_vtimer_access, 3125 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3126 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3127 }, 3128 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3129 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3130 .access = PL0_RW, 3131 .type = ARM_CP_IO, 3132 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3133 .resetvalue = 0, .accessfn = gt_vtimer_access, 3134 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3135 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3136 }, 3137 /* Secure timer -- this is actually restricted to only EL3 3138 * and configurably Secure-EL1 via the accessfn. 3139 */ 3140 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3141 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3142 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3143 .accessfn = gt_stimer_access, 3144 .readfn = gt_sec_tval_read, 3145 .writefn = gt_sec_tval_write, 3146 .resetfn = gt_sec_timer_reset, 3147 }, 3148 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3149 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3150 .type = ARM_CP_IO, .access = PL1_RW, 3151 .accessfn = gt_stimer_access, 3152 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3153 .resetvalue = 0, 3154 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3155 }, 3156 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3157 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3158 .type = ARM_CP_IO, .access = PL1_RW, 3159 .accessfn = gt_stimer_access, 3160 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3161 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3162 }, 3163 REGINFO_SENTINEL 3164 }; 3165 3166 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 3167 bool isread) 3168 { 3169 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 3170 return CP_ACCESS_TRAP; 3171 } 3172 return CP_ACCESS_OK; 3173 } 3174 3175 #else 3176 3177 /* In user-mode most of the generic timer registers are inaccessible 3178 * however modern kernels (4.12+) allow access to cntvct_el0 3179 */ 3180 3181 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3182 { 3183 ARMCPU *cpu = env_archcpu(env); 3184 3185 /* Currently we have no support for QEMUTimer in linux-user so we 3186 * can't call gt_get_countervalue(env), instead we directly 3187 * call the lower level functions. 
3188 */ 3189 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3190 } 3191 3192 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3193 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3194 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3195 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3196 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3197 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 3198 }, 3199 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3200 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3201 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3202 .readfn = gt_virt_cnt_read, 3203 }, 3204 REGINFO_SENTINEL 3205 }; 3206 3207 #endif 3208 3209 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3210 { 3211 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3212 raw_write(env, ri, value); 3213 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3214 raw_write(env, ri, value & 0xfffff6ff); 3215 } else { 3216 raw_write(env, ri, value & 0xfffff1ff); 3217 } 3218 } 3219 3220 #ifndef CONFIG_USER_ONLY 3221 /* get_phys_addr() isn't present for user-mode-only targets */ 3222 3223 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3224 bool isread) 3225 { 3226 if (ri->opc2 & 4) { 3227 /* The ATS12NSO* operations must trap to EL3 if executed in 3228 * Secure EL1 (which can only happen if EL3 is AArch64). 3229 * They are simply UNDEF if executed from NS EL1. 3230 * They function normally from EL2 or EL3. 3231 */ 3232 if (arm_current_el(env) == 1) { 3233 if (arm_is_secure_below_el3(env)) { 3234 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 3235 } 3236 return CP_ACCESS_TRAP_UNCATEGORIZED; 3237 } 3238 } 3239 return CP_ACCESS_OK; 3240 } 3241 3242 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3243 MMUAccessType access_type, ARMMMUIdx mmu_idx) 3244 { 3245 hwaddr phys_addr; 3246 target_ulong page_size; 3247 int prot; 3248 bool ret; 3249 uint64_t par64; 3250 bool format64 = false; 3251 MemTxAttrs attrs = {}; 3252 ARMMMUFaultInfo fi = {}; 3253 ARMCacheAttrs cacheattrs = {}; 3254 3255 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 3256 &prot, &page_size, &fi, &cacheattrs); 3257 3258 if (ret) { 3259 /* 3260 * Some kinds of translation fault must cause exceptions rather 3261 * than being reported in the PAR. 3262 */ 3263 int current_el = arm_current_el(env); 3264 int target_el; 3265 uint32_t syn, fsr, fsc; 3266 bool take_exc = false; 3267 3268 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env) 3269 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3270 /* 3271 * Synchronous stage 2 fault on an access made as part of the 3272 * translation table walk for AT S1E0* or AT S1E1* insn 3273 * executed from NS EL1. If this is a synchronous external abort 3274 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3275 * to EL3. Otherwise the fault is taken as an exception to EL2, 3276 * and HPFAR_EL2 holds the faulting IPA. 3277 */ 3278 if (fi.type == ARMFault_SyncExternalOnWalk && 3279 (env->cp15.scr_el3 & SCR_EA)) { 3280 target_el = 3; 3281 } else { 3282 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3283 target_el = 2; 3284 } 3285 take_exc = true; 3286 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3287 /* 3288 * Synchronous external aborts during a translation table walk 3289 * are taken as Data Abort exceptions. 
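 * (Stage 2 walk faults are routed to EL2 unless we are already at EL3; other faults go to the usual exception target EL, as selected below.)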
3290 */ 3291 if (fi.stage2) { 3292 if (current_el == 3) { 3293 target_el = 3; 3294 } else { 3295 target_el = 2; 3296 } 3297 } else { 3298 target_el = exception_target_el(env); 3299 } 3300 take_exc = true; 3301 } 3302 3303 if (take_exc) { 3304 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3305 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3306 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3307 fsr = arm_fi_to_lfsc(&fi); 3308 fsc = extract32(fsr, 0, 6); 3309 } else { 3310 fsr = arm_fi_to_sfsc(&fi); 3311 fsc = 0x3f; 3312 } 3313 /* 3314 * Report exception with ESR indicating a fault due to a 3315 * translation table walk for a cache maintenance instruction. 3316 */ 3317 syn = syn_data_abort_no_iss(current_el == target_el, 3318 fi.ea, 1, fi.s1ptw, 1, fsc); 3319 env->exception.vaddress = value; 3320 env->exception.fsr = fsr; 3321 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3322 } 3323 } 3324 3325 if (is_a64(env)) { 3326 format64 = true; 3327 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3328 /* 3329 * ATS1Cxx: 3330 * * TTBCR.EAE determines whether the result is returned using the 3331 * 32-bit or the 64-bit PAR format 3332 * * Instructions executed in Hyp mode always use the 64bit format 3333 * 3334 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3335 * * The Non-secure TTBCR.EAE bit is set to 1 3336 * * The implementation includes EL2, and the value of HCR.VM is 1 3337 * 3338 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3339 * 3340 * ATS1Hx always uses the 64bit format. 3341 */ 3342 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3343 3344 if (arm_feature(env, ARM_FEATURE_EL2)) { 3345 if (mmu_idx == ARMMMUIdx_E10_0 || 3346 mmu_idx == ARMMMUIdx_E10_1 || 3347 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3348 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3349 } else { 3350 format64 |= arm_current_el(env) == 2; 3351 } 3352 } 3353 } 3354 3355 if (format64) { 3356 /* Create a 64-bit PAR */ 3357 par64 = (1 << 11); /* LPAE bit always set */ 3358 if (!ret) { 3359 par64 |= phys_addr & ~0xfffULL; 3360 if (!attrs.secure) { 3361 par64 |= (1 << 9); /* NS */ 3362 } 3363 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 3364 par64 |= cacheattrs.shareability << 7; /* SH */ 3365 } else { 3366 uint32_t fsr = arm_fi_to_lfsc(&fi); 3367 3368 par64 |= 1; /* F */ 3369 par64 |= (fsr & 0x3f) << 1; /* FS */ 3370 if (fi.stage2) { 3371 par64 |= (1 << 9); /* S */ 3372 } 3373 if (fi.s1ptw) { 3374 par64 |= (1 << 8); /* PTW */ 3375 } 3376 } 3377 } else { 3378 /* fsr is a DFSR/IFSR value for the short descriptor 3379 * translation table format (with WnR always clear). 3380 * Convert it to a 32-bit PAR. 3381 */ 3382 if (!ret) { 3383 /* We do not set any attribute bits in the PAR */ 3384 if (page_size == (1 << 24) 3385 && arm_feature(env, ARM_FEATURE_V7)) { 3386 par64 = (phys_addr & 0xff000000) | (1 << 1); 3387 } else { 3388 par64 = phys_addr & 0xfffff000; 3389 } 3390 if (!attrs.secure) { 3391 par64 |= (1 << 9); /* NS */ 3392 } 3393 } else { 3394 uint32_t fsr = arm_fi_to_sfsc(&fi); 3395 3396 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3397 ((fsr & 0xf) << 1) | 1; 3398 } 3399 } 3400 return par64; 3401 } 3402 3403 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3404 { 3405 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 3406 uint64_t par64; 3407 ARMMMUIdx mmu_idx; 3408 int el = arm_current_el(env); 3409 bool secure = arm_is_secure_below_el3(env); 3410 3411 switch (ri->opc2 & 6) { 3412 case 0: 3413 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3414 switch (el) { 3415 case 3: 3416 mmu_idx = ARMMMUIdx_SE3; 3417 break; 3418 case 2: 3419 g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */ 3420 /* fall through */ 3421 case 1: 3422 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { 3423 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3424 : ARMMMUIdx_Stage1_E1_PAN); 3425 } else { 3426 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3427 } 3428 break; 3429 default: 3430 g_assert_not_reached(); 3431 } 3432 break; 3433 case 2: 3434 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3435 switch (el) { 3436 case 3: 3437 mmu_idx = ARMMMUIdx_SE10_0; 3438 break; 3439 case 2: 3440 mmu_idx = ARMMMUIdx_Stage1_E0; 3441 break; 3442 case 1: 3443 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3444 break; 3445 default: 3446 g_assert_not_reached(); 3447 } 3448 break; 3449 case 4: 3450 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3451 mmu_idx = ARMMMUIdx_E10_1; 3452 break; 3453 case 6: 3454 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3455 mmu_idx = ARMMMUIdx_E10_0; 3456 break; 3457 default: 3458 g_assert_not_reached(); 3459 } 3460 3461 par64 = do_ats_write(env, value, access_type, mmu_idx); 3462 3463 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3464 } 3465 3466 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3467 uint64_t value) 3468 { 3469 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3470 uint64_t par64; 3471 3472 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); 3473 3474 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3475 } 3476 3477 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3478 bool isread) 3479 { 3480 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 3481 return CP_ACCESS_TRAP; 3482 } 3483 return CP_ACCESS_OK; 3484 } 3485 3486 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3487 uint64_t value) 3488 { 3489 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3490 ARMMMUIdx mmu_idx; 3491 int secure = arm_is_secure_below_el3(env); 3492 3493 switch (ri->opc2 & 6) { 3494 case 0: 3495 switch (ri->opc1) { 3496 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3497 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { 3498 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3499 : ARMMMUIdx_Stage1_E1_PAN); 3500 } else { 3501 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3502 } 3503 break; 3504 case 4: /* AT S1E2R, AT S1E2W */ 3505 mmu_idx = ARMMMUIdx_E2; 3506 break; 3507 case 6: /* AT S1E3R, AT S1E3W */ 3508 mmu_idx = ARMMMUIdx_SE3; 3509 break; 3510 default: 3511 g_assert_not_reached(); 3512 } 3513 break; 3514 case 2: /* AT S1E0R, AT S1E0W */ 3515 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3516 break; 3517 case 4: /* AT S12E1R, AT S12E1W */ 3518 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; 3519 break; 3520 case 6: /* AT S12E0R, AT S12E0W */ 3521 mmu_idx = secure ? 
ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; 3522 break; 3523 default: 3524 g_assert_not_reached(); 3525 } 3526 3527 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3528 } 3529 #endif 3530 3531 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3532 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3533 .access = PL1_RW, .resetvalue = 0, 3534 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3535 offsetoflow32(CPUARMState, cp15.par_ns) }, 3536 .writefn = par_write }, 3537 #ifndef CONFIG_USER_ONLY 3538 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3539 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3540 .access = PL1_W, .accessfn = ats_access, 3541 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 3542 #endif 3543 REGINFO_SENTINEL 3544 }; 3545 3546 /* Return basic MPU access permission bits. */ 3547 static uint32_t simple_mpu_ap_bits(uint32_t val) 3548 { 3549 uint32_t ret; 3550 uint32_t mask; 3551 int i; 3552 ret = 0; 3553 mask = 3; 3554 for (i = 0; i < 16; i += 2) { 3555 ret |= (val >> i) & mask; 3556 mask <<= 2; 3557 } 3558 return ret; 3559 } 3560 3561 /* Pad basic MPU access permission bits to extended format. */ 3562 static uint32_t extended_mpu_ap_bits(uint32_t val) 3563 { 3564 uint32_t ret; 3565 uint32_t mask; 3566 int i; 3567 ret = 0; 3568 mask = 3; 3569 for (i = 0; i < 16; i += 2) { 3570 ret |= (val & mask) << i; 3571 mask <<= 2; 3572 } 3573 return ret; 3574 } 3575 3576 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3577 uint64_t value) 3578 { 3579 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3580 } 3581 3582 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3583 { 3584 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3585 } 3586 3587 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3588 uint64_t value) 3589 { 3590 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3591 } 3592 3593 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3594 { 3595 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3596 } 3597 3598 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3599 { 3600 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3601 3602 if (!u32p) { 3603 return 0; 3604 } 3605 3606 u32p += env->pmsav7.rnr[M_REG_NS]; 3607 return *u32p; 3608 } 3609 3610 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3611 uint64_t value) 3612 { 3613 ARMCPU *cpu = env_archcpu(env); 3614 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3615 3616 if (!u32p) { 3617 return; 3618 } 3619 3620 u32p += env->pmsav7.rnr[M_REG_NS]; 3621 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3622 *u32p = value; 3623 } 3624 3625 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3626 uint64_t value) 3627 { 3628 ARMCPU *cpu = env_archcpu(env); 3629 uint32_t nrgs = cpu->pmsav7_dregion; 3630 3631 if (value >= nrgs) { 3632 qemu_log_mask(LOG_GUEST_ERROR, 3633 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3634 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3635 return; 3636 } 3637 3638 raw_write(env, ri, value); 3639 } 3640 3641 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3642 /* Reset for all these registers is handled in arm_cpu_reset(), 3643 * because the PMSAv7 is also used by M-profile CPUs, which do 3644 * not register cpregs but still need the state to be reset. 
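* As an illustration: the DRBAR/DRSR/DRACR accessors below index the per-region arrays by the current region number, so with pmsav7.rnr[M_REG_NS] == 2 a DRBAR read goes through pmsav7_read() and returns env->pmsav7.drbar[2].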
3645 */ 3646 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3647 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3648 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3649 .readfn = pmsav7_read, .writefn = pmsav7_write, 3650 .resetfn = arm_cp_reset_ignore }, 3651 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3652 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3653 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3654 .readfn = pmsav7_read, .writefn = pmsav7_write, 3655 .resetfn = arm_cp_reset_ignore }, 3656 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3657 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3658 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3659 .readfn = pmsav7_read, .writefn = pmsav7_write, 3660 .resetfn = arm_cp_reset_ignore }, 3661 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3662 .access = PL1_RW, 3663 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3664 .writefn = pmsav7_rgnr_write, 3665 .resetfn = arm_cp_reset_ignore }, 3666 REGINFO_SENTINEL 3667 }; 3668 3669 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3670 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3671 .access = PL1_RW, .type = ARM_CP_ALIAS, 3672 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3673 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3674 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3675 .access = PL1_RW, .type = ARM_CP_ALIAS, 3676 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3677 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3678 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3679 .access = PL1_RW, 3680 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3681 .resetvalue = 0, }, 3682 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3683 .access = PL1_RW, 3684 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3685 .resetvalue = 0, }, 3686 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3687 .access = PL1_RW, 3688 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3689 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3690 .access = PL1_RW, 3691 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3692 /* Protection region base and size registers */ 3693 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3694 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3695 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3696 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3697 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3698 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3699 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3700 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3701 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3702 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3703 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3704 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3705 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3706 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3707 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3708 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3709 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3710 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 3711 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 3712 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3713 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 3714 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 3715 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3716 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 3717 REGINFO_SENTINEL 3718 }; 3719 3720 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 3721 uint64_t value) 3722 { 3723 TCR *tcr = raw_ptr(env, ri); 3724 int maskshift = extract32(value, 0, 3); 3725 3726 if (!arm_feature(env, ARM_FEATURE_V8)) { 3727 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 3728 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 3729 * using Long-descriptor translation table format */ 3730 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 3731 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 3732 /* In an implementation that includes the Security Extensions 3733 * TTBCR has additional fields PD0 [4] and PD1 [5] for 3734 * Short-descriptor translation table format. 3735 */ 3736 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 3737 } else { 3738 value &= TTBCR_N; 3739 } 3740 } 3741 3742 /* Update the masks corresponding to the TCR bank being written. 3743 * Note that we always calculate mask and base_mask, but 3744 * they are only used for short-descriptor tables (ie if EAE is 0); 3745 * for long-descriptor tables the TCR fields are used differently 3746 * and the mask and base_mask values are meaningless. 3747 */ 3748 tcr->raw_tcr = value; 3749 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 3750 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 3751 } 3752 3753 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3754 uint64_t value) 3755 { 3756 ARMCPU *cpu = env_archcpu(env); 3757 TCR *tcr = raw_ptr(env, ri); 3758 3759 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3760 /* With LPAE the TTBCR could result in a change of ASID 3761 * via the TTBCR.A1 bit, so do a TLB flush. 3762 */ 3763 tlb_flush(CPU(cpu)); 3764 } 3765 /* Preserve the high half of TCR_EL1, set via TTBCR2. */ 3766 value = deposit64(tcr->raw_tcr, 0, 32, value); 3767 vmsa_ttbcr_raw_write(env, ri, value); 3768 } 3769 3770 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3771 { 3772 TCR *tcr = raw_ptr(env, ri); 3773 3774 /* Reset both the TCR as well as the masks corresponding to the bank of 3775 * the TCR being reset. 3776 */ 3777 tcr->raw_tcr = 0; 3778 tcr->mask = 0; 3779 tcr->base_mask = 0xffffc000u; 3780 } 3781 3782 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, 3783 uint64_t value) 3784 { 3785 ARMCPU *cpu = env_archcpu(env); 3786 TCR *tcr = raw_ptr(env, ri); 3787 3788 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 3789 tlb_flush(CPU(cpu)); 3790 tcr->raw_tcr = value; 3791 } 3792 3793 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3794 uint64_t value) 3795 { 3796 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 3797 if (cpreg_field_is_64bit(ri) && 3798 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3799 ARMCPU *cpu = env_archcpu(env); 3800 tlb_flush(CPU(cpu)); 3801 } 3802 raw_write(env, ri, value); 3803 } 3804 3805 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3806 uint64_t value) 3807 { 3808 /* 3809 * If we are running with E2&0 regime, then an ASID is active.
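* (When E2H is in effect the ASID field sits in TTBRx_EL2[63:48], which is why the check below compares extract64(old ^ new, 48, 16).)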
3810 * Flush if that might be changing. Note we're not checking 3811 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 3812 * holds the active ASID, only checking the field that might. 3813 */ 3814 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 3815 (arm_hcr_el2_eff(env) & HCR_E2H)) { 3816 tlb_flush_by_mmuidx(env_cpu(env), 3817 ARMMMUIdxBit_E20_2 | 3818 ARMMMUIdxBit_E20_2_PAN | 3819 ARMMMUIdxBit_E20_0); 3820 } 3821 raw_write(env, ri, value); 3822 } 3823 3824 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3825 uint64_t value) 3826 { 3827 ARMCPU *cpu = env_archcpu(env); 3828 CPUState *cs = CPU(cpu); 3829 3830 /* 3831 * A change in VMID to the stage2 page table (Stage2) invalidates 3832 * the combined stage 1&2 tlbs (EL10_1 and EL10_0). 3833 */ 3834 if (raw_read(env, ri) != value) { 3835 tlb_flush_by_mmuidx(cs, 3836 ARMMMUIdxBit_E10_1 | 3837 ARMMMUIdxBit_E10_1_PAN | 3838 ARMMMUIdxBit_E10_0 | 3839 ARMMMUIdxBit_Stage2); 3840 raw_write(env, ri, value); 3841 } 3842 } 3843 3844 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 3845 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3846 .access = PL1_RW, .type = ARM_CP_ALIAS, 3847 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 3848 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 3849 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3850 .access = PL1_RW, .resetvalue = 0, 3851 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 3852 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 3853 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 3854 .access = PL1_RW, .resetvalue = 0, 3855 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 3856 offsetof(CPUARMState, cp15.dfar_ns) } }, 3857 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 3858 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 3859 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 3860 .resetvalue = 0, }, 3861 REGINFO_SENTINEL 3862 }; 3863 3864 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 3865 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 3866 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 3867 .access = PL1_RW, 3868 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 3869 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 3870 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 3871 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3872 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3873 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 3874 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 3875 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 3876 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3877 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3878 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 3879 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 3880 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3881 .access = PL1_RW, .writefn = vmsa_tcr_el12_write, 3882 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 3883 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 3884 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3885 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 3886 .raw_writefn = vmsa_ttbcr_raw_write, 3887 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 3888 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 3889 REGINFO_SENTINEL 3890 }; 3891 3892 /* Note that unlike TTBCR, writing 
to TTBCR2 does not require flushing 3893 * qemu tlbs nor adjusting cached masks. 3894 */ 3895 static const ARMCPRegInfo ttbcr2_reginfo = { 3896 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 3897 .access = PL1_RW, .type = ARM_CP_ALIAS, 3898 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 3899 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, 3900 }; 3901 3902 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 3903 uint64_t value) 3904 { 3905 env->cp15.c15_ticonfig = value & 0xe7; 3906 /* The OS_TYPE bit in this register changes the reported CPUID! */ 3907 env->cp15.c0_cpuid = (value & (1 << 5)) ? 3908 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 3909 } 3910 3911 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 3912 uint64_t value) 3913 { 3914 env->cp15.c15_threadid = value & 0xffff; 3915 } 3916 3917 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 3918 uint64_t value) 3919 { 3920 /* Wait-for-interrupt (deprecated) */ 3921 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 3922 } 3923 3924 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 3925 uint64_t value) 3926 { 3927 /* On OMAP there are registers indicating the max/min index of dcache lines 3928 * containing a dirty line; cache flush operations have to reset these. 3929 */ 3930 env->cp15.c15_i_max = 0x000; 3931 env->cp15.c15_i_min = 0xff0; 3932 } 3933 3934 static const ARMCPRegInfo omap_cp_reginfo[] = { 3935 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 3936 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 3937 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 3938 .resetvalue = 0, }, 3939 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 3940 .access = PL1_RW, .type = ARM_CP_NOP }, 3941 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 3942 .access = PL1_RW, 3943 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 3944 .writefn = omap_ticonfig_write }, 3945 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 3946 .access = PL1_RW, 3947 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 3948 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 3949 .access = PL1_RW, .resetvalue = 0xff0, 3950 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 3951 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 3952 .access = PL1_RW, 3953 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 3954 .writefn = omap_threadid_write }, 3955 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 3956 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3957 .type = ARM_CP_NO_RAW, 3958 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 3959 /* TODO: Peripheral port remap register: 3960 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 3961 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 3962 * when MMU is off. 
3963 */ 3964 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 3965 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 3966 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 3967 .writefn = omap_cachemaint_write }, 3968 { .name = "C9", .cp = 15, .crn = 9, 3969 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 3970 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 3971 REGINFO_SENTINEL 3972 }; 3973 3974 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3975 uint64_t value) 3976 { 3977 env->cp15.c15_cpar = value & 0x3fff; 3978 } 3979 3980 static const ARMCPRegInfo xscale_cp_reginfo[] = { 3981 { .name = "XSCALE_CPAR", 3982 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3983 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 3984 .writefn = xscale_cpar_write, }, 3985 { .name = "XSCALE_AUXCR", 3986 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 3987 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 3988 .resetvalue = 0, }, 3989 /* XScale specific cache-lockdown: since we have no cache we NOP these 3990 * and hope the guest does not really rely on cache behaviour. 3991 */ 3992 { .name = "XSCALE_LOCK_ICACHE_LINE", 3993 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 3994 .access = PL1_W, .type = ARM_CP_NOP }, 3995 { .name = "XSCALE_UNLOCK_ICACHE", 3996 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 3997 .access = PL1_W, .type = ARM_CP_NOP }, 3998 { .name = "XSCALE_DCACHE_LOCK", 3999 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 4000 .access = PL1_RW, .type = ARM_CP_NOP }, 4001 { .name = "XSCALE_UNLOCK_DCACHE", 4002 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 4003 .access = PL1_W, .type = ARM_CP_NOP }, 4004 REGINFO_SENTINEL 4005 }; 4006 4007 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 4008 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 4009 * implementation of this implementation-defined space. 4010 * Ideally this should eventually disappear in favour of actually 4011 * implementing the correct behaviour for all cores. 
4012 */ 4013 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 4014 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4015 .access = PL1_RW, 4016 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 4017 .resetvalue = 0 }, 4018 REGINFO_SENTINEL 4019 }; 4020 4021 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 4022 /* Cache status: RAZ because we have no cache so it's always clean */ 4023 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 4024 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4025 .resetvalue = 0 }, 4026 REGINFO_SENTINEL 4027 }; 4028 4029 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 4030 /* We never have a block transfer operation in progress */ 4031 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 4032 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4033 .resetvalue = 0 }, 4034 /* The cache ops themselves: these all NOP for QEMU */ 4035 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 4036 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4037 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 4038 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4039 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 4040 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4041 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 4042 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4043 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 4044 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4045 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 4046 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4047 REGINFO_SENTINEL 4048 }; 4049 4050 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 4051 /* The cache test-and-clean instructions always return (1 << 30) 4052 * to indicate that there are no dirty cache lines. 4053 */ 4054 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 4055 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4056 .resetvalue = (1 << 30) }, 4057 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 4058 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4059 .resetvalue = (1 << 30) }, 4060 REGINFO_SENTINEL 4061 }; 4062 4063 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 4064 /* Ignore ReadBuffer accesses */ 4065 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 4066 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4067 .access = PL1_RW, .resetvalue = 0, 4068 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 4069 REGINFO_SENTINEL 4070 }; 4071 4072 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4073 { 4074 ARMCPU *cpu = env_archcpu(env); 4075 unsigned int cur_el = arm_current_el(env); 4076 bool secure = arm_is_secure(env); 4077 4078 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 4079 return env->cp15.vpidr_el2; 4080 } 4081 return raw_read(env, ri); 4082 } 4083 4084 static uint64_t mpidr_read_val(CPUARMState *env) 4085 { 4086 ARMCPU *cpu = env_archcpu(env); 4087 uint64_t mpidr = cpu->mp_affinity; 4088 4089 if (arm_feature(env, ARM_FEATURE_V7MP)) { 4090 mpidr |= (1U << 31); 4091 /* Cores which are uniprocessor (non-coherent) 4092 * but still implement the MP extensions set 4093 * bit 30. (For instance, Cortex-R5).
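* As a worked example, a uniprocessor v7MP core with mp_affinity == 0 therefore reads MPIDR as (1U << 31) | (1U << 30) == 0xC0000000.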
4094 */ 4095 if (cpu->mp_is_up) { 4096 mpidr |= (1u << 30); 4097 } 4098 } 4099 return mpidr; 4100 } 4101 4102 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4103 { 4104 unsigned int cur_el = arm_current_el(env); 4105 bool secure = arm_is_secure(env); 4106 4107 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 4108 return env->cp15.vmpidr_el2; 4109 } 4110 return mpidr_read_val(env); 4111 } 4112 4113 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4114 /* NOP AMAIR0/1 */ 4115 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4116 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4117 .access = PL1_RW, .type = ARM_CP_CONST, 4118 .resetvalue = 0 }, 4119 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4120 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4121 .access = PL1_RW, .type = ARM_CP_CONST, 4122 .resetvalue = 0 }, 4123 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4124 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4125 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4126 offsetof(CPUARMState, cp15.par_ns)} }, 4127 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4128 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4129 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4130 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4131 .writefn = vmsa_ttbr_write, }, 4132 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4133 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4134 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4135 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4136 .writefn = vmsa_ttbr_write, }, 4137 REGINFO_SENTINEL 4138 }; 4139 4140 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4141 { 4142 return vfp_get_fpcr(env); 4143 } 4144 4145 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4146 uint64_t value) 4147 { 4148 vfp_set_fpcr(env, value); 4149 } 4150 4151 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4152 { 4153 return vfp_get_fpsr(env); 4154 } 4155 4156 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4157 uint64_t value) 4158 { 4159 vfp_set_fpsr(env, value); 4160 } 4161 4162 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4163 bool isread) 4164 { 4165 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4166 return CP_ACCESS_TRAP; 4167 } 4168 return CP_ACCESS_OK; 4169 } 4170 4171 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4172 uint64_t value) 4173 { 4174 env->daif = value & PSTATE_DAIF; 4175 } 4176 4177 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4178 { 4179 return env->pstate & PSTATE_PAN; 4180 } 4181 4182 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4183 uint64_t value) 4184 { 4185 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4186 } 4187 4188 static const ARMCPRegInfo pan_reginfo = { 4189 .name = "PAN", .state = ARM_CP_STATE_AA64, 4190 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4191 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4192 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4193 }; 4194 4195 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4196 { 4197 return env->pstate & PSTATE_UAO; 4198 } 4199 4200 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4201 uint64_t value) 4202 { 4203 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4204 } 4205 4206 static const 
ARMCPRegInfo uao_reginfo = { 4207 .name = "UAO", .state = ARM_CP_STATE_AA64, 4208 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4209 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4210 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4211 }; 4212 4213 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 4214 const ARMCPRegInfo *ri, 4215 bool isread) 4216 { 4217 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 4218 * SCTLR_EL1.UCI is set. 4219 */ 4220 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UCI)) { 4221 return CP_ACCESS_TRAP; 4222 } 4223 return CP_ACCESS_OK; 4224 } 4225 4226 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 4227 * Page D4-1736 (DDI0487A.b) 4228 */ 4229 4230 static int vae1_tlbmask(CPUARMState *env) 4231 { 4232 /* Since we exclude secure first, we may read HCR_EL2 directly. */ 4233 if (arm_is_secure_below_el3(env)) { 4234 return ARMMMUIdxBit_SE10_1 | 4235 ARMMMUIdxBit_SE10_1_PAN | 4236 ARMMMUIdxBit_SE10_0; 4237 } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) 4238 == (HCR_E2H | HCR_TGE)) { 4239 return ARMMMUIdxBit_E20_2 | 4240 ARMMMUIdxBit_E20_2_PAN | 4241 ARMMMUIdxBit_E20_0; 4242 } else { 4243 return ARMMMUIdxBit_E10_1 | 4244 ARMMMUIdxBit_E10_1_PAN | 4245 ARMMMUIdxBit_E10_0; 4246 } 4247 } 4248 4249 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4250 uint64_t value) 4251 { 4252 CPUState *cs = env_cpu(env); 4253 int mask = vae1_tlbmask(env); 4254 4255 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4256 } 4257 4258 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4259 uint64_t value) 4260 { 4261 CPUState *cs = env_cpu(env); 4262 int mask = vae1_tlbmask(env); 4263 4264 if (tlb_force_broadcast(env)) { 4265 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4266 } else { 4267 tlb_flush_by_mmuidx(cs, mask); 4268 } 4269 } 4270 4271 static int alle1_tlbmask(CPUARMState *env) 4272 { 4273 /* 4274 * Note that the 'ALL' scope must invalidate both stage 1 and 4275 * stage 2 translations, whereas most other scopes only invalidate 4276 * stage 1 translations. 
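* (Hence the EL2-present case below also ORs in ARMMMUIdxBit_Stage2, which vae1_tlbmask() above deliberately leaves out.)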
4277 */ 4278 if (arm_is_secure_below_el3(env)) { 4279 return ARMMMUIdxBit_SE10_1 | 4280 ARMMMUIdxBit_SE10_1_PAN | 4281 ARMMMUIdxBit_SE10_0; 4282 } else if (arm_feature(env, ARM_FEATURE_EL2)) { 4283 return ARMMMUIdxBit_E10_1 | 4284 ARMMMUIdxBit_E10_1_PAN | 4285 ARMMMUIdxBit_E10_0 | 4286 ARMMMUIdxBit_Stage2; 4287 } else { 4288 return ARMMMUIdxBit_E10_1 | 4289 ARMMMUIdxBit_E10_1_PAN | 4290 ARMMMUIdxBit_E10_0; 4291 } 4292 } 4293 4294 static int e2_tlbmask(CPUARMState *env) 4295 { 4296 /* TODO: ARMv8.4-SecEL2 */ 4297 return ARMMMUIdxBit_E20_0 | 4298 ARMMMUIdxBit_E20_2 | 4299 ARMMMUIdxBit_E20_2_PAN | 4300 ARMMMUIdxBit_E2; 4301 } 4302 4303 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4304 uint64_t value) 4305 { 4306 CPUState *cs = env_cpu(env); 4307 int mask = alle1_tlbmask(env); 4308 4309 tlb_flush_by_mmuidx(cs, mask); 4310 } 4311 4312 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4313 uint64_t value) 4314 { 4315 CPUState *cs = env_cpu(env); 4316 int mask = e2_tlbmask(env); 4317 4318 tlb_flush_by_mmuidx(cs, mask); 4319 } 4320 4321 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4322 uint64_t value) 4323 { 4324 ARMCPU *cpu = env_archcpu(env); 4325 CPUState *cs = CPU(cpu); 4326 4327 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); 4328 } 4329 4330 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4331 uint64_t value) 4332 { 4333 CPUState *cs = env_cpu(env); 4334 int mask = alle1_tlbmask(env); 4335 4336 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4337 } 4338 4339 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4340 uint64_t value) 4341 { 4342 CPUState *cs = env_cpu(env); 4343 int mask = e2_tlbmask(env); 4344 4345 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4346 } 4347 4348 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4349 uint64_t value) 4350 { 4351 CPUState *cs = env_cpu(env); 4352 4353 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); 4354 } 4355 4356 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4357 uint64_t value) 4358 { 4359 /* Invalidate by VA, EL2 4360 * Currently handles both VAE2 and VALE2, since we don't support 4361 * flush-last-level-only. 4362 */ 4363 CPUState *cs = env_cpu(env); 4364 int mask = e2_tlbmask(env); 4365 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4366 4367 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4368 } 4369 4370 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4371 uint64_t value) 4372 { 4373 /* Invalidate by VA, EL3 4374 * Currently handles both VAE3 and VALE3, since we don't support 4375 * flush-last-level-only. 4376 */ 4377 ARMCPU *cpu = env_archcpu(env); 4378 CPUState *cs = CPU(cpu); 4379 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4380 4381 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); 4382 } 4383 4384 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4385 uint64_t value) 4386 { 4387 CPUState *cs = env_cpu(env); 4388 int mask = vae1_tlbmask(env); 4389 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4390 4391 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4392 } 4393 4394 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4395 uint64_t value) 4396 { 4397 /* Invalidate by VA, EL1&0 (AArch64 version). 
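* The Xt argument carries VA[55:12] in its low bits, so sextract64(value << 12, 0, 56) below rebuilds a sign-extended, page-aligned virtual address for the flush.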
4398 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 4399 * since we don't support flush-for-specific-ASID-only or 4400 * flush-last-level-only. 4401 */ 4402 CPUState *cs = env_cpu(env); 4403 int mask = vae1_tlbmask(env); 4404 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4405 4406 if (tlb_force_broadcast(env)) { 4407 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4408 } else { 4409 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4410 } 4411 } 4412 4413 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4414 uint64_t value) 4415 { 4416 CPUState *cs = env_cpu(env); 4417 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4418 4419 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4420 ARMMMUIdxBit_E2); 4421 } 4422 4423 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4424 uint64_t value) 4425 { 4426 CPUState *cs = env_cpu(env); 4427 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4428 4429 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4430 ARMMMUIdxBit_SE3); 4431 } 4432 4433 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4434 uint64_t value) 4435 { 4436 /* Invalidate by IPA. This has to invalidate any structures that 4437 * contain only stage 2 translation information, but does not need 4438 * to apply to structures that contain combined stage 1 and stage 2 4439 * translation information. 4440 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 4441 */ 4442 ARMCPU *cpu = env_archcpu(env); 4443 CPUState *cs = CPU(cpu); 4444 uint64_t pageaddr; 4445 4446 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4447 return; 4448 } 4449 4450 pageaddr = sextract64(value << 12, 0, 48); 4451 4452 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); 4453 } 4454 4455 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4456 uint64_t value) 4457 { 4458 CPUState *cs = env_cpu(env); 4459 uint64_t pageaddr; 4460 4461 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4462 return; 4463 } 4464 4465 pageaddr = sextract64(value << 12, 0, 48); 4466 4467 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4468 ARMMMUIdxBit_Stage2); 4469 } 4470 4471 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4472 bool isread) 4473 { 4474 int cur_el = arm_current_el(env); 4475 4476 if (cur_el < 2) { 4477 uint64_t hcr = arm_hcr_el2_eff(env); 4478 4479 if (cur_el == 0) { 4480 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4481 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4482 return CP_ACCESS_TRAP_EL2; 4483 } 4484 } else { 4485 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4486 return CP_ACCESS_TRAP; 4487 } 4488 if (hcr & HCR_TDZ) { 4489 return CP_ACCESS_TRAP_EL2; 4490 } 4491 } 4492 } else if (hcr & HCR_TDZ) { 4493 return CP_ACCESS_TRAP_EL2; 4494 } 4495 } 4496 return CP_ACCESS_OK; 4497 } 4498 4499 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4500 { 4501 ARMCPU *cpu = env_archcpu(env); 4502 int dzp_bit = 1 << 4; 4503 4504 /* DZP indicates whether DC ZVA access is allowed */ 4505 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4506 dzp_bit = 0; 4507 } 4508 return cpu->dcz_blocksize | dzp_bit; 4509 } 4510 4511 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4512 bool isread) 4513 { 4514 if (!(env->pstate & PSTATE_SP)) { 4515 /* Access to SP_EL0 is undefined if it's being used as 4516 * the stack 
pointer. 4517 */ 4518 return CP_ACCESS_TRAP_UNCATEGORIZED; 4519 } 4520 return CP_ACCESS_OK; 4521 } 4522 4523 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4524 { 4525 return env->pstate & PSTATE_SP; 4526 } 4527 4528 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4529 { 4530 update_spsel(env, val); 4531 } 4532 4533 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4534 uint64_t value) 4535 { 4536 ARMCPU *cpu = env_archcpu(env); 4537 4538 if (raw_read(env, ri) == value) { 4539 /* Skip the TLB flush if nothing actually changed; Linux likes 4540 * to do a lot of pointless SCTLR writes. 4541 */ 4542 return; 4543 } 4544 4545 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4546 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4547 value &= ~SCTLR_M; 4548 } 4549 4550 raw_write(env, ri, value); 4551 /* ??? Lots of these bits are not implemented. */ 4552 /* This may enable/disable the MMU, so do a TLB flush. */ 4553 tlb_flush(CPU(cpu)); 4554 4555 if (ri->type & ARM_CP_SUPPRESS_TB_END) { 4556 /* 4557 * Normally we would always end the TB on an SCTLR write; see the 4558 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 4559 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 4560 * of hflags from the translator, so do it here. 4561 */ 4562 arm_rebuild_hflags(env); 4563 } 4564 } 4565 4566 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 4567 bool isread) 4568 { 4569 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 4570 return CP_ACCESS_TRAP_FP_EL2; 4571 } 4572 if (env->cp15.cptr_el[3] & CPTR_TFP) { 4573 return CP_ACCESS_TRAP_FP_EL3; 4574 } 4575 return CP_ACCESS_OK; 4576 } 4577 4578 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4579 uint64_t value) 4580 { 4581 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4582 } 4583 4584 static const ARMCPRegInfo v8_cp_reginfo[] = { 4585 /* Minimal set of EL0-visible registers. This will need to be expanded 4586 * significantly for system emulation of AArch64 CPUs. 
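* (NZCV below is typed ARM_CP_NZCV because the flag values live in the cached NF/ZF/CF/VF fields rather than in a cpreg field, so the translator handles it specially.)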
4587 */ 4588 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4589 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4590 .access = PL0_RW, .type = ARM_CP_NZCV }, 4591 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4592 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4593 .type = ARM_CP_NO_RAW, 4594 .access = PL0_RW, .accessfn = aa64_daif_access, 4595 .fieldoffset = offsetof(CPUARMState, daif), 4596 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4597 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4598 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4599 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4600 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4601 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4602 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4603 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4604 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4605 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4606 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4607 .access = PL0_R, .type = ARM_CP_NO_RAW, 4608 .readfn = aa64_dczid_read }, 4609 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4610 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4611 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4612 #ifndef CONFIG_USER_ONLY 4613 /* Avoid overhead of an access check that always passes in user-mode */ 4614 .accessfn = aa64_zva_access, 4615 #endif 4616 }, 4617 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4618 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4619 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4620 /* Cache ops: all NOPs since we don't emulate caches */ 4621 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4622 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4623 .access = PL1_W, .type = ARM_CP_NOP }, 4624 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4625 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4626 .access = PL1_W, .type = ARM_CP_NOP }, 4627 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4628 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4629 .access = PL0_W, .type = ARM_CP_NOP, 4630 .accessfn = aa64_cacheop_access }, 4631 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4632 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4633 .access = PL1_W, .type = ARM_CP_NOP }, 4634 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4635 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4636 .access = PL1_W, .type = ARM_CP_NOP }, 4637 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4638 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4639 .access = PL0_W, .type = ARM_CP_NOP, 4640 .accessfn = aa64_cacheop_access }, 4641 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4642 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4643 .access = PL1_W, .type = ARM_CP_NOP }, 4644 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4645 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4646 .access = PL0_W, .type = ARM_CP_NOP, 4647 .accessfn = aa64_cacheop_access }, 4648 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4649 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4650 .access = PL0_W, .type = ARM_CP_NOP, 4651 .accessfn = aa64_cacheop_access }, 4652 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4653 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4654 .access = PL1_W, .type = ARM_CP_NOP }, 4655 /* TLBI operations */ 4656 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4657 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4658 
.access = PL1_W, .type = ARM_CP_NO_RAW, 4659 .writefn = tlbi_aa64_vmalle1is_write }, 4660 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4661 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4662 .access = PL1_W, .type = ARM_CP_NO_RAW, 4663 .writefn = tlbi_aa64_vae1is_write }, 4664 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4665 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4666 .access = PL1_W, .type = ARM_CP_NO_RAW, 4667 .writefn = tlbi_aa64_vmalle1is_write }, 4668 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4669 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4670 .access = PL1_W, .type = ARM_CP_NO_RAW, 4671 .writefn = tlbi_aa64_vae1is_write }, 4672 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4673 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4674 .access = PL1_W, .type = ARM_CP_NO_RAW, 4675 .writefn = tlbi_aa64_vae1is_write }, 4676 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4677 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4678 .access = PL1_W, .type = ARM_CP_NO_RAW, 4679 .writefn = tlbi_aa64_vae1is_write }, 4680 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4681 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4682 .access = PL1_W, .type = ARM_CP_NO_RAW, 4683 .writefn = tlbi_aa64_vmalle1_write }, 4684 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4685 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4686 .access = PL1_W, .type = ARM_CP_NO_RAW, 4687 .writefn = tlbi_aa64_vae1_write }, 4688 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4689 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4690 .access = PL1_W, .type = ARM_CP_NO_RAW, 4691 .writefn = tlbi_aa64_vmalle1_write }, 4692 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4693 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 4694 .access = PL1_W, .type = ARM_CP_NO_RAW, 4695 .writefn = tlbi_aa64_vae1_write }, 4696 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4697 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4698 .access = PL1_W, .type = ARM_CP_NO_RAW, 4699 .writefn = tlbi_aa64_vae1_write }, 4700 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4701 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4702 .access = PL1_W, .type = ARM_CP_NO_RAW, 4703 .writefn = tlbi_aa64_vae1_write }, 4704 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4705 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4706 .access = PL2_W, .type = ARM_CP_NO_RAW, 4707 .writefn = tlbi_aa64_ipas2e1is_write }, 4708 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4709 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4710 .access = PL2_W, .type = ARM_CP_NO_RAW, 4711 .writefn = tlbi_aa64_ipas2e1is_write }, 4712 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4713 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4714 .access = PL2_W, .type = ARM_CP_NO_RAW, 4715 .writefn = tlbi_aa64_alle1is_write }, 4716 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4717 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 4718 .access = PL2_W, .type = ARM_CP_NO_RAW, 4719 .writefn = tlbi_aa64_alle1is_write }, 4720 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4721 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4722 .access = PL2_W, .type = ARM_CP_NO_RAW, 4723 .writefn = tlbi_aa64_ipas2e1_write }, 4724 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4725 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4726 .access = PL2_W, .type = ARM_CP_NO_RAW, 4727 .writefn = 
tlbi_aa64_ipas2e1_write }, 4728 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4729 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4730 .access = PL2_W, .type = ARM_CP_NO_RAW, 4731 .writefn = tlbi_aa64_alle1_write }, 4732 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4733 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4734 .access = PL2_W, .type = ARM_CP_NO_RAW, 4735 .writefn = tlbi_aa64_alle1is_write }, 4736 #ifndef CONFIG_USER_ONLY 4737 /* 64 bit address translation operations */ 4738 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4739 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4740 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4741 .writefn = ats_write64 }, 4742 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4743 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4744 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4745 .writefn = ats_write64 }, 4746 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4747 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4748 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4749 .writefn = ats_write64 }, 4750 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4751 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4752 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4753 .writefn = ats_write64 }, 4754 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4755 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4756 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4757 .writefn = ats_write64 }, 4758 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4759 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4760 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4761 .writefn = ats_write64 }, 4762 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4763 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4764 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4765 .writefn = ats_write64 }, 4766 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 4767 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4768 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4769 .writefn = ats_write64 }, 4770 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4771 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4772 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4773 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4774 .writefn = ats_write64 }, 4775 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4776 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4777 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4778 .writefn = ats_write64 }, 4779 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4780 .type = ARM_CP_ALIAS, 4781 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4782 .access = PL1_RW, .resetvalue = 0, 4783 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4784 .writefn = par_write }, 4785 #endif 4786 /* TLB invalidate last level of translation table walk */ 4787 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4788 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 4789 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4790 .type = ARM_CP_NO_RAW, .access = PL1_W, 4791 .writefn = tlbimvaa_is_write }, 4792 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4793 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 4794 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, 
.crn = 8, .crm = 7, .opc2 = 7, 4795 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 4796 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4797 .type = ARM_CP_NO_RAW, .access = PL2_W, 4798 .writefn = tlbimva_hyp_write }, 4799 { .name = "TLBIMVALHIS", 4800 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4801 .type = ARM_CP_NO_RAW, .access = PL2_W, 4802 .writefn = tlbimva_hyp_is_write }, 4803 { .name = "TLBIIPAS2", 4804 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4805 .type = ARM_CP_NO_RAW, .access = PL2_W, 4806 .writefn = tlbiipas2_write }, 4807 { .name = "TLBIIPAS2IS", 4808 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4809 .type = ARM_CP_NO_RAW, .access = PL2_W, 4810 .writefn = tlbiipas2_is_write }, 4811 { .name = "TLBIIPAS2L", 4812 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4813 .type = ARM_CP_NO_RAW, .access = PL2_W, 4814 .writefn = tlbiipas2_write }, 4815 { .name = "TLBIIPAS2LIS", 4816 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4817 .type = ARM_CP_NO_RAW, .access = PL2_W, 4818 .writefn = tlbiipas2_is_write }, 4819 /* 32 bit cache operations */ 4820 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4821 .type = ARM_CP_NOP, .access = PL1_W }, 4822 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4823 .type = ARM_CP_NOP, .access = PL1_W }, 4824 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4825 .type = ARM_CP_NOP, .access = PL1_W }, 4826 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 4827 .type = ARM_CP_NOP, .access = PL1_W }, 4828 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 4829 .type = ARM_CP_NOP, .access = PL1_W }, 4830 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 4831 .type = ARM_CP_NOP, .access = PL1_W }, 4832 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4833 .type = ARM_CP_NOP, .access = PL1_W }, 4834 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4835 .type = ARM_CP_NOP, .access = PL1_W }, 4836 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 4837 .type = ARM_CP_NOP, .access = PL1_W }, 4838 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4839 .type = ARM_CP_NOP, .access = PL1_W }, 4840 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 4841 .type = ARM_CP_NOP, .access = PL1_W }, 4842 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 4843 .type = ARM_CP_NOP, .access = PL1_W }, 4844 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4845 .type = ARM_CP_NOP, .access = PL1_W }, 4846 /* MMU Domain access control / MPU write buffer control */ 4847 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 4848 .access = PL1_RW, .resetvalue = 0, 4849 .writefn = dacr_write, .raw_writefn = raw_write, 4850 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 4851 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 4852 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 4853 .type = ARM_CP_ALIAS, 4854 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 4855 .access = PL1_RW, 4856 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 4857 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 4858 .type = ARM_CP_ALIAS, 4859 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 4860 .access = PL1_RW, 4861 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 4862 /* We rely 
on the access checks not allowing the guest to write to the 4863 * state field when SPSel indicates that it's being used as the stack 4864 * pointer. 4865 */ 4866 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 4867 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 4868 .access = PL1_RW, .accessfn = sp_el0_access, 4869 .type = ARM_CP_ALIAS, 4870 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 4871 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 4872 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 4873 .access = PL2_RW, .type = ARM_CP_ALIAS, 4874 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 4875 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 4876 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 4877 .type = ARM_CP_NO_RAW, 4878 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 4879 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 4880 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 4881 .type = ARM_CP_ALIAS, 4882 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 4883 .access = PL2_RW, .accessfn = fpexc32_access }, 4884 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 4885 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 4886 .access = PL2_RW, .resetvalue = 0, 4887 .writefn = dacr_write, .raw_writefn = raw_write, 4888 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 4889 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 4890 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 4891 .access = PL2_RW, .resetvalue = 0, 4892 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 4893 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 4894 .type = ARM_CP_ALIAS, 4895 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 4896 .access = PL2_RW, 4897 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 4898 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 4899 .type = ARM_CP_ALIAS, 4900 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 4901 .access = PL2_RW, 4902 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 4903 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 4904 .type = ARM_CP_ALIAS, 4905 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 4906 .access = PL2_RW, 4907 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 4908 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 4909 .type = ARM_CP_ALIAS, 4910 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 4911 .access = PL2_RW, 4912 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 4913 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 4914 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 4915 .resetvalue = 0, 4916 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 4917 { .name = "SDCR", .type = ARM_CP_ALIAS, 4918 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 4919 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4920 .writefn = sdcr_write, 4921 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 4922 REGINFO_SENTINEL 4923 }; 4924 4925 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. 
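* The entries that follow are CONST or RAZ/WI stand-ins, so e.g. an MRS of HCR_EL2 from EL3 on a CPU that has EL3 but no EL2 simply returns 0.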
*/ 4926 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 4927 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 4928 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 4929 .access = PL2_RW, 4930 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 4931 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 4932 .type = ARM_CP_NO_RAW, 4933 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4934 .access = PL2_RW, 4935 .type = ARM_CP_CONST, .resetvalue = 0 }, 4936 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 4937 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 4938 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4939 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 4940 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 4941 .access = PL2_RW, 4942 .type = ARM_CP_CONST, .resetvalue = 0 }, 4943 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 4944 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 4945 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4946 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 4947 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 4948 .access = PL2_RW, .type = ARM_CP_CONST, 4949 .resetvalue = 0 }, 4950 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 4951 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 4952 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4953 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 4954 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 4955 .access = PL2_RW, .type = ARM_CP_CONST, 4956 .resetvalue = 0 }, 4957 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 4958 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 4959 .access = PL2_RW, .type = ARM_CP_CONST, 4960 .resetvalue = 0 }, 4961 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 4962 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 4963 .access = PL2_RW, .type = ARM_CP_CONST, 4964 .resetvalue = 0 }, 4965 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 4966 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 4967 .access = PL2_RW, .type = ARM_CP_CONST, 4968 .resetvalue = 0 }, 4969 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 4970 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 4971 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4972 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 4973 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4974 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 4975 .type = ARM_CP_CONST, .resetvalue = 0 }, 4976 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 4977 .cp = 15, .opc1 = 6, .crm = 2, 4978 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4979 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 4980 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 4981 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 4982 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4983 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 4984 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 4985 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4986 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 4987 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 4988 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4989 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 4990 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 4991 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4992 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 4993 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4994 .resetvalue = 0 }, 
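/* The EL2 views of the generic timer are likewise constant zero in this no-EL2 configuration. */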
4995 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4996 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4997 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4998 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4999 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5000 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5001 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5002 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5003 .resetvalue = 0 }, 5004 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5005 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5006 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5007 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5008 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5009 .resetvalue = 0 }, 5010 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5011 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5012 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5013 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5014 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5015 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5016 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5017 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5018 .access = PL2_RW, .accessfn = access_tda, 5019 .type = ARM_CP_CONST, .resetvalue = 0 }, 5020 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 5021 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5022 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5023 .type = ARM_CP_CONST, .resetvalue = 0 }, 5024 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5025 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5026 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5027 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5028 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5029 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5030 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5031 .type = ARM_CP_CONST, 5032 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5033 .access = PL2_RW, .resetvalue = 0 }, 5034 REGINFO_SENTINEL 5035 }; 5036 5037 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 5038 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 5039 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5040 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5041 .access = PL2_RW, 5042 .type = ARM_CP_CONST, .resetvalue = 0 }, 5043 REGINFO_SENTINEL 5044 }; 5045 5046 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5047 { 5048 ARMCPU *cpu = env_archcpu(env); 5049 /* Begin with bits defined in base ARMv8.0. */ 5050 uint64_t valid_mask = MAKE_64BIT_MASK(0, 34); 5051 5052 if (arm_feature(env, ARM_FEATURE_EL3)) { 5053 valid_mask &= ~HCR_HCD; 5054 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5055 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 5056 * However, if we're using the SMC PSCI conduit then QEMU is 5057 * effectively acting like EL3 firmware and so the guest at 5058 * EL2 should retain the ability to prevent EL1 from being 5059 * able to make SMC calls into the ersatz firmware, so in 5060 * that case HCR.TSC should be read/write. 
5061 */ 5062 valid_mask &= ~HCR_TSC; 5063 } 5064 if (cpu_isar_feature(aa64_vh, cpu)) { 5065 valid_mask |= HCR_E2H; 5066 } 5067 if (cpu_isar_feature(aa64_lor, cpu)) { 5068 valid_mask |= HCR_TLOR; 5069 } 5070 if (cpu_isar_feature(aa64_pauth, cpu)) { 5071 valid_mask |= HCR_API | HCR_APK; 5072 } 5073 5074 /* Clear RES0 bits. */ 5075 value &= valid_mask; 5076 5077 /* These bits change the MMU setup: 5078 * HCR_VM enables stage 2 translation 5079 * HCR_PTW forbids certain page-table setups 5080 * HCR_DC Disables stage1 and enables stage2 translation 5081 */ 5082 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 5083 tlb_flush(CPU(cpu)); 5084 } 5085 env->cp15.hcr_el2 = value; 5086 5087 /* 5088 * Updates to VI and VF require us to update the status of 5089 * virtual interrupts, which are the logical OR of these bits 5090 * and the state of the input lines from the GIC. (This requires 5091 * that we have the iothread lock, which is done by marking the 5092 * reginfo structs as ARM_CP_IO.) 5093 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 5094 * possible for it to be taken immediately, because VIRQ and 5095 * VFIQ are masked unless running at EL0 or EL1, and HCR 5096 * can only be written at EL2. 5097 */ 5098 g_assert(qemu_mutex_iothread_locked()); 5099 arm_cpu_update_virq(cpu); 5100 arm_cpu_update_vfiq(cpu); 5101 } 5102 5103 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5104 uint64_t value) 5105 { 5106 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 5107 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5108 hcr_write(env, NULL, value); 5109 } 5110 5111 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5112 uint64_t value) 5113 { 5114 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5115 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5116 hcr_write(env, NULL, value); 5117 } 5118 5119 /* 5120 * Return the effective value of HCR_EL2. 5121 * Bits that are not included here: 5122 * RW (read from SCR_EL3.RW as needed) 5123 */ 5124 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5125 { 5126 uint64_t ret = env->cp15.hcr_el2; 5127 5128 if (arm_is_secure_below_el3(env)) { 5129 /* 5130 * "This register has no effect if EL2 is not enabled in the 5131 * current Security state". This is ARMv8.4-SecEL2 speak for 5132 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 5133 * 5134 * Prior to that, the language was "In an implementation that 5135 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 5136 * as if this field is 0 for all purposes other than a direct 5137 * read or write access of HCR_EL2". With lots of enumeration 5138 * on a per-field basis. In current QEMU, this is condition 5139 * is arm_is_secure_below_el3. 5140 * 5141 * Since the v8.4 language applies to the entire register, and 5142 * appears to be backward compatible, use that. 5143 */ 5144 ret = 0; 5145 } else if (ret & HCR_TGE) { 5146 /* These bits are up-to-date as of ARMv8.4. 
*/ 5147 if (ret & HCR_E2H) { 5148 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 5149 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 5150 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 5151 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE); 5152 } else { 5153 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 5154 } 5155 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 5156 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 5157 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 5158 HCR_TLOR); 5159 } 5160 5161 return ret; 5162 } 5163 5164 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 5165 uint64_t value) 5166 { 5167 /* 5168 * For A-profile AArch32 EL3, if NSACR.CP10 5169 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5170 */ 5171 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5172 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5173 value &= ~(0x3 << 10); 5174 value |= env->cp15.cptr_el[2] & (0x3 << 10); 5175 } 5176 env->cp15.cptr_el[2] = value; 5177 } 5178 5179 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 5180 { 5181 /* 5182 * For A-profile AArch32 EL3, if NSACR.CP10 5183 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5184 */ 5185 uint64_t value = env->cp15.cptr_el[2]; 5186 5187 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5188 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5189 value |= 0x3 << 10; 5190 } 5191 return value; 5192 } 5193 5194 static const ARMCPRegInfo el2_cp_reginfo[] = { 5195 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5196 .type = ARM_CP_IO, 5197 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5198 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5199 .writefn = hcr_write }, 5200 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5201 .type = ARM_CP_ALIAS | ARM_CP_IO, 5202 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5203 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5204 .writefn = hcr_writelow }, 5205 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5206 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5207 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5208 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5209 .type = ARM_CP_ALIAS, 5210 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5211 .access = PL2_RW, 5212 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5213 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5214 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5215 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5216 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5217 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5218 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5219 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5220 .type = ARM_CP_ALIAS, 5221 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5222 .access = PL2_RW, 5223 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5224 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5225 .type = ARM_CP_ALIAS, 5226 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5227 .access = PL2_RW, 5228 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5229 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5230 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5231 .access = PL2_RW, .writefn = vbar_write, 5232 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5233 .resetvalue = 0 }, 5234 { .name = "SP_EL2", .state = 
ARM_CP_STATE_AA64, 5235 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5236 .access = PL3_RW, .type = ARM_CP_ALIAS, 5237 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5238 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5239 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5240 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5241 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5242 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5243 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5244 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5245 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5246 .resetvalue = 0 }, 5247 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5248 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5249 .access = PL2_RW, .type = ARM_CP_ALIAS, 5250 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5251 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5252 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5253 .access = PL2_RW, .type = ARM_CP_CONST, 5254 .resetvalue = 0 }, 5255 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5256 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5257 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5258 .access = PL2_RW, .type = ARM_CP_CONST, 5259 .resetvalue = 0 }, 5260 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5261 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5262 .access = PL2_RW, .type = ARM_CP_CONST, 5263 .resetvalue = 0 }, 5264 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5265 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5266 .access = PL2_RW, .type = ARM_CP_CONST, 5267 .resetvalue = 0 }, 5268 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5269 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5270 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5271 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ 5272 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5273 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5274 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5275 .type = ARM_CP_ALIAS, 5276 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5277 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5278 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5279 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5280 .access = PL2_RW, 5281 /* no .writefn needed as this can't cause an ASID change; 5282 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 5283 */ 5284 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5285 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5286 .cp = 15, .opc1 = 6, .crm = 2, 5287 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5288 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5289 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5290 .writefn = vttbr_write }, 5291 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5292 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5293 .access = PL2_RW, .writefn = vttbr_write, 5294 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5295 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5296 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5297 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5298 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5299 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5300 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5301 .access = PL2_RW, .resetvalue = 0, 5302 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 
5303 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5304 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5305 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, 5306 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5307 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5308 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5309 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5310 { .name = "TLBIALLNSNH", 5311 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5312 .type = ARM_CP_NO_RAW, .access = PL2_W, 5313 .writefn = tlbiall_nsnh_write }, 5314 { .name = "TLBIALLNSNHIS", 5315 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5316 .type = ARM_CP_NO_RAW, .access = PL2_W, 5317 .writefn = tlbiall_nsnh_is_write }, 5318 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5319 .type = ARM_CP_NO_RAW, .access = PL2_W, 5320 .writefn = tlbiall_hyp_write }, 5321 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5322 .type = ARM_CP_NO_RAW, .access = PL2_W, 5323 .writefn = tlbiall_hyp_is_write }, 5324 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5325 .type = ARM_CP_NO_RAW, .access = PL2_W, 5326 .writefn = tlbimva_hyp_write }, 5327 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5328 .type = ARM_CP_NO_RAW, .access = PL2_W, 5329 .writefn = tlbimva_hyp_is_write }, 5330 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 5331 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5332 .type = ARM_CP_NO_RAW, .access = PL2_W, 5333 .writefn = tlbi_aa64_alle2_write }, 5334 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 5335 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5336 .type = ARM_CP_NO_RAW, .access = PL2_W, 5337 .writefn = tlbi_aa64_vae2_write }, 5338 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 5339 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5340 .access = PL2_W, .type = ARM_CP_NO_RAW, 5341 .writefn = tlbi_aa64_vae2_write }, 5342 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 5343 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5344 .access = PL2_W, .type = ARM_CP_NO_RAW, 5345 .writefn = tlbi_aa64_alle2is_write }, 5346 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 5347 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5348 .type = ARM_CP_NO_RAW, .access = PL2_W, 5349 .writefn = tlbi_aa64_vae2is_write }, 5350 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 5351 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5352 .access = PL2_W, .type = ARM_CP_NO_RAW, 5353 .writefn = tlbi_aa64_vae2is_write }, 5354 #ifndef CONFIG_USER_ONLY 5355 /* Unlike the other EL2-related AT operations, these must 5356 * UNDEF from EL3 if EL2 is not implemented, which is why we 5357 * define them here rather than with the rest of the AT ops. 5358 */ 5359 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5360 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5361 .access = PL2_W, .accessfn = at_s1e2_access, 5362 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5363 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5364 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5365 .access = PL2_W, .accessfn = at_s1e2_access, 5366 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5367 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5368 * if EL2 is not implemented; we choose to UNDEF. 
Behaviour at EL3 5369 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5370 * to behave as if SCR.NS was 1. 5371 */ 5372 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5373 .access = PL2_W, 5374 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5375 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5376 .access = PL2_W, 5377 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5378 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5379 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5380 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5381 * reset values as IMPDEF. We choose to reset to 3 to comply with 5382 * both ARMv7 and ARMv8. 5383 */ 5384 .access = PL2_RW, .resetvalue = 3, 5385 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5386 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5387 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5388 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5389 .writefn = gt_cntvoff_write, 5390 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5391 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5392 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5393 .writefn = gt_cntvoff_write, 5394 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5395 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5396 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5397 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5398 .type = ARM_CP_IO, .access = PL2_RW, 5399 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5400 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5401 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5402 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5403 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5404 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5405 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5406 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5407 .resetfn = gt_hyp_timer_reset, 5408 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5409 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5410 .type = ARM_CP_IO, 5411 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5412 .access = PL2_RW, 5413 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5414 .resetvalue = 0, 5415 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5416 #endif 5417 /* The only field of MDCR_EL2 that has a defined architectural reset value 5418 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 5419 * don't implement any PMU event counters, so using zero as a reset 5420 * value for MDCR_EL2 is okay 5421 */ 5422 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5423 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5424 .access = PL2_RW, .resetvalue = 0, 5425 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 5426 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5427 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5428 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5429 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5430 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5431 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5432 .access = PL2_RW, 5433 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5434 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5435 .cp = 15, .opc0 = 3, .opc1 = 4, .crn 
= 1, .crm = 1, .opc2 = 3, 5436 .access = PL2_RW, 5437 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5438 REGINFO_SENTINEL 5439 }; 5440 5441 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5442 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5443 .type = ARM_CP_ALIAS | ARM_CP_IO, 5444 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5445 .access = PL2_RW, 5446 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5447 .writefn = hcr_writehigh }, 5448 REGINFO_SENTINEL 5449 }; 5450 5451 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5452 bool isread) 5453 { 5454 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5455 * At Secure EL1 it traps to EL3. 5456 */ 5457 if (arm_current_el(env) == 3) { 5458 return CP_ACCESS_OK; 5459 } 5460 if (arm_is_secure_below_el3(env)) { 5461 return CP_ACCESS_TRAP_EL3; 5462 } 5463 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 5464 if (isread) { 5465 return CP_ACCESS_OK; 5466 } 5467 return CP_ACCESS_TRAP_UNCATEGORIZED; 5468 } 5469 5470 static const ARMCPRegInfo el3_cp_reginfo[] = { 5471 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 5472 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 5473 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 5474 .resetvalue = 0, .writefn = scr_write }, 5475 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 5476 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 5477 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5478 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 5479 .writefn = scr_write }, 5480 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 5481 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 5482 .access = PL3_RW, .resetvalue = 0, 5483 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 5484 { .name = "SDER", 5485 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 5486 .access = PL3_RW, .resetvalue = 0, 5487 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 5488 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5489 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5490 .writefn = vbar_write, .resetvalue = 0, 5491 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 5492 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 5493 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 5494 .access = PL3_RW, .resetvalue = 0, 5495 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 5496 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 5497 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 5498 .access = PL3_RW, 5499 /* no .writefn needed as this can't cause an ASID change; 5500 * we must provide a .raw_writefn and .resetfn because we handle 5501 * reset and migration for the AArch32 TTBCR(S), which might be 5502 * using mask and base_mask. 
5503 */ 5504 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 5505 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 5506 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 5507 .type = ARM_CP_ALIAS, 5508 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 5509 .access = PL3_RW, 5510 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 5511 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 5512 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 5513 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 5514 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 5515 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5516 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5517 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5518 .type = ARM_CP_ALIAS, 5519 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5520 .access = PL3_RW, 5521 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5522 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5523 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5524 .access = PL3_RW, .writefn = vbar_write, 5525 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5526 .resetvalue = 0 }, 5527 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5528 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5529 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5530 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5531 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5532 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5533 .access = PL3_RW, .resetvalue = 0, 5534 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5535 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5536 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5537 .access = PL3_RW, .type = ARM_CP_CONST, 5538 .resetvalue = 0 }, 5539 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5540 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5541 .access = PL3_RW, .type = ARM_CP_CONST, 5542 .resetvalue = 0 }, 5543 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5544 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5545 .access = PL3_RW, .type = ARM_CP_CONST, 5546 .resetvalue = 0 }, 5547 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 5548 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 5549 .access = PL3_W, .type = ARM_CP_NO_RAW, 5550 .writefn = tlbi_aa64_alle3is_write }, 5551 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 5552 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 5553 .access = PL3_W, .type = ARM_CP_NO_RAW, 5554 .writefn = tlbi_aa64_vae3is_write }, 5555 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 5556 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 5557 .access = PL3_W, .type = ARM_CP_NO_RAW, 5558 .writefn = tlbi_aa64_vae3is_write }, 5559 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 5560 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 5561 .access = PL3_W, .type = ARM_CP_NO_RAW, 5562 .writefn = tlbi_aa64_alle3_write }, 5563 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 5564 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 5565 .access = PL3_W, .type = ARM_CP_NO_RAW, 5566 .writefn = tlbi_aa64_vae3_write }, 5567 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 5568 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 5569 .access = PL3_W, .type = ARM_CP_NO_RAW, 5570 .writefn = tlbi_aa64_vae3_write }, 5571 REGINFO_SENTINEL 5572 }; 5573 5574 #ifndef CONFIG_USER_ONLY 5575 /* Test if system register redirection is to 
occur in the current state. */ 5576 static bool redirect_for_e2h(CPUARMState *env) 5577 { 5578 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 5579 } 5580 5581 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 5582 { 5583 CPReadFn *readfn; 5584 5585 if (redirect_for_e2h(env)) { 5586 /* Switch to the saved EL2 version of the register. */ 5587 ri = ri->opaque; 5588 readfn = ri->readfn; 5589 } else { 5590 readfn = ri->orig_readfn; 5591 } 5592 if (readfn == NULL) { 5593 readfn = raw_read; 5594 } 5595 return readfn(env, ri); 5596 } 5597 5598 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 5599 uint64_t value) 5600 { 5601 CPWriteFn *writefn; 5602 5603 if (redirect_for_e2h(env)) { 5604 /* Switch to the saved EL2 version of the register. */ 5605 ri = ri->opaque; 5606 writefn = ri->writefn; 5607 } else { 5608 writefn = ri->orig_writefn; 5609 } 5610 if (writefn == NULL) { 5611 writefn = raw_write; 5612 } 5613 writefn(env, ri, value); 5614 } 5615 5616 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 5617 { 5618 struct E2HAlias { 5619 uint32_t src_key, dst_key, new_key; 5620 const char *src_name, *dst_name, *new_name; 5621 bool (*feature)(const ARMISARegisters *id); 5622 }; 5623 5624 #define K(op0, op1, crn, crm, op2) \ 5625 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 5626 5627 static const struct E2HAlias aliases[] = { 5628 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 5629 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 5630 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 5631 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 5632 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 5633 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 5634 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 5635 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 5636 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 5637 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 5638 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 5639 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 5640 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 5641 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 5642 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 5643 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 5644 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 5645 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 5646 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 5647 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 5648 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 5649 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 5650 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 5651 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 5652 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 5653 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 5654 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 5655 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 5656 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 5657 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 5658 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 5659 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, 5660 5661 /* 5662 * Note that redirection of ZCR is mentioned in the description 5663 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 5664 * not in the summary table. 
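* (For the table as a whole, the intended behaviour is: when HCR_EL2.E2H == 1 and we are running at EL2, an access using the src encoding, e.g. SCTLR_EL1, is redirected to the dst register, SCTLR_EL2, while the new _EL12 encoding reaches the original EL1 state; the loop below rewires the reginfo structs accordingly.)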
5665 */ 5666 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), 5667 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, 5668 5669 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ 5670 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ 5671 }; 5672 #undef K 5673 5674 size_t i; 5675 5676 for (i = 0; i < ARRAY_SIZE(aliases); i++) { 5677 const struct E2HAlias *a = &aliases[i]; 5678 ARMCPRegInfo *src_reg, *dst_reg; 5679 5680 if (a->feature && !a->feature(&cpu->isar)) { 5681 continue; 5682 } 5683 5684 src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key); 5685 dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key); 5686 g_assert(src_reg != NULL); 5687 g_assert(dst_reg != NULL); 5688 5689 /* Cross-compare names to detect typos in the keys. */ 5690 g_assert(strcmp(src_reg->name, a->src_name) == 0); 5691 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); 5692 5693 /* None of the core system registers use opaque; we will. */ 5694 g_assert(src_reg->opaque == NULL); 5695 5696 /* Create alias before redirection so we dup the right data. */ 5697 if (a->new_key) { 5698 ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); 5699 uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t)); 5700 bool ok; 5701 5702 new_reg->name = a->new_name; 5703 new_reg->type |= ARM_CP_ALIAS; 5704 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ 5705 new_reg->access &= PL2_RW | PL3_RW; 5706 5707 ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg); 5708 g_assert(ok); 5709 } 5710 5711 src_reg->opaque = dst_reg; 5712 src_reg->orig_readfn = src_reg->readfn ?: raw_read; 5713 src_reg->orig_writefn = src_reg->writefn ?: raw_write; 5714 if (!src_reg->raw_readfn) { 5715 src_reg->raw_readfn = raw_read; 5716 } 5717 if (!src_reg->raw_writefn) { 5718 src_reg->raw_writefn = raw_write; 5719 } 5720 src_reg->readfn = el2_e2h_read; 5721 src_reg->writefn = el2_e2h_write; 5722 } 5723 } 5724 #endif 5725 5726 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 5727 bool isread) 5728 { 5729 int cur_el = arm_current_el(env); 5730 5731 if (cur_el < 2) { 5732 uint64_t hcr = arm_hcr_el2_eff(env); 5733 5734 if (cur_el == 0) { 5735 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 5736 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { 5737 return CP_ACCESS_TRAP_EL2; 5738 } 5739 } else { 5740 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 5741 return CP_ACCESS_TRAP; 5742 } 5743 if (hcr & HCR_TID2) { 5744 return CP_ACCESS_TRAP_EL2; 5745 } 5746 } 5747 } else if (hcr & HCR_TID2) { 5748 return CP_ACCESS_TRAP_EL2; 5749 } 5750 } 5751 5752 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) { 5753 return CP_ACCESS_TRAP_EL2; 5754 } 5755 5756 return CP_ACCESS_OK; 5757 } 5758 5759 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 5760 uint64_t value) 5761 { 5762 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 5763 * read via a bit in OSLSR_EL1. 5764 */ 5765 int oslock; 5766 5767 if (ri->state == ARM_CP_STATE_AA32) { 5768 oslock = (value == 0xC5ACCE55); 5769 } else { 5770 oslock = value & 1; 5771 } 5772 5773 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 5774 } 5775 5776 static const ARMCPRegInfo debug_cp_reginfo[] = { 5777 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 5778 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 5779 * unlike DBGDRAR it is never accessible from EL0. 5780 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 5781 * accessor. 
5782 */ 5783 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 5784 .access = PL0_R, .accessfn = access_tdra, 5785 .type = ARM_CP_CONST, .resetvalue = 0 }, 5786 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 5787 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5788 .access = PL1_R, .accessfn = access_tdra, 5789 .type = ARM_CP_CONST, .resetvalue = 0 }, 5790 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 5791 .access = PL0_R, .accessfn = access_tdra, 5792 .type = ARM_CP_CONST, .resetvalue = 0 }, 5793 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 5794 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 5795 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 5796 .access = PL1_RW, .accessfn = access_tda, 5797 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 5798 .resetvalue = 0 }, 5799 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 5800 * We don't implement the configurable EL0 access. 5801 */ 5802 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 5803 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 5804 .type = ARM_CP_ALIAS, 5805 .access = PL1_R, .accessfn = access_tda, 5806 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 5807 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 5808 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 5809 .access = PL1_W, .type = ARM_CP_NO_RAW, 5810 .accessfn = access_tdosa, 5811 .writefn = oslar_write }, 5812 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 5813 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 5814 .access = PL1_R, .resetvalue = 10, 5815 .accessfn = access_tdosa, 5816 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 5817 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 5818 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 5819 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 5820 .access = PL1_RW, .accessfn = access_tdosa, 5821 .type = ARM_CP_NOP }, 5822 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 5823 * implement vector catch debug events yet. 5824 */ 5825 { .name = "DBGVCR", 5826 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5827 .access = PL1_RW, .accessfn = access_tda, 5828 .type = ARM_CP_NOP }, 5829 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 5830 * to save and restore a 32-bit guest's DBGVCR) 5831 */ 5832 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 5833 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 5834 .access = PL2_RW, .accessfn = access_tda, 5835 .type = ARM_CP_NOP }, 5836 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 5837 * Channel but Linux may try to access this register. The 32-bit 5838 * alias is DBGDCCINT. 
5839 */ 5840 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 5841 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 5842 .access = PL1_RW, .accessfn = access_tda, 5843 .type = ARM_CP_NOP }, 5844 REGINFO_SENTINEL 5845 }; 5846 5847 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 5848 /* 64 bit access versions of the (dummy) debug registers */ 5849 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 5850 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5851 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 5852 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5853 REGINFO_SENTINEL 5854 }; 5855 5856 /* Return the exception level to which exceptions should be taken 5857 * via SVEAccessTrap. If an exception should be routed through 5858 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 5859 * take care of raising that exception. 5860 * C.f. the ARM pseudocode function CheckSVEEnabled. 5861 */ 5862 int sve_exception_el(CPUARMState *env, int el) 5863 { 5864 #ifndef CONFIG_USER_ONLY 5865 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 5866 5867 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 5868 bool disabled = false; 5869 5870 /* The CPACR.ZEN controls traps to EL1: 5871 * 0, 2 : trap EL0 and EL1 accesses 5872 * 1 : trap only EL0 accesses 5873 * 3 : trap no accesses 5874 */ 5875 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 5876 disabled = true; 5877 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 5878 disabled = el == 0; 5879 } 5880 if (disabled) { 5881 /* route_to_el2 */ 5882 return hcr_el2 & HCR_TGE ? 2 : 1; 5883 } 5884 5885 /* Check CPACR.FPEN. */ 5886 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 5887 disabled = true; 5888 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 5889 disabled = el == 0; 5890 } 5891 if (disabled) { 5892 return 0; 5893 } 5894 } 5895 5896 /* CPTR_EL2. Since TZ and TFP are positive, 5897 * they will be zero when EL2 is not present. 5898 */ 5899 if (el <= 2 && !arm_is_secure_below_el3(env)) { 5900 if (env->cp15.cptr_el[2] & CPTR_TZ) { 5901 return 2; 5902 } 5903 if (env->cp15.cptr_el[2] & CPTR_TFP) { 5904 return 0; 5905 } 5906 } 5907 5908 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 5909 if (arm_feature(env, ARM_FEATURE_EL3) 5910 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 5911 return 3; 5912 } 5913 #endif 5914 return 0; 5915 } 5916 5917 static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) 5918 { 5919 uint32_t end_len; 5920 5921 end_len = start_len &= 0xf; 5922 if (!test_bit(start_len, cpu->sve_vq_map)) { 5923 end_len = find_last_bit(cpu->sve_vq_map, start_len); 5924 assert(end_len < start_len); 5925 } 5926 return end_len; 5927 } 5928 5929 /* 5930 * Given that SVE is enabled, return the vector length for EL. 
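* (Illustrative example: with sve_max_vq == 4 and ZCR_EL1.LEN == 7, an EL1 access computes MIN(3, 7) == 3, i.e. 4 quadwords or a 512-bit vector, which sve_zcr_get_valid_len() may reduce further if that length is not in sve_vq_map.)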
5931 */ 5932 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 5933 { 5934 ARMCPU *cpu = env_archcpu(env); 5935 uint32_t zcr_len = cpu->sve_max_vq - 1; 5936 5937 if (el <= 1) { 5938 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 5939 } 5940 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { 5941 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 5942 } 5943 if (arm_feature(env, ARM_FEATURE_EL3)) { 5944 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 5945 } 5946 5947 return sve_zcr_get_valid_len(cpu, zcr_len); 5948 } 5949 5950 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5951 uint64_t value) 5952 { 5953 int cur_el = arm_current_el(env); 5954 int old_len = sve_zcr_len_for_el(env, cur_el); 5955 int new_len; 5956 5957 /* Bits other than [3:0] are RAZ/WI. */ 5958 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); 5959 raw_write(env, ri, value & 0xf); 5960 5961 /* 5962 * Because we arrived here, we know both FP and SVE are enabled; 5963 * otherwise we would have trapped access to the ZCR_ELn register. 5964 */ 5965 new_len = sve_zcr_len_for_el(env, cur_el); 5966 if (new_len < old_len) { 5967 aarch64_sve_narrow_vq(env, new_len + 1); 5968 } 5969 } 5970 5971 static const ARMCPRegInfo zcr_el1_reginfo = { 5972 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 5973 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 5974 .access = PL1_RW, .type = ARM_CP_SVE, 5975 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 5976 .writefn = zcr_write, .raw_writefn = raw_write 5977 }; 5978 5979 static const ARMCPRegInfo zcr_el2_reginfo = { 5980 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 5981 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 5982 .access = PL2_RW, .type = ARM_CP_SVE, 5983 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 5984 .writefn = zcr_write, .raw_writefn = raw_write 5985 }; 5986 5987 static const ARMCPRegInfo zcr_no_el2_reginfo = { 5988 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 5989 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 5990 .access = PL2_RW, .type = ARM_CP_SVE, 5991 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 5992 }; 5993 5994 static const ARMCPRegInfo zcr_el3_reginfo = { 5995 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 5996 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 5997 .access = PL3_RW, .type = ARM_CP_SVE, 5998 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 5999 .writefn = zcr_write, .raw_writefn = raw_write 6000 }; 6001 6002 void hw_watchpoint_update(ARMCPU *cpu, int n) 6003 { 6004 CPUARMState *env = &cpu->env; 6005 vaddr len = 0; 6006 vaddr wvr = env->cp15.dbgwvr[n]; 6007 uint64_t wcr = env->cp15.dbgwcr[n]; 6008 int mask; 6009 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 6010 6011 if (env->cpu_watchpoint[n]) { 6012 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 6013 env->cpu_watchpoint[n] = NULL; 6014 } 6015 6016 if (!extract64(wcr, 0, 1)) { 6017 /* E bit clear : watchpoint disabled */ 6018 return; 6019 } 6020 6021 switch (extract64(wcr, 3, 2)) { 6022 case 0: 6023 /* LSC 00 is reserved and must behave as if the wp is disabled */ 6024 return; 6025 case 1: 6026 flags |= BP_MEM_READ; 6027 break; 6028 case 2: 6029 flags |= BP_MEM_WRITE; 6030 break; 6031 case 3: 6032 flags |= BP_MEM_ACCESS; 6033 break; 6034 } 6035 6036 /* Attempts to use both MASK and BAS fields simultaneously are 6037 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 6038 * thus generating a watchpoint for every byte in the masked region. 
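* (For example, a MASK value of 3 with WVR == 0x1004 is treated below as an 8-byte watchpoint covering 0x1000..0x1007: len = 1 << 3, and the low WVR bits are aligned away rather than being reported as an error.)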
6039 */ 6040 mask = extract64(wcr, 24, 4); 6041 if (mask == 1 || mask == 2) { 6042 /* Reserved values of MASK; we must act as if the mask value was 6043 * some non-reserved value, or as if the watchpoint were disabled. 6044 * We choose the latter. 6045 */ 6046 return; 6047 } else if (mask) { 6048 /* Watchpoint covers an aligned area up to 2GB in size */ 6049 len = 1ULL << mask; 6050 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 6051 * whether the watchpoint fires when the unmasked bits match; we opt 6052 * to generate the exceptions. 6053 */ 6054 wvr &= ~(len - 1); 6055 } else { 6056 /* Watchpoint covers bytes defined by the byte address select bits */ 6057 int bas = extract64(wcr, 5, 8); 6058 int basstart; 6059 6060 if (bas == 0) { 6061 /* This must act as if the watchpoint is disabled */ 6062 return; 6063 } 6064 6065 if (extract64(wvr, 2, 1)) { 6066 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 6067 * ignored, and BAS[3:0] define which bytes to watch. 6068 */ 6069 bas &= 0xf; 6070 } 6071 /* The BAS bits are supposed to be programmed to indicate a contiguous 6072 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 6073 * we fire for each byte in the word/doubleword addressed by the WVR. 6074 * We choose to ignore any non-zero bits after the first range of 1s. 6075 */ 6076 basstart = ctz32(bas); 6077 len = cto32(bas >> basstart); 6078 wvr += basstart; 6079 } 6080 6081 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 6082 &env->cpu_watchpoint[n]); 6083 } 6084 6085 void hw_watchpoint_update_all(ARMCPU *cpu) 6086 { 6087 int i; 6088 CPUARMState *env = &cpu->env; 6089 6090 /* Completely clear out existing QEMU watchpoints and our array, to 6091 * avoid possible stale entries following migration load. 6092 */ 6093 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 6094 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 6095 6096 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 6097 hw_watchpoint_update(cpu, i); 6098 } 6099 } 6100 6101 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6102 uint64_t value) 6103 { 6104 ARMCPU *cpu = env_archcpu(env); 6105 int i = ri->crm; 6106 6107 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 6108 * register reads and behaves as if values written are sign extended. 6109 * Bits [1:0] are RES0. 
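* (e.g. a write of 0x000123456789abcd is stored as 0xffff23456789abcc: bit [48] is set, so bits [63:49] read as ones, and bits [1:0] are forced to zero.)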
6110 */ 6111 value = sextract64(value, 0, 49) & ~3ULL; 6112 6113 raw_write(env, ri, value); 6114 hw_watchpoint_update(cpu, i); 6115 } 6116 6117 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6118 uint64_t value) 6119 { 6120 ARMCPU *cpu = env_archcpu(env); 6121 int i = ri->crm; 6122 6123 raw_write(env, ri, value); 6124 hw_watchpoint_update(cpu, i); 6125 } 6126 6127 void hw_breakpoint_update(ARMCPU *cpu, int n) 6128 { 6129 CPUARMState *env = &cpu->env; 6130 uint64_t bvr = env->cp15.dbgbvr[n]; 6131 uint64_t bcr = env->cp15.dbgbcr[n]; 6132 vaddr addr; 6133 int bt; 6134 int flags = BP_CPU; 6135 6136 if (env->cpu_breakpoint[n]) { 6137 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 6138 env->cpu_breakpoint[n] = NULL; 6139 } 6140 6141 if (!extract64(bcr, 0, 1)) { 6142 /* E bit clear : watchpoint disabled */ 6143 return; 6144 } 6145 6146 bt = extract64(bcr, 20, 4); 6147 6148 switch (bt) { 6149 case 4: /* unlinked address mismatch (reserved if AArch64) */ 6150 case 5: /* linked address mismatch (reserved if AArch64) */ 6151 qemu_log_mask(LOG_UNIMP, 6152 "arm: address mismatch breakpoint types not implemented\n"); 6153 return; 6154 case 0: /* unlinked address match */ 6155 case 1: /* linked address match */ 6156 { 6157 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 6158 * we behave as if the register was sign extended. Bits [1:0] are 6159 * RES0. The BAS field is used to allow setting breakpoints on 16 6160 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 6161 * a bp will fire if the addresses covered by the bp and the addresses 6162 * covered by the insn overlap but the insn doesn't start at the 6163 * start of the bp address range. We choose to require the insn and 6164 * the bp to have the same address. The constraints on writing to 6165 * BAS enforced in dbgbcr_write mean we have only four cases: 6166 * 0b0000 => no breakpoint 6167 * 0b0011 => breakpoint on addr 6168 * 0b1100 => breakpoint on addr + 2 6169 * 0b1111 => breakpoint on addr 6170 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 6171 */ 6172 int bas = extract64(bcr, 5, 4); 6173 addr = sextract64(bvr, 0, 49) & ~3ULL; 6174 if (bas == 0) { 6175 return; 6176 } 6177 if (bas == 0xc) { 6178 addr += 2; 6179 } 6180 break; 6181 } 6182 case 2: /* unlinked context ID match */ 6183 case 8: /* unlinked VMID match (reserved if no EL2) */ 6184 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 6185 qemu_log_mask(LOG_UNIMP, 6186 "arm: unlinked context breakpoint types not implemented\n"); 6187 return; 6188 case 9: /* linked VMID match (reserved if no EL2) */ 6189 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 6190 case 3: /* linked context ID match */ 6191 default: 6192 /* We must generate no events for Linked context matches (unless 6193 * they are linked to by some other bp/wp, which is handled in 6194 * updates for the linking bp/wp). We choose to also generate no events 6195 * for reserved values. 6196 */ 6197 return; 6198 } 6199 6200 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 6201 } 6202 6203 void hw_breakpoint_update_all(ARMCPU *cpu) 6204 { 6205 int i; 6206 CPUARMState *env = &cpu->env; 6207 6208 /* Completely clear out existing QEMU breakpoints and our array, to 6209 * avoid possible stale entries following migration load. 
6210 */ 6211 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 6212 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 6213 6214 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 6215 hw_breakpoint_update(cpu, i); 6216 } 6217 } 6218 6219 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6220 uint64_t value) 6221 { 6222 ARMCPU *cpu = env_archcpu(env); 6223 int i = ri->crm; 6224 6225 raw_write(env, ri, value); 6226 hw_breakpoint_update(cpu, i); 6227 } 6228 6229 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6230 uint64_t value) 6231 { 6232 ARMCPU *cpu = env_archcpu(env); 6233 int i = ri->crm; 6234 6235 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 6236 * copy of BAS[0]. 6237 */ 6238 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 6239 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 6240 6241 raw_write(env, ri, value); 6242 hw_breakpoint_update(cpu, i); 6243 } 6244 6245 static void define_debug_regs(ARMCPU *cpu) 6246 { 6247 /* Define v7 and v8 architectural debug registers. 6248 * These are just dummy implementations for now. 6249 */ 6250 int i; 6251 int wrps, brps, ctx_cmps; 6252 ARMCPRegInfo dbgdidr = { 6253 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 6254 .access = PL0_R, .accessfn = access_tda, 6255 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 6256 }; 6257 6258 /* Note that all these register fields hold "number of Xs minus 1". */ 6259 brps = arm_num_brps(cpu); 6260 wrps = arm_num_wrps(cpu); 6261 ctx_cmps = arm_num_ctx_cmps(cpu); 6262 6263 assert(ctx_cmps <= brps); 6264 6265 define_one_arm_cp_reg(cpu, &dbgdidr); 6266 define_arm_cp_regs(cpu, debug_cp_reginfo); 6267 6268 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 6269 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 6270 } 6271 6272 for (i = 0; i < brps; i++) { 6273 ARMCPRegInfo dbgregs[] = { 6274 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 6275 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 6276 .access = PL1_RW, .accessfn = access_tda, 6277 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 6278 .writefn = dbgbvr_write, .raw_writefn = raw_write 6279 }, 6280 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 6281 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 6282 .access = PL1_RW, .accessfn = access_tda, 6283 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 6284 .writefn = dbgbcr_write, .raw_writefn = raw_write 6285 }, 6286 REGINFO_SENTINEL 6287 }; 6288 define_arm_cp_regs(cpu, dbgregs); 6289 } 6290 6291 for (i = 0; i < wrps; i++) { 6292 ARMCPRegInfo dbgregs[] = { 6293 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 6294 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 6295 .access = PL1_RW, .accessfn = access_tda, 6296 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 6297 .writefn = dbgwvr_write, .raw_writefn = raw_write 6298 }, 6299 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 6300 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 6301 .access = PL1_RW, .accessfn = access_tda, 6302 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 6303 .writefn = dbgwcr_write, .raw_writefn = raw_write 6304 }, 6305 REGINFO_SENTINEL 6306 }; 6307 define_arm_cp_regs(cpu, dbgregs); 6308 } 6309 } 6310 6311 static void define_pmu_regs(ARMCPU *cpu) 6312 { 6313 /* 6314 * v7 performance monitor control register: same implementor 6315 * field as main ID register, and we implement four counters in 6316 * addition to the cycle count register. 
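* (The PMCR_EL0 resetvalue below encodes this: the IMP field is taken from MIDR[31:24] and the N field is set to pmcrn == 4 via PMCRN_SHIFT.)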
6317 */ 6318 unsigned int i, pmcrn = 4; 6319 ARMCPRegInfo pmcr = { 6320 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6321 .access = PL0_RW, 6322 .type = ARM_CP_IO | ARM_CP_ALIAS, 6323 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6324 .accessfn = pmreg_access, .writefn = pmcr_write, 6325 .raw_writefn = raw_write, 6326 }; 6327 ARMCPRegInfo pmcr64 = { 6328 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6329 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6330 .access = PL0_RW, .accessfn = pmreg_access, 6331 .type = ARM_CP_IO, 6332 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6333 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT), 6334 .writefn = pmcr_write, .raw_writefn = raw_write, 6335 }; 6336 define_one_arm_cp_reg(cpu, &pmcr); 6337 define_one_arm_cp_reg(cpu, &pmcr64); 6338 for (i = 0; i < pmcrn; i++) { 6339 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6340 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6341 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6342 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6343 ARMCPRegInfo pmev_regs[] = { 6344 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6345 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6346 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6347 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6348 .accessfn = pmreg_access }, 6349 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6350 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6351 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6352 .type = ARM_CP_IO, 6353 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6354 .raw_readfn = pmevcntr_rawread, 6355 .raw_writefn = pmevcntr_rawwrite }, 6356 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6357 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6358 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6359 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6360 .accessfn = pmreg_access }, 6361 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6362 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6363 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6364 .type = ARM_CP_IO, 6365 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6366 .raw_writefn = pmevtyper_rawwrite }, 6367 REGINFO_SENTINEL 6368 }; 6369 define_arm_cp_regs(cpu, pmev_regs); 6370 g_free(pmevcntr_name); 6371 g_free(pmevcntr_el0_name); 6372 g_free(pmevtyper_name); 6373 g_free(pmevtyper_el0_name); 6374 } 6375 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { 6376 ARMCPRegInfo v81_pmu_regs[] = { 6377 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6378 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6379 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6380 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6381 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6382 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6383 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6384 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6385 REGINFO_SENTINEL 6386 }; 6387 define_arm_cp_regs(cpu, v81_pmu_regs); 6388 } 6389 } 6390 6391 /* We don't know until after realize whether there's a GICv3 6392 * attached, and that is what registers the gicv3 sysregs. 6393 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 6394 * at runtime. 
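* (Specifically, id_pfr1_read below sets ID_PFR1.GIC, bits [31:28], to 1 and id_aa64pfr0_read sets ID_AA64PFR0.GIC, bits [27:24], to 1 when a GICv3 is attached.)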
6395 */ 6396 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 6397 { 6398 ARMCPU *cpu = env_archcpu(env); 6399 uint64_t pfr1 = cpu->id_pfr1; 6400 6401 if (env->gicv3state) { 6402 pfr1 |= 1 << 28; 6403 } 6404 return pfr1; 6405 } 6406 6407 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 6408 { 6409 ARMCPU *cpu = env_archcpu(env); 6410 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 6411 6412 if (env->gicv3state) { 6413 pfr0 |= 1 << 24; 6414 } 6415 return pfr0; 6416 } 6417 6418 /* Shared logic between LORID and the rest of the LOR* registers. 6419 * Secure state has already been dealt with. 6420 */ 6421 static CPAccessResult access_lor_ns(CPUARMState *env) 6422 { 6423 int el = arm_current_el(env); 6424 6425 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 6426 return CP_ACCESS_TRAP_EL2; 6427 } 6428 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 6429 return CP_ACCESS_TRAP_EL3; 6430 } 6431 return CP_ACCESS_OK; 6432 } 6433 6434 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, 6435 bool isread) 6436 { 6437 if (arm_is_secure_below_el3(env)) { 6438 /* Access ok in secure mode. */ 6439 return CP_ACCESS_OK; 6440 } 6441 return access_lor_ns(env); 6442 } 6443 6444 static CPAccessResult access_lor_other(CPUARMState *env, 6445 const ARMCPRegInfo *ri, bool isread) 6446 { 6447 if (arm_is_secure_below_el3(env)) { 6448 /* Access denied in secure mode. */ 6449 return CP_ACCESS_TRAP; 6450 } 6451 return access_lor_ns(env); 6452 } 6453 6454 /* 6455 * A trivial implementation of ARMv8.1-LOR leaves all of these 6456 * registers fixed at 0, which indicates that there are zero 6457 * supported Limited Ordering regions. 6458 */ 6459 static const ARMCPRegInfo lor_reginfo[] = { 6460 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6461 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6462 .access = PL1_RW, .accessfn = access_lor_other, 6463 .type = ARM_CP_CONST, .resetvalue = 0 }, 6464 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6465 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6466 .access = PL1_RW, .accessfn = access_lor_other, 6467 .type = ARM_CP_CONST, .resetvalue = 0 }, 6468 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6469 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6470 .access = PL1_RW, .accessfn = access_lor_other, 6471 .type = ARM_CP_CONST, .resetvalue = 0 }, 6472 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6473 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6474 .access = PL1_RW, .accessfn = access_lor_other, 6475 .type = ARM_CP_CONST, .resetvalue = 0 }, 6476 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6477 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6478 .access = PL1_R, .accessfn = access_lorid, 6479 .type = ARM_CP_CONST, .resetvalue = 0 }, 6480 REGINFO_SENTINEL 6481 }; 6482 6483 #ifdef TARGET_AARCH64 6484 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 6485 bool isread) 6486 { 6487 int el = arm_current_el(env); 6488 6489 if (el < 2 && 6490 arm_feature(env, ARM_FEATURE_EL2) && 6491 !(arm_hcr_el2_eff(env) & HCR_APK)) { 6492 return CP_ACCESS_TRAP_EL2; 6493 } 6494 if (el < 3 && 6495 arm_feature(env, ARM_FEATURE_EL3) && 6496 !(env->cp15.scr_el3 & SCR_APK)) { 6497 return CP_ACCESS_TRAP_EL3; 6498 } 6499 return CP_ACCESS_OK; 6500 } 6501 6502 static const ARMCPRegInfo pauth_reginfo[] = { 6503 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6504 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 6505 .access = PL1_RW, .accessfn =
access_pauth, 6506 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 6507 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6508 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 6509 .access = PL1_RW, .accessfn = access_pauth, 6510 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 6511 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6512 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 6513 .access = PL1_RW, .accessfn = access_pauth, 6514 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 6515 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6516 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 6517 .access = PL1_RW, .accessfn = access_pauth, 6518 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 6519 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6520 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 6521 .access = PL1_RW, .accessfn = access_pauth, 6522 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 6523 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6524 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 6525 .access = PL1_RW, .accessfn = access_pauth, 6526 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 6527 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6528 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 6529 .access = PL1_RW, .accessfn = access_pauth, 6530 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, 6531 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6532 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 6533 .access = PL1_RW, .accessfn = access_pauth, 6534 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 6535 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6536 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 6537 .access = PL1_RW, .accessfn = access_pauth, 6538 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, 6539 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6540 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 6541 .access = PL1_RW, .accessfn = access_pauth, 6542 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, 6543 REGINFO_SENTINEL 6544 }; 6545 6546 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 6547 { 6548 Error *err = NULL; 6549 uint64_t ret; 6550 6551 /* Success sets NZCV = 0000. */ 6552 env->NF = env->CF = env->VF = 0, env->ZF = 1; 6553 6554 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) { 6555 /* 6556 * ??? Failed, for unknown reasons in the crypto subsystem. 6557 * The best we can do is log the reason and return the 6558 * timed-out indication to the guest. There is no reason 6559 * we know to expect this failure to be transitory, so the 6560 * guest may well hang retrying the operation. 6561 */ 6562 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", 6563 ri->name, error_get_pretty(err)); 6564 error_free(err); 6565 6566 env->ZF = 0; /* NZCF = 0100 */ 6567 return 0; 6568 } 6569 return ret; 6570 } 6571 6572 /* We do not support re-seeding, so the two registers operate the same. 
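 *
 * A guest is expected to poll for success, since a failed read returns 0
 * and sets NZCV to 0b0100 (Z set) as implemented above.  Illustrative
 * (non-QEMU) guest sequence, using the raw encoding in case the
 * assembler lacks the named form:
 *
 *    1:  mrs   x0, s3_3_c2_c4_0      // RNDR
 *        b.eq  1b                    // Z set: no entropy yet, retry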
*/ 6573 static const ARMCPRegInfo rndr_reginfo[] = { 6574 { .name = "RNDR", .state = ARM_CP_STATE_AA64, 6575 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6576 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, 6577 .access = PL0_R, .readfn = rndr_readfn }, 6578 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, 6579 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6580 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, 6581 .access = PL0_R, .readfn = rndr_readfn }, 6582 REGINFO_SENTINEL 6583 }; 6584 6585 #ifndef CONFIG_USER_ONLY 6586 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, 6587 uint64_t value) 6588 { 6589 ARMCPU *cpu = env_archcpu(env); 6590 /* CTR_EL0 System register -> DminLine, bits [19:16] */ 6591 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); 6592 uint64_t vaddr_in = (uint64_t) value; 6593 uint64_t vaddr = vaddr_in & ~(dline_size - 1); 6594 void *haddr; 6595 int mem_idx = cpu_mmu_index(env, false); 6596 6597 /* This won't be crossing page boundaries */ 6598 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); 6599 if (haddr) { 6600 6601 ram_addr_t offset; 6602 MemoryRegion *mr; 6603 6604 /* RCU lock is already being held */ 6605 mr = memory_region_from_host(haddr, &offset); 6606 6607 if (mr) { 6608 memory_region_do_writeback(mr, offset, dline_size); 6609 } 6610 } 6611 } 6612 6613 static const ARMCPRegInfo dcpop_reg[] = { 6614 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, 6615 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, 6616 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6617 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn }, 6618 REGINFO_SENTINEL 6619 }; 6620 6621 static const ARMCPRegInfo dcpodp_reg[] = { 6622 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, 6623 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, 6624 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6625 .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn }, 6626 REGINFO_SENTINEL 6627 }; 6628 #endif /*CONFIG_USER_ONLY*/ 6629 6630 #endif 6631 6632 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, 6633 bool isread) 6634 { 6635 int el = arm_current_el(env); 6636 6637 if (el == 0) { 6638 uint64_t sctlr = arm_sctlr(env, el); 6639 if (!(sctlr & SCTLR_EnRCTX)) { 6640 return CP_ACCESS_TRAP; 6641 } 6642 } else if (el == 1) { 6643 uint64_t hcr = arm_hcr_el2_eff(env); 6644 if (hcr & HCR_NV) { 6645 return CP_ACCESS_TRAP_EL2; 6646 } 6647 } 6648 return CP_ACCESS_OK; 6649 } 6650 6651 static const ARMCPRegInfo predinv_reginfo[] = { 6652 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 6653 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 6654 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6655 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 6656 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 6657 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6658 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 6659 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 6660 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6661 /* 6662 * Note the AArch32 opcodes have a different OPC1. 
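 *
 * (Concretely: the AArch64 forms above are op1 == 3 system instructions,
 *  e.g. "CFP RCTX, Xt", while the AArch32 forms below are opc1 == 0
 *  coprocessor accesses, e.g. "MCR p15, 0, Rt, c7, c3, 4" for CFPRCTX.)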
6663 */ 6664 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 6665 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 6666 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6667 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 6668 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 6669 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6670 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 6671 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 6672 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6673 REGINFO_SENTINEL 6674 }; 6675 6676 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6677 bool isread) 6678 { 6679 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 6680 return CP_ACCESS_TRAP_EL2; 6681 } 6682 6683 return CP_ACCESS_OK; 6684 } 6685 6686 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6687 bool isread) 6688 { 6689 if (arm_feature(env, ARM_FEATURE_V8)) { 6690 return access_aa64_tid3(env, ri, isread); 6691 } 6692 6693 return CP_ACCESS_OK; 6694 } 6695 6696 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 6697 bool isread) 6698 { 6699 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 6700 return CP_ACCESS_TRAP_EL2; 6701 } 6702 6703 return CP_ACCESS_OK; 6704 } 6705 6706 static const ARMCPRegInfo jazelle_regs[] = { 6707 { .name = "JIDR", 6708 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 6709 .access = PL1_R, .accessfn = access_jazelle, 6710 .type = ARM_CP_CONST, .resetvalue = 0 }, 6711 { .name = "JOSCR", 6712 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 6713 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6714 { .name = "JMCR", 6715 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 6716 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6717 REGINFO_SENTINEL 6718 }; 6719 6720 static const ARMCPRegInfo vhe_reginfo[] = { 6721 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, 6722 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 6723 .access = PL2_RW, 6724 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, 6725 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 6726 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 6727 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 6728 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 6729 #ifndef CONFIG_USER_ONLY 6730 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 6731 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, 6732 .fieldoffset = 6733 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 6734 .type = ARM_CP_IO, .access = PL2_RW, 6735 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 6736 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 6737 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 6738 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 6739 .resetfn = gt_hv_timer_reset, 6740 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 6741 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 6742 .type = ARM_CP_IO, 6743 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 6744 .access = PL2_RW, 6745 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 6746 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 6747 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 6748 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 6749 .type = ARM_CP_IO | ARM_CP_ALIAS, 6750 .access = PL2_RW, .accessfn = e2h_access, 6751 
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 6752 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 6753 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 6754 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 6755 .type = ARM_CP_IO | ARM_CP_ALIAS, 6756 .access = PL2_RW, .accessfn = e2h_access, 6757 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 6758 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 6759 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6760 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 6761 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 6762 .access = PL2_RW, .accessfn = e2h_access, 6763 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 6764 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6765 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, 6766 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 6767 .access = PL2_RW, .accessfn = e2h_access, 6768 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 6769 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, 6770 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 6771 .type = ARM_CP_IO | ARM_CP_ALIAS, 6772 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 6773 .access = PL2_RW, .accessfn = e2h_access, 6774 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 6775 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 6776 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 6777 .type = ARM_CP_IO | ARM_CP_ALIAS, 6778 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 6779 .access = PL2_RW, .accessfn = e2h_access, 6780 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 6781 #endif 6782 REGINFO_SENTINEL 6783 }; 6784 6785 #ifndef CONFIG_USER_ONLY 6786 static const ARMCPRegInfo ats1e1_reginfo[] = { 6787 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 6788 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 6789 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6790 .writefn = ats_write64 }, 6791 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 6792 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 6793 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6794 .writefn = ats_write64 }, 6795 REGINFO_SENTINEL 6796 }; 6797 6798 static const ARMCPRegInfo ats1cp_reginfo[] = { 6799 { .name = "ATS1CPRP", 6800 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 6801 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6802 .writefn = ats_write }, 6803 { .name = "ATS1CPWP", 6804 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 6805 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6806 .writefn = ats_write }, 6807 REGINFO_SENTINEL 6808 }; 6809 #endif 6810 6811 void register_cp_regs_for_features(ARMCPU *cpu) 6812 { 6813 /* Register all the coprocessor registers based on feature bits */ 6814 CPUARMState *env = &cpu->env; 6815 if (arm_feature(env, ARM_FEATURE_M)) { 6816 /* M profile has no coprocessor registers */ 6817 return; 6818 } 6819 6820 define_arm_cp_regs(cpu, cp_reginfo); 6821 if (!arm_feature(env, ARM_FEATURE_V8)) { 6822 /* Must go early as it is full of wildcards that may be 6823 * overridden by later definitions. 
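 *
 * (Sketch of the mechanism, using a made-up register name: an early
 *  wildcarded entry such as
 *      { .name = "WILDCARD", .cp = 15, .crn = 6, .crm = CP_ANY,
 *        .opc1 = CP_ANY, .opc2 = CP_ANY, .type = ARM_CP_NOP },
 *  claims every encoding it matches; add_cpreg_to_hashtable() then only
 *  accepts a later definition for one of those encodings if either the
 *  old or the new reginfo carries ARM_CP_OVERRIDE, and asserts otherwise.)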
6824 */ 6825 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 6826 } 6827 6828 if (arm_feature(env, ARM_FEATURE_V6)) { 6829 /* The ID registers all have impdef reset values */ 6830 ARMCPRegInfo v6_idregs[] = { 6831 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 6832 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 6833 .access = PL1_R, .type = ARM_CP_CONST, 6834 .accessfn = access_aa32_tid3, 6835 .resetvalue = cpu->id_pfr0 }, 6836 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 6837 * the value of the GIC field until after we define these regs. 6838 */ 6839 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 6840 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 6841 .access = PL1_R, .type = ARM_CP_NO_RAW, 6842 .accessfn = access_aa32_tid3, 6843 .readfn = id_pfr1_read, 6844 .writefn = arm_cp_write_ignore }, 6845 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 6846 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 6847 .access = PL1_R, .type = ARM_CP_CONST, 6848 .accessfn = access_aa32_tid3, 6849 .resetvalue = cpu->isar.id_dfr0 }, 6850 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 6851 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 6852 .access = PL1_R, .type = ARM_CP_CONST, 6853 .accessfn = access_aa32_tid3, 6854 .resetvalue = cpu->id_afr0 }, 6855 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 6856 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 6857 .access = PL1_R, .type = ARM_CP_CONST, 6858 .accessfn = access_aa32_tid3, 6859 .resetvalue = cpu->id_mmfr0 }, 6860 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 6861 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 6862 .access = PL1_R, .type = ARM_CP_CONST, 6863 .accessfn = access_aa32_tid3, 6864 .resetvalue = cpu->id_mmfr1 }, 6865 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 6866 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 6867 .access = PL1_R, .type = ARM_CP_CONST, 6868 .accessfn = access_aa32_tid3, 6869 .resetvalue = cpu->id_mmfr2 }, 6870 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 6871 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 6872 .access = PL1_R, .type = ARM_CP_CONST, 6873 .accessfn = access_aa32_tid3, 6874 .resetvalue = cpu->id_mmfr3 }, 6875 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 6876 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 6877 .access = PL1_R, .type = ARM_CP_CONST, 6878 .accessfn = access_aa32_tid3, 6879 .resetvalue = cpu->isar.id_isar0 }, 6880 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 6881 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 6882 .access = PL1_R, .type = ARM_CP_CONST, 6883 .accessfn = access_aa32_tid3, 6884 .resetvalue = cpu->isar.id_isar1 }, 6885 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 6886 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 6887 .access = PL1_R, .type = ARM_CP_CONST, 6888 .accessfn = access_aa32_tid3, 6889 .resetvalue = cpu->isar.id_isar2 }, 6890 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 6891 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 6892 .access = PL1_R, .type = ARM_CP_CONST, 6893 .accessfn = access_aa32_tid3, 6894 .resetvalue = cpu->isar.id_isar3 }, 6895 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 6896 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 6897 .access = PL1_R, .type = ARM_CP_CONST, 6898 .accessfn = access_aa32_tid3, 6899 .resetvalue = cpu->isar.id_isar4 }, 6900 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 6901 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 6902 .access = PL1_R, .type = ARM_CP_CONST, 6903 .accessfn = 
access_aa32_tid3, 6904 .resetvalue = cpu->isar.id_isar5 }, 6905 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 6906 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 6907 .access = PL1_R, .type = ARM_CP_CONST, 6908 .accessfn = access_aa32_tid3, 6909 .resetvalue = cpu->id_mmfr4 }, 6910 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 6911 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 6912 .access = PL1_R, .type = ARM_CP_CONST, 6913 .accessfn = access_aa32_tid3, 6914 .resetvalue = cpu->isar.id_isar6 }, 6915 REGINFO_SENTINEL 6916 }; 6917 define_arm_cp_regs(cpu, v6_idregs); 6918 define_arm_cp_regs(cpu, v6_cp_reginfo); 6919 } else { 6920 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 6921 } 6922 if (arm_feature(env, ARM_FEATURE_V6K)) { 6923 define_arm_cp_regs(cpu, v6k_cp_reginfo); 6924 } 6925 if (arm_feature(env, ARM_FEATURE_V7MP) && 6926 !arm_feature(env, ARM_FEATURE_PMSA)) { 6927 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 6928 } 6929 if (arm_feature(env, ARM_FEATURE_V7VE)) { 6930 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 6931 } 6932 if (arm_feature(env, ARM_FEATURE_V7)) { 6933 ARMCPRegInfo clidr = { 6934 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 6935 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 6936 .access = PL1_R, .type = ARM_CP_CONST, 6937 .accessfn = access_aa64_tid2, 6938 .resetvalue = cpu->clidr 6939 }; 6940 define_one_arm_cp_reg(cpu, &clidr); 6941 define_arm_cp_regs(cpu, v7_cp_reginfo); 6942 define_debug_regs(cpu); 6943 define_pmu_regs(cpu); 6944 } else { 6945 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 6946 } 6947 if (arm_feature(env, ARM_FEATURE_V8)) { 6948 /* AArch64 ID registers, which all have impdef reset values. 6949 * Note that within the ID register ranges the unused slots 6950 * must all RAZ, not UNDEF; future architecture versions may 6951 * define new registers here. 6952 */ 6953 ARMCPRegInfo v8_idregs[] = { 6954 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 6955 * know the right value for the GIC field until after we 6956 * define these regs. 6957 */ 6958 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 6959 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 6960 .access = PL1_R, .type = ARM_CP_NO_RAW, 6961 .accessfn = access_aa64_tid3, 6962 .readfn = id_aa64pfr0_read, 6963 .writefn = arm_cp_write_ignore }, 6964 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 6965 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 6966 .access = PL1_R, .type = ARM_CP_CONST, 6967 .accessfn = access_aa64_tid3, 6968 .resetvalue = cpu->isar.id_aa64pfr1}, 6969 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6970 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 6971 .access = PL1_R, .type = ARM_CP_CONST, 6972 .accessfn = access_aa64_tid3, 6973 .resetvalue = 0 }, 6974 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6975 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 6976 .access = PL1_R, .type = ARM_CP_CONST, 6977 .accessfn = access_aa64_tid3, 6978 .resetvalue = 0 }, 6979 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 6980 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 6981 .access = PL1_R, .type = ARM_CP_CONST, 6982 .accessfn = access_aa64_tid3, 6983 /* At present, only SVEver == 0 is defined anyway. 
*/ 6984 .resetvalue = 0 }, 6985 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6986 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 6987 .access = PL1_R, .type = ARM_CP_CONST, 6988 .accessfn = access_aa64_tid3, 6989 .resetvalue = 0 }, 6990 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6991 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 6992 .access = PL1_R, .type = ARM_CP_CONST, 6993 .accessfn = access_aa64_tid3, 6994 .resetvalue = 0 }, 6995 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6996 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 6997 .access = PL1_R, .type = ARM_CP_CONST, 6998 .accessfn = access_aa64_tid3, 6999 .resetvalue = 0 }, 7000 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7001 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7002 .access = PL1_R, .type = ARM_CP_CONST, 7003 .accessfn = access_aa64_tid3, 7004 .resetvalue = cpu->isar.id_aa64dfr0 }, 7005 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7006 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7007 .access = PL1_R, .type = ARM_CP_CONST, 7008 .accessfn = access_aa64_tid3, 7009 .resetvalue = cpu->isar.id_aa64dfr1 }, 7010 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7011 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7012 .access = PL1_R, .type = ARM_CP_CONST, 7013 .accessfn = access_aa64_tid3, 7014 .resetvalue = 0 }, 7015 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7016 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7017 .access = PL1_R, .type = ARM_CP_CONST, 7018 .accessfn = access_aa64_tid3, 7019 .resetvalue = 0 }, 7020 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7021 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7022 .access = PL1_R, .type = ARM_CP_CONST, 7023 .accessfn = access_aa64_tid3, 7024 .resetvalue = cpu->id_aa64afr0 }, 7025 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7026 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7027 .access = PL1_R, .type = ARM_CP_CONST, 7028 .accessfn = access_aa64_tid3, 7029 .resetvalue = cpu->id_aa64afr1 }, 7030 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7031 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7032 .access = PL1_R, .type = ARM_CP_CONST, 7033 .accessfn = access_aa64_tid3, 7034 .resetvalue = 0 }, 7035 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7036 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7037 .access = PL1_R, .type = ARM_CP_CONST, 7038 .accessfn = access_aa64_tid3, 7039 .resetvalue = 0 }, 7040 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7041 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7042 .access = PL1_R, .type = ARM_CP_CONST, 7043 .accessfn = access_aa64_tid3, 7044 .resetvalue = cpu->isar.id_aa64isar0 }, 7045 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7046 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7047 .access = PL1_R, .type = ARM_CP_CONST, 7048 .accessfn = access_aa64_tid3, 7049 .resetvalue = cpu->isar.id_aa64isar1 }, 7050 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7051 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7052 .access = PL1_R, .type = ARM_CP_CONST, 7053 .accessfn = access_aa64_tid3, 7054 .resetvalue = 0 }, 7055 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7056 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 7057 .access = PL1_R, .type = ARM_CP_CONST, 7058 .accessfn = access_aa64_tid3, 7059 
.resetvalue = 0 }, 7060 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7061 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 7062 .access = PL1_R, .type = ARM_CP_CONST, 7063 .accessfn = access_aa64_tid3, 7064 .resetvalue = 0 }, 7065 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7066 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 7067 .access = PL1_R, .type = ARM_CP_CONST, 7068 .accessfn = access_aa64_tid3, 7069 .resetvalue = 0 }, 7070 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7071 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 7072 .access = PL1_R, .type = ARM_CP_CONST, 7073 .accessfn = access_aa64_tid3, 7074 .resetvalue = 0 }, 7075 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7076 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 7077 .access = PL1_R, .type = ARM_CP_CONST, 7078 .accessfn = access_aa64_tid3, 7079 .resetvalue = 0 }, 7080 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 7081 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 7082 .access = PL1_R, .type = ARM_CP_CONST, 7083 .accessfn = access_aa64_tid3, 7084 .resetvalue = cpu->isar.id_aa64mmfr0 }, 7085 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 7086 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 7087 .access = PL1_R, .type = ARM_CP_CONST, 7088 .accessfn = access_aa64_tid3, 7089 .resetvalue = cpu->isar.id_aa64mmfr1 }, 7090 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 7091 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 7092 .access = PL1_R, .type = ARM_CP_CONST, 7093 .accessfn = access_aa64_tid3, 7094 .resetvalue = cpu->isar.id_aa64mmfr2 }, 7095 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7096 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 7097 .access = PL1_R, .type = ARM_CP_CONST, 7098 .accessfn = access_aa64_tid3, 7099 .resetvalue = 0 }, 7100 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7101 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 7102 .access = PL1_R, .type = ARM_CP_CONST, 7103 .accessfn = access_aa64_tid3, 7104 .resetvalue = 0 }, 7105 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7106 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 7107 .access = PL1_R, .type = ARM_CP_CONST, 7108 .accessfn = access_aa64_tid3, 7109 .resetvalue = 0 }, 7110 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7111 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 7112 .access = PL1_R, .type = ARM_CP_CONST, 7113 .accessfn = access_aa64_tid3, 7114 .resetvalue = 0 }, 7115 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7116 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 7117 .access = PL1_R, .type = ARM_CP_CONST, 7118 .accessfn = access_aa64_tid3, 7119 .resetvalue = 0 }, 7120 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 7121 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7122 .access = PL1_R, .type = ARM_CP_CONST, 7123 .accessfn = access_aa64_tid3, 7124 .resetvalue = cpu->isar.mvfr0 }, 7125 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 7126 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7127 .access = PL1_R, .type = ARM_CP_CONST, 7128 .accessfn = access_aa64_tid3, 7129 .resetvalue = cpu->isar.mvfr1 }, 7130 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 7131 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7132 .access = PL1_R, .type = ARM_CP_CONST, 7133 .accessfn = access_aa64_tid3, 7134 .resetvalue = cpu->isar.mvfr2 }, 7135 { 
.name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7136 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 7137 .access = PL1_R, .type = ARM_CP_CONST, 7138 .accessfn = access_aa64_tid3, 7139 .resetvalue = 0 }, 7140 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7141 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 7142 .access = PL1_R, .type = ARM_CP_CONST, 7143 .accessfn = access_aa64_tid3, 7144 .resetvalue = 0 }, 7145 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7146 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 7147 .access = PL1_R, .type = ARM_CP_CONST, 7148 .accessfn = access_aa64_tid3, 7149 .resetvalue = 0 }, 7150 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7151 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 7152 .access = PL1_R, .type = ARM_CP_CONST, 7153 .accessfn = access_aa64_tid3, 7154 .resetvalue = 0 }, 7155 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7156 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 7157 .access = PL1_R, .type = ARM_CP_CONST, 7158 .accessfn = access_aa64_tid3, 7159 .resetvalue = 0 }, 7160 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 7161 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 7162 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7163 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 7164 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 7165 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 7166 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7167 .resetvalue = cpu->pmceid0 }, 7168 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 7169 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 7170 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7171 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 7172 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 7173 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 7174 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7175 .resetvalue = cpu->pmceid1 }, 7176 REGINFO_SENTINEL 7177 }; 7178 #ifdef CONFIG_USER_ONLY 7179 ARMCPRegUserSpaceInfo v8_user_idregs[] = { 7180 { .name = "ID_AA64PFR0_EL1", 7181 .exported_bits = 0x000f000f00ff0000, 7182 .fixed_bits = 0x0000000000000011 }, 7183 { .name = "ID_AA64PFR1_EL1", 7184 .exported_bits = 0x00000000000000f0 }, 7185 { .name = "ID_AA64PFR*_EL1_RESERVED", 7186 .is_glob = true }, 7187 { .name = "ID_AA64ZFR0_EL1" }, 7188 { .name = "ID_AA64MMFR0_EL1", 7189 .fixed_bits = 0x00000000ff000000 }, 7190 { .name = "ID_AA64MMFR1_EL1" }, 7191 { .name = "ID_AA64MMFR*_EL1_RESERVED", 7192 .is_glob = true }, 7193 { .name = "ID_AA64DFR0_EL1", 7194 .fixed_bits = 0x0000000000000006 }, 7195 { .name = "ID_AA64DFR1_EL1" }, 7196 { .name = "ID_AA64DFR*_EL1_RESERVED", 7197 .is_glob = true }, 7198 { .name = "ID_AA64AFR*", 7199 .is_glob = true }, 7200 { .name = "ID_AA64ISAR0_EL1", 7201 .exported_bits = 0x00fffffff0fffff0 }, 7202 { .name = "ID_AA64ISAR1_EL1", 7203 .exported_bits = 0x000000f0ffffffff }, 7204 { .name = "ID_AA64ISAR*_EL1_RESERVED", 7205 .is_glob = true }, 7206 REGUSERINFO_SENTINEL 7207 }; 7208 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 7209 #endif 7210 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 7211 if (!arm_feature(env, ARM_FEATURE_EL3) && 7212 !arm_feature(env, ARM_FEATURE_EL2)) { 7213 ARMCPRegInfo rvbar = { 7214 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 7215 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 7216 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 7217 }; 
7218 define_one_arm_cp_reg(cpu, &rvbar); 7219 } 7220 define_arm_cp_regs(cpu, v8_idregs); 7221 define_arm_cp_regs(cpu, v8_cp_reginfo); 7222 } 7223 if (arm_feature(env, ARM_FEATURE_EL2)) { 7224 uint64_t vmpidr_def = mpidr_read_val(env); 7225 ARMCPRegInfo vpidr_regs[] = { 7226 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 7227 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7228 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7229 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 7230 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 7231 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 7232 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7233 .access = PL2_RW, .resetvalue = cpu->midr, 7234 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7235 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 7236 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7237 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7238 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 7239 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 7240 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 7241 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7242 .access = PL2_RW, 7243 .resetvalue = vmpidr_def, 7244 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 7245 REGINFO_SENTINEL 7246 }; 7247 define_arm_cp_regs(cpu, vpidr_regs); 7248 define_arm_cp_regs(cpu, el2_cp_reginfo); 7249 if (arm_feature(env, ARM_FEATURE_V8)) { 7250 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 7251 } 7252 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 7253 if (!arm_feature(env, ARM_FEATURE_EL3)) { 7254 ARMCPRegInfo rvbar = { 7255 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 7256 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 7257 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 7258 }; 7259 define_one_arm_cp_reg(cpu, &rvbar); 7260 } 7261 } else { 7262 /* If EL2 is missing but higher ELs are enabled, we need to 7263 * register the no_el2 reginfos. 7264 */ 7265 if (arm_feature(env, ARM_FEATURE_EL3)) { 7266 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 7267 * of MIDR_EL1 and MPIDR_EL1. 
7268 */ 7269 ARMCPRegInfo vpidr_regs[] = { 7270 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7271 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7272 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 7273 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 7274 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7275 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7276 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7277 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 7278 .type = ARM_CP_NO_RAW, 7279 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 7280 REGINFO_SENTINEL 7281 }; 7282 define_arm_cp_regs(cpu, vpidr_regs); 7283 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 7284 if (arm_feature(env, ARM_FEATURE_V8)) { 7285 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 7286 } 7287 } 7288 } 7289 if (arm_feature(env, ARM_FEATURE_EL3)) { 7290 define_arm_cp_regs(cpu, el3_cp_reginfo); 7291 ARMCPRegInfo el3_regs[] = { 7292 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 7293 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 7294 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 7295 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 7296 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 7297 .access = PL3_RW, 7298 .raw_writefn = raw_write, .writefn = sctlr_write, 7299 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 7300 .resetvalue = cpu->reset_sctlr }, 7301 REGINFO_SENTINEL 7302 }; 7303 7304 define_arm_cp_regs(cpu, el3_regs); 7305 } 7306 /* The behaviour of NSACR is sufficiently various that we don't 7307 * try to describe it in a single reginfo: 7308 * if EL3 is 64 bit, then trap to EL3 from S EL1, 7309 * reads as constant 0xc00 from NS EL1 and NS EL2 7310 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 7311 * if v7 without EL3, register doesn't exist 7312 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 7313 */ 7314 if (arm_feature(env, ARM_FEATURE_EL3)) { 7315 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7316 ARMCPRegInfo nsacr = { 7317 .name = "NSACR", .type = ARM_CP_CONST, 7318 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7319 .access = PL1_RW, .accessfn = nsacr_access, 7320 .resetvalue = 0xc00 7321 }; 7322 define_one_arm_cp_reg(cpu, &nsacr); 7323 } else { 7324 ARMCPRegInfo nsacr = { 7325 .name = "NSACR", 7326 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7327 .access = PL3_RW | PL1_R, 7328 .resetvalue = 0, 7329 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 7330 }; 7331 define_one_arm_cp_reg(cpu, &nsacr); 7332 } 7333 } else { 7334 if (arm_feature(env, ARM_FEATURE_V8)) { 7335 ARMCPRegInfo nsacr = { 7336 .name = "NSACR", .type = ARM_CP_CONST, 7337 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7338 .access = PL1_R, 7339 .resetvalue = 0xc00 7340 }; 7341 define_one_arm_cp_reg(cpu, &nsacr); 7342 } 7343 } 7344 7345 if (arm_feature(env, ARM_FEATURE_PMSA)) { 7346 if (arm_feature(env, ARM_FEATURE_V6)) { 7347 /* PMSAv6 not implemented */ 7348 assert(arm_feature(env, ARM_FEATURE_V7)); 7349 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 7350 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 7351 } else { 7352 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 7353 } 7354 } else { 7355 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 7356 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 7357 /* TTCBR2 is introduced with ARMv8.2-A32HPD. 
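 *
 * (That is, the 32-bit TTBCR2 register: its presence is advertised by a
 *  non-zero ID_MMFR4.HPDS field, which is exactly what the check below
 *  tests before defining ttbcr2_reginfo.)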
*/ 7358 if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) { 7359 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 7360 } 7361 } 7362 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 7363 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 7364 } 7365 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 7366 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 7367 } 7368 if (arm_feature(env, ARM_FEATURE_VAPA)) { 7369 define_arm_cp_regs(cpu, vapa_cp_reginfo); 7370 } 7371 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 7372 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 7373 } 7374 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 7375 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 7376 } 7377 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 7378 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 7379 } 7380 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 7381 define_arm_cp_regs(cpu, omap_cp_reginfo); 7382 } 7383 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 7384 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 7385 } 7386 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7387 define_arm_cp_regs(cpu, xscale_cp_reginfo); 7388 } 7389 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 7390 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 7391 } 7392 if (arm_feature(env, ARM_FEATURE_LPAE)) { 7393 define_arm_cp_regs(cpu, lpae_cp_reginfo); 7394 } 7395 if (cpu_isar_feature(aa32_jazelle, cpu)) { 7396 define_arm_cp_regs(cpu, jazelle_regs); 7397 } 7398 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 7399 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 7400 * be read-only (ie write causes UNDEF exception). 7401 */ 7402 { 7403 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 7404 /* Pre-v8 MIDR space. 7405 * Note that the MIDR isn't a simple constant register because 7406 * of the TI925 behaviour where writes to another register can 7407 * cause the MIDR value to change. 7408 * 7409 * Unimplemented registers in the c15 0 0 0 space default to 7410 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 7411 * and friends override accordingly. 7412 */ 7413 { .name = "MIDR", 7414 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 7415 .access = PL1_R, .resetvalue = cpu->midr, 7416 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 7417 .readfn = midr_read, 7418 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7419 .type = ARM_CP_OVERRIDE }, 7420 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
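 *
 * (Each DUMMY entry below covers one CRm value in that range, with opc2
 *  wildcarded via CP_ANY, so every such encoding reads as constant 0.)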
*/ 7421 { .name = "DUMMY", 7422 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 7423 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7424 { .name = "DUMMY", 7425 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 7426 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7427 { .name = "DUMMY", 7428 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 7429 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7430 { .name = "DUMMY", 7431 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 7432 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7433 { .name = "DUMMY", 7434 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 7435 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7436 REGINFO_SENTINEL 7437 }; 7438 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 7439 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 7440 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 7441 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 7442 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7443 .readfn = midr_read }, 7444 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 7445 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7446 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7447 .access = PL1_R, .resetvalue = cpu->midr }, 7448 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7449 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 7450 .access = PL1_R, .resetvalue = cpu->midr }, 7451 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 7452 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 7453 .access = PL1_R, 7454 .accessfn = access_aa64_tid1, 7455 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 7456 REGINFO_SENTINEL 7457 }; 7458 ARMCPRegInfo id_cp_reginfo[] = { 7459 /* These are common to v8 and pre-v8 */ 7460 { .name = "CTR", 7461 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 7462 .access = PL1_R, .accessfn = ctr_el0_access, 7463 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7464 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 7465 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 7466 .access = PL0_R, .accessfn = ctr_el0_access, 7467 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7468 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 7469 { .name = "TCMTR", 7470 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 7471 .access = PL1_R, 7472 .accessfn = access_aa32_tid1, 7473 .type = ARM_CP_CONST, .resetvalue = 0 }, 7474 REGINFO_SENTINEL 7475 }; 7476 /* TLBTR is specific to VMSA */ 7477 ARMCPRegInfo id_tlbtr_reginfo = { 7478 .name = "TLBTR", 7479 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 7480 .access = PL1_R, 7481 .accessfn = access_aa32_tid1, 7482 .type = ARM_CP_CONST, .resetvalue = 0, 7483 }; 7484 /* MPUIR is specific to PMSA V6+ */ 7485 ARMCPRegInfo id_mpuir_reginfo = { 7486 .name = "MPUIR", 7487 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7488 .access = PL1_R, .type = ARM_CP_CONST, 7489 .resetvalue = cpu->pmsav7_dregion << 8 7490 }; 7491 ARMCPRegInfo crn0_wi_reginfo = { 7492 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 7493 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 7494 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 7495 }; 7496 #ifdef CONFIG_USER_ONLY 7497 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 7498 { .name = "MIDR_EL1", 7499 .exported_bits = 0x00000000ffffffff }, 7500 { .name = "REVIDR_EL1" }, 7501 REGUSERINFO_SENTINEL 7502 }; 7503 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 7504 #endif 7505 
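        /*
         * (Illustrative note on the user-space masks above: in a user-only
         * build modify_arm_cp_regs() turns the matched registers into
         * read-only constants, so MIDR_EL1 keeps only its low 32 bits
         * (.exported_bits = 0x00000000ffffffff) while REVIDR_EL1, listed
         * with no exported bits, simply reads as 0 for the guest.)
         */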
if (arm_feature(env, ARM_FEATURE_OMAPCP) || 7506 arm_feature(env, ARM_FEATURE_STRONGARM)) { 7507 ARMCPRegInfo *r; 7508 /* Register the blanket "writes ignored" value first to cover the 7509 * whole space. Then update the specific ID registers to allow write 7510 * access, so that they ignore writes rather than causing them to 7511 * UNDEF. 7512 */ 7513 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 7514 for (r = id_pre_v8_midr_cp_reginfo; 7515 r->type != ARM_CP_SENTINEL; r++) { 7516 r->access = PL1_RW; 7517 } 7518 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 7519 r->access = PL1_RW; 7520 } 7521 id_mpuir_reginfo.access = PL1_RW; 7522 id_tlbtr_reginfo.access = PL1_RW; 7523 } 7524 if (arm_feature(env, ARM_FEATURE_V8)) { 7525 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 7526 } else { 7527 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 7528 } 7529 define_arm_cp_regs(cpu, id_cp_reginfo); 7530 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 7531 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 7532 } else if (arm_feature(env, ARM_FEATURE_V7)) { 7533 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 7534 } 7535 } 7536 7537 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 7538 ARMCPRegInfo mpidr_cp_reginfo[] = { 7539 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 7540 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 7541 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 7542 REGINFO_SENTINEL 7543 }; 7544 #ifdef CONFIG_USER_ONLY 7545 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 7546 { .name = "MPIDR_EL1", 7547 .fixed_bits = 0x0000000080000000 }, 7548 REGUSERINFO_SENTINEL 7549 }; 7550 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 7551 #endif 7552 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 7553 } 7554 7555 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 7556 ARMCPRegInfo auxcr_reginfo[] = { 7557 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 7558 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 7559 .access = PL1_RW, .type = ARM_CP_CONST, 7560 .resetvalue = cpu->reset_auxcr }, 7561 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 7562 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 7563 .access = PL2_RW, .type = ARM_CP_CONST, 7564 .resetvalue = 0 }, 7565 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 7566 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 7567 .access = PL3_RW, .type = ARM_CP_CONST, 7568 .resetvalue = 0 }, 7569 REGINFO_SENTINEL 7570 }; 7571 define_arm_cp_regs(cpu, auxcr_reginfo); 7572 if (arm_feature(env, ARM_FEATURE_V8)) { 7573 /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */ 7574 ARMCPRegInfo hactlr2_reginfo = { 7575 .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7576 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7577 .access = PL2_RW, .type = ARM_CP_CONST, 7578 .resetvalue = 0 7579 }; 7580 define_one_arm_cp_reg(cpu, &hactlr2_reginfo); 7581 } 7582 } 7583 7584 if (arm_feature(env, ARM_FEATURE_CBAR)) { 7585 /* 7586 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 7587 * There are two flavours: 7588 * (1) older 32-bit only cores have a simple 32-bit CBAR 7589 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 7590 * 32-bit register visible to AArch32 at a different encoding 7591 * to the "flavour 1" register and with the bits rearranged to 7592 * be able to squash a 64-bit address into the 32-bit view. 
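 *     (Worked example with a made-up address: a 64-bit CBAR of
 *      0x4_8000_0000 has bits [43:32] = 0x004 and bits [31:18] = 0x2000,
 *      so the 32-bit view computed below is (0x2000 << 18) | 0x004
 *      = 0x80000004.)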
7593 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 7594 * in future if we support AArch32-only configs of some of the 7595 * AArch64 cores we might need to add a specific feature flag 7596 * to indicate cores with "flavour 2" CBAR. 7597 */ 7598 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7599 /* 32 bit view is [31:18] 0...0 [43:32]. */ 7600 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 7601 | extract64(cpu->reset_cbar, 32, 12); 7602 ARMCPRegInfo cbar_reginfo[] = { 7603 { .name = "CBAR", 7604 .type = ARM_CP_CONST, 7605 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 7606 .access = PL1_R, .resetvalue = cbar32 }, 7607 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 7608 .type = ARM_CP_CONST, 7609 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 7610 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 7611 REGINFO_SENTINEL 7612 }; 7613 /* We don't implement a r/w 64 bit CBAR currently */ 7614 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 7615 define_arm_cp_regs(cpu, cbar_reginfo); 7616 } else { 7617 ARMCPRegInfo cbar = { 7618 .name = "CBAR", 7619 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 7620 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 7621 .fieldoffset = offsetof(CPUARMState, 7622 cp15.c15_config_base_address) 7623 }; 7624 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 7625 cbar.access = PL1_R; 7626 cbar.fieldoffset = 0; 7627 cbar.type = ARM_CP_CONST; 7628 } 7629 define_one_arm_cp_reg(cpu, &cbar); 7630 } 7631 } 7632 7633 if (arm_feature(env, ARM_FEATURE_VBAR)) { 7634 ARMCPRegInfo vbar_cp_reginfo[] = { 7635 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 7636 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 7637 .access = PL1_RW, .writefn = vbar_write, 7638 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 7639 offsetof(CPUARMState, cp15.vbar_ns) }, 7640 .resetvalue = 0 }, 7641 REGINFO_SENTINEL 7642 }; 7643 define_arm_cp_regs(cpu, vbar_cp_reginfo); 7644 } 7645 7646 /* Generic registers whose values depend on the implementation */ 7647 { 7648 ARMCPRegInfo sctlr = { 7649 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 7650 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 7651 .access = PL1_RW, 7652 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 7653 offsetof(CPUARMState, cp15.sctlr_ns) }, 7654 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 7655 .raw_writefn = raw_write, 7656 }; 7657 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7658 /* Normally we would always end the TB on an SCTLR write, but Linux 7659 * arch/arm/mach-pxa/sleep.S expects two instructions following 7660 * an MMU enable to execute from cache. Imitate this behaviour. 
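 *
 * (ARM_CP_SUPPRESS_TB_END, OR'ed in just below, is what achieves this:
 *  the translator normally ends the translation block after a coprocessor
 *  register write, and suppressing that lets the next instructions keep
 *  running from the already-translated code.)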
7661 */ 7662 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 7663 } 7664 define_one_arm_cp_reg(cpu, &sctlr); 7665 } 7666 7667 if (cpu_isar_feature(aa64_lor, cpu)) { 7668 define_arm_cp_regs(cpu, lor_reginfo); 7669 } 7670 if (cpu_isar_feature(aa64_pan, cpu)) { 7671 define_one_arm_cp_reg(cpu, &pan_reginfo); 7672 } 7673 #ifndef CONFIG_USER_ONLY 7674 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 7675 define_arm_cp_regs(cpu, ats1e1_reginfo); 7676 } 7677 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 7678 define_arm_cp_regs(cpu, ats1cp_reginfo); 7679 } 7680 #endif 7681 if (cpu_isar_feature(aa64_uao, cpu)) { 7682 define_one_arm_cp_reg(cpu, &uao_reginfo); 7683 } 7684 7685 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7686 define_arm_cp_regs(cpu, vhe_reginfo); 7687 } 7688 7689 if (cpu_isar_feature(aa64_sve, cpu)) { 7690 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 7691 if (arm_feature(env, ARM_FEATURE_EL2)) { 7692 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 7693 } else { 7694 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 7695 } 7696 if (arm_feature(env, ARM_FEATURE_EL3)) { 7697 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 7698 } 7699 } 7700 7701 #ifdef TARGET_AARCH64 7702 if (cpu_isar_feature(aa64_pauth, cpu)) { 7703 define_arm_cp_regs(cpu, pauth_reginfo); 7704 } 7705 if (cpu_isar_feature(aa64_rndr, cpu)) { 7706 define_arm_cp_regs(cpu, rndr_reginfo); 7707 } 7708 #ifndef CONFIG_USER_ONLY 7709 /* Data Cache clean instructions up to PoP */ 7710 if (cpu_isar_feature(aa64_dcpop, cpu)) { 7711 define_one_arm_cp_reg(cpu, dcpop_reg); 7712 7713 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 7714 define_one_arm_cp_reg(cpu, dcpodp_reg); 7715 } 7716 } 7717 #endif /*CONFIG_USER_ONLY*/ 7718 #endif 7719 7720 if (cpu_isar_feature(any_predinv, cpu)) { 7721 define_arm_cp_regs(cpu, predinv_reginfo); 7722 } 7723 7724 #ifndef CONFIG_USER_ONLY 7725 /* 7726 * Register redirections and aliases must be done last, 7727 * after the registers from the other extensions have been defined. 7728 */ 7729 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7730 define_arm_vh_e2h_redirects_aliases(cpu); 7731 } 7732 #endif 7733 } 7734 7735 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 7736 { 7737 CPUState *cs = CPU(cpu); 7738 CPUARMState *env = &cpu->env; 7739 7740 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7741 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 7742 aarch64_fpu_gdb_set_reg, 7743 34, "aarch64-fpu.xml", 0); 7744 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 7745 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7746 51, "arm-neon.xml", 0); 7747 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 7748 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7749 35, "arm-vfp3.xml", 0); 7750 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 7751 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7752 19, "arm-vfp.xml", 0); 7753 } 7754 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 7755 arm_gen_dynamic_xml(cs), 7756 "system-registers.xml", 0); 7757 } 7758 7759 /* Sort alphabetically by type name, except for "any". 
*/ 7760 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 7761 { 7762 ObjectClass *class_a = (ObjectClass *)a; 7763 ObjectClass *class_b = (ObjectClass *)b; 7764 const char *name_a, *name_b; 7765 7766 name_a = object_class_get_name(class_a); 7767 name_b = object_class_get_name(class_b); 7768 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 7769 return 1; 7770 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 7771 return -1; 7772 } else { 7773 return strcmp(name_a, name_b); 7774 } 7775 } 7776 7777 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 7778 { 7779 ObjectClass *oc = data; 7780 const char *typename; 7781 char *name; 7782 7783 typename = object_class_get_name(oc); 7784 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 7785 qemu_printf(" %s\n", name); 7786 g_free(name); 7787 } 7788 7789 void arm_cpu_list(void) 7790 { 7791 GSList *list; 7792 7793 list = object_class_get_list(TYPE_ARM_CPU, false); 7794 list = g_slist_sort(list, arm_cpu_list_compare); 7795 qemu_printf("Available CPUs:\n"); 7796 g_slist_foreach(list, arm_cpu_list_entry, NULL); 7797 g_slist_free(list); 7798 } 7799 7800 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 7801 { 7802 ObjectClass *oc = data; 7803 CpuDefinitionInfoList **cpu_list = user_data; 7804 CpuDefinitionInfoList *entry; 7805 CpuDefinitionInfo *info; 7806 const char *typename; 7807 7808 typename = object_class_get_name(oc); 7809 info = g_malloc0(sizeof(*info)); 7810 info->name = g_strndup(typename, 7811 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 7812 info->q_typename = g_strdup(typename); 7813 7814 entry = g_malloc0(sizeof(*entry)); 7815 entry->value = info; 7816 entry->next = *cpu_list; 7817 *cpu_list = entry; 7818 } 7819 7820 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 7821 { 7822 CpuDefinitionInfoList *cpu_list = NULL; 7823 GSList *list; 7824 7825 list = object_class_get_list(TYPE_ARM_CPU, false); 7826 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 7827 g_slist_free(list); 7828 7829 return cpu_list; 7830 } 7831 7832 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 7833 void *opaque, int state, int secstate, 7834 int crm, int opc1, int opc2, 7835 const char *name) 7836 { 7837 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 7838 * add a single reginfo struct to the hash table. 7839 */ 7840 uint32_t *key = g_new(uint32_t, 1); 7841 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 7842 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 7843 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 7844 7845 r2->name = g_strdup(name); 7846 /* Reset the secure state to the specific incoming state. This is 7847 * necessary as the register may have been defined with both states. 7848 */ 7849 r2->secure = secstate; 7850 7851 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 7852 /* Register is banked (using both entries in array). 7853 * Overwriting fieldoffset as the array is only used to define 7854 * banked registers but later only fieldoffset is used. 7855 */ 7856 r2->fieldoffset = r->bank_fieldoffsets[ns]; 7857 } 7858 7859 if (state == ARM_CP_STATE_AA32) { 7860 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 7861 /* If the register is banked then we don't need to migrate or 7862 * reset the 32-bit instance in certain cases: 7863 * 7864 * 1) If the register has both 32-bit and 64-bit instances then we 7865 * can count on the 64-bit instance taking care of the 7866 * non-secure bank. 
7867 * 2) If ARMv8 is enabled then we can count on a 64-bit version 7868 * taking care of the secure bank. This requires that separate 7869 * 32 and 64-bit definitions are provided. 7870 */ 7871 if ((r->state == ARM_CP_STATE_BOTH && ns) || 7872 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 7873 r2->type |= ARM_CP_ALIAS; 7874 } 7875 } else if ((secstate != r->secure) && !ns) { 7876 /* The register is not banked so we only want to allow migration of 7877 * the non-secure instance. 7878 */ 7879 r2->type |= ARM_CP_ALIAS; 7880 } 7881 7882 if (r->state == ARM_CP_STATE_BOTH) { 7883 /* We assume it is a cp15 register if the .cp field is left unset. 7884 */ 7885 if (r2->cp == 0) { 7886 r2->cp = 15; 7887 } 7888 7889 #ifdef HOST_WORDS_BIGENDIAN 7890 if (r2->fieldoffset) { 7891 r2->fieldoffset += sizeof(uint32_t); 7892 } 7893 #endif 7894 } 7895 } 7896 if (state == ARM_CP_STATE_AA64) { 7897 /* To allow abbreviation of ARMCPRegInfo 7898 * definitions, we treat cp == 0 as equivalent to 7899 * the value for "standard guest-visible sysreg". 7900 * STATE_BOTH definitions are also always "standard 7901 * sysreg" in their AArch64 view (the .cp value may 7902 * be non-zero for the benefit of the AArch32 view). 7903 */ 7904 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 7905 r2->cp = CP_REG_ARM64_SYSREG_CP; 7906 } 7907 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 7908 r2->opc0, opc1, opc2); 7909 } else { 7910 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 7911 } 7912 if (opaque) { 7913 r2->opaque = opaque; 7914 } 7915 /* reginfo passed to helpers is correct for the actual access, 7916 * and is never ARM_CP_STATE_BOTH: 7917 */ 7918 r2->state = state; 7919 /* Make sure reginfo passed to helpers for wildcarded regs 7920 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 7921 */ 7922 r2->crm = crm; 7923 r2->opc1 = opc1; 7924 r2->opc2 = opc2; 7925 /* By convention, for wildcarded registers only the first 7926 * entry is used for migration; the others are marked as 7927 * ALIAS so we don't try to transfer the register 7928 * multiple times. Special registers (ie NOP/WFI) are 7929 * never migratable and not even raw-accessible. 7930 */ 7931 if ((r->type & ARM_CP_SPECIAL)) { 7932 r2->type |= ARM_CP_NO_RAW; 7933 } 7934 if (((r->crm == CP_ANY) && crm != 0) || 7935 ((r->opc1 == CP_ANY) && opc1 != 0) || 7936 ((r->opc2 == CP_ANY) && opc2 != 0)) { 7937 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 7938 } 7939 7940 /* Check that raw accesses are either forbidden or handled. Note that 7941 * we can't assert this earlier because the setup of fieldoffset for 7942 * banked registers has to be done first. 7943 */ 7944 if (!(r2->type & ARM_CP_NO_RAW)) { 7945 assert(!raw_accessors_invalid(r2)); 7946 } 7947 7948 /* Overriding of an existing definition must be explicitly 7949 * requested. 7950 */ 7951 if (!(r->type & ARM_CP_OVERRIDE)) { 7952 ARMCPRegInfo *oldreg; 7953 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 7954 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 7955 fprintf(stderr, "Register redefined: cp=%d %d bit " 7956 "crn=%d crm=%d opc1=%d opc2=%d, " 7957 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 7958 r2->crn, r2->crm, r2->opc1, r2->opc2, 7959 oldreg->name, r2->name); 7960 g_assert_not_reached(); 7961 } 7962 } 7963 g_hash_table_insert(cpu->cp_regs, key, r2); 7964 } 7965 7966 7967 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 7968 const ARMCPRegInfo *r, void *opaque) 7969 { 7970 /* Define implementations of coprocessor registers. 
7971 * We store these in a hashtable because typically 7972 * there are less than 150 registers in a space which 7973 * is 16*16*16*8*8 = 262144 in size. 7974 * Wildcarding is supported for the crm, opc1 and opc2 fields. 7975 * If a register is defined twice then the second definition is 7976 * used, so this can be used to define some generic registers and 7977 * then override them with implementation specific variations. 7978 * At least one of the original and the second definition should 7979 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 7980 * against accidental use. 7981 * 7982 * The state field defines whether the register is to be 7983 * visible in the AArch32 or AArch64 execution state. If the 7984 * state is set to ARM_CP_STATE_BOTH then we synthesise a 7985 * reginfo structure for the AArch32 view, which sees the lower 7986 * 32 bits of the 64 bit register. 7987 * 7988 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 7989 * be wildcarded. AArch64 registers are always considered to be 64 7990 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 7991 * the register, if any. 7992 */ 7993 int crm, opc1, opc2, state; 7994 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 7995 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 7996 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 7997 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 7998 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 7999 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 8000 /* 64 bit registers have only CRm and Opc1 fields */ 8001 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 8002 /* op0 only exists in the AArch64 encodings */ 8003 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 8004 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 8005 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 8006 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 8007 * encodes a minimum access level for the register. We roll this 8008 * runtime check into our general permission check code, so check 8009 * here that the reginfo's specified permissions are strict enough 8010 * to encompass the generic architectural permission check. 8011 */ 8012 if (r->state != ARM_CP_STATE_AA32) { 8013 int mask = 0; 8014 switch (r->opc1) { 8015 case 0: 8016 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 8017 mask = PL0U_R | PL1_RW; 8018 break; 8019 case 1: case 2: 8020 /* min_EL EL1 */ 8021 mask = PL1_RW; 8022 break; 8023 case 3: 8024 /* min_EL EL0 */ 8025 mask = PL0_RW; 8026 break; 8027 case 4: 8028 case 5: 8029 /* min_EL EL2 */ 8030 mask = PL2_RW; 8031 break; 8032 case 6: 8033 /* min_EL EL3 */ 8034 mask = PL3_RW; 8035 break; 8036 case 7: 8037 /* min_EL EL1, secure mode only (we don't check the latter) */ 8038 mask = PL1_RW; 8039 break; 8040 default: 8041 /* broken reginfo with out-of-range opc1 */ 8042 assert(false); 8043 break; 8044 } 8045 /* assert our permissions are not too lax (stricter is fine) */ 8046 assert((r->access & ~mask) == 0); 8047 } 8048 8049 /* Check that the register definition has enough info to handle 8050 * reads and writes if they are permitted. 
8051 */ 8052 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 8053 if (r->access & PL3_R) { 8054 assert((r->fieldoffset || 8055 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8056 r->readfn); 8057 } 8058 if (r->access & PL3_W) { 8059 assert((r->fieldoffset || 8060 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8061 r->writefn); 8062 } 8063 } 8064 /* Bad type field probably means missing sentinel at end of reg list */ 8065 assert(cptype_valid(r->type)); 8066 for (crm = crmmin; crm <= crmmax; crm++) { 8067 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 8068 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 8069 for (state = ARM_CP_STATE_AA32; 8070 state <= ARM_CP_STATE_AA64; state++) { 8071 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 8072 continue; 8073 } 8074 if (state == ARM_CP_STATE_AA32) { 8075 /* Under AArch32 CP registers can be common 8076 * (same for secure and non-secure world) or banked. 8077 */ 8078 char *name; 8079 8080 switch (r->secure) { 8081 case ARM_CP_SECSTATE_S: 8082 case ARM_CP_SECSTATE_NS: 8083 add_cpreg_to_hashtable(cpu, r, opaque, state, 8084 r->secure, crm, opc1, opc2, 8085 r->name); 8086 break; 8087 default: 8088 name = g_strdup_printf("%s_S", r->name); 8089 add_cpreg_to_hashtable(cpu, r, opaque, state, 8090 ARM_CP_SECSTATE_S, 8091 crm, opc1, opc2, name); 8092 g_free(name); 8093 add_cpreg_to_hashtable(cpu, r, opaque, state, 8094 ARM_CP_SECSTATE_NS, 8095 crm, opc1, opc2, r->name); 8096 break; 8097 } 8098 } else { 8099 /* AArch64 registers get mapped to non-secure instance 8100 * of AArch32 */ 8101 add_cpreg_to_hashtable(cpu, r, opaque, state, 8102 ARM_CP_SECSTATE_NS, 8103 crm, opc1, opc2, r->name); 8104 } 8105 } 8106 } 8107 } 8108 } 8109 } 8110 8111 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 8112 const ARMCPRegInfo *regs, void *opaque) 8113 { 8114 /* Define a whole list of registers */ 8115 const ARMCPRegInfo *r; 8116 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8117 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 8118 } 8119 } 8120 8121 /* 8122 * Modify ARMCPRegInfo for access from userspace. 8123 * 8124 * This is a data driven modification directed by 8125 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 8126 * user-space cannot alter any values and dynamic values pertaining to 8127 * execution state are hidden from user space view anyway. 
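 *
 * As a purely illustrative example (the register names and masks below are
 * not taken from any real mods table), an entry of the form
 *   { .name = "ID_AA64*", .is_glob = true }
 * turns every register whose name matches the glob into a constant that
 * reads as zero from EL0, while an entry such as
 *   { .name = "SOME_REG_EL1", .exported_bits = 0xff }
 * keeps only the masked bits of that register's reset value (plus any
 * fixed_bits) visible to user space.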
8128 */
8129 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
8130 {
8131 const ARMCPRegUserSpaceInfo *m;
8132 ARMCPRegInfo *r;
8133
8134 for (m = mods; m->name; m++) {
8135 GPatternSpec *pat = NULL;
8136 if (m->is_glob) {
8137 pat = g_pattern_spec_new(m->name);
8138 }
8139 for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8140 if (pat && g_pattern_match_string(pat, r->name)) {
8141 r->type = ARM_CP_CONST;
8142 r->access = PL0U_R;
8143 r->resetvalue = 0;
8144 /* continue */
8145 } else if (strcmp(r->name, m->name) == 0) {
8146 r->type = ARM_CP_CONST;
8147 r->access = PL0U_R;
8148 r->resetvalue &= m->exported_bits;
8149 r->resetvalue |= m->fixed_bits;
8150 break;
8151 }
8152 }
8153 if (pat) {
8154 g_pattern_spec_free(pat);
8155 }
8156 }
8157 }
8158
8159 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
8160 {
8161 return g_hash_table_lookup(cpregs, &encoded_cp);
8162 }
8163
8164 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8165 uint64_t value)
8166 {
8167 /* Helper coprocessor write function for write-ignore registers */
8168 }
8169
8170 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
8171 {
8172 /* Helper coprocessor read function for read-as-zero registers */
8173 return 0;
8174 }
8175
8176 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
8177 {
8178 /* Helper coprocessor reset function for do-nothing-on-reset registers */
8179 }
8180
8181 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
8182 {
8183 /* Return true if it is not valid for us to switch to
8184 * this CPU mode (ie all the UNPREDICTABLE cases in
8185 * the ARM ARM CPSRWriteByInstr pseudocode).
8186 */
8187
8188 /* Changes to or from Hyp via MSR and CPS are illegal. */
8189 if (write_type == CPSRWriteByInstr &&
8190 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8191 mode == ARM_CPU_MODE_HYP)) {
8192 return 1;
8193 }
8194
8195 switch (mode) {
8196 case ARM_CPU_MODE_USR:
8197 return 0;
8198 case ARM_CPU_MODE_SYS:
8199 case ARM_CPU_MODE_SVC:
8200 case ARM_CPU_MODE_ABT:
8201 case ARM_CPU_MODE_UND:
8202 case ARM_CPU_MODE_IRQ:
8203 case ARM_CPU_MODE_FIQ:
8204 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
8205 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8206 */
8207 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8208 * and CPS are treated as illegal mode changes.
8209 */ 8210 if (write_type == CPSRWriteByInstr && 8211 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 8212 (arm_hcr_el2_eff(env) & HCR_TGE)) { 8213 return 1; 8214 } 8215 return 0; 8216 case ARM_CPU_MODE_HYP: 8217 return !arm_feature(env, ARM_FEATURE_EL2) 8218 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); 8219 case ARM_CPU_MODE_MON: 8220 return arm_current_el(env) < 3; 8221 default: 8222 return 1; 8223 } 8224 } 8225 8226 uint32_t cpsr_read(CPUARMState *env) 8227 { 8228 int ZF; 8229 ZF = (env->ZF == 0); 8230 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 8231 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 8232 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 8233 | ((env->condexec_bits & 0xfc) << 8) 8234 | (env->GE << 16) | (env->daif & CPSR_AIF); 8235 } 8236 8237 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 8238 CPSRWriteType write_type) 8239 { 8240 uint32_t changed_daif; 8241 8242 if (mask & CPSR_NZCV) { 8243 env->ZF = (~val) & CPSR_Z; 8244 env->NF = val; 8245 env->CF = (val >> 29) & 1; 8246 env->VF = (val << 3) & 0x80000000; 8247 } 8248 if (mask & CPSR_Q) 8249 env->QF = ((val & CPSR_Q) != 0); 8250 if (mask & CPSR_T) 8251 env->thumb = ((val & CPSR_T) != 0); 8252 if (mask & CPSR_IT_0_1) { 8253 env->condexec_bits &= ~3; 8254 env->condexec_bits |= (val >> 25) & 3; 8255 } 8256 if (mask & CPSR_IT_2_7) { 8257 env->condexec_bits &= 3; 8258 env->condexec_bits |= (val >> 8) & 0xfc; 8259 } 8260 if (mask & CPSR_GE) { 8261 env->GE = (val >> 16) & 0xf; 8262 } 8263 8264 /* In a V7 implementation that includes the security extensions but does 8265 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 8266 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 8267 * bits respectively. 8268 * 8269 * In a V8 implementation, it is permitted for privileged software to 8270 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 8271 */ 8272 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 8273 arm_feature(env, ARM_FEATURE_EL3) && 8274 !arm_feature(env, ARM_FEATURE_EL2) && 8275 !arm_is_secure(env)) { 8276 8277 changed_daif = (env->daif ^ val) & mask; 8278 8279 if (changed_daif & CPSR_A) { 8280 /* Check to see if we are allowed to change the masking of async 8281 * abort exceptions from a non-secure state. 8282 */ 8283 if (!(env->cp15.scr_el3 & SCR_AW)) { 8284 qemu_log_mask(LOG_GUEST_ERROR, 8285 "Ignoring attempt to switch CPSR_A flag from " 8286 "non-secure world with SCR.AW bit clear\n"); 8287 mask &= ~CPSR_A; 8288 } 8289 } 8290 8291 if (changed_daif & CPSR_F) { 8292 /* Check to see if we are allowed to change the masking of FIQ 8293 * exceptions from a non-secure state. 8294 */ 8295 if (!(env->cp15.scr_el3 & SCR_FW)) { 8296 qemu_log_mask(LOG_GUEST_ERROR, 8297 "Ignoring attempt to switch CPSR_F flag from " 8298 "non-secure world with SCR.FW bit clear\n"); 8299 mask &= ~CPSR_F; 8300 } 8301 8302 /* Check whether non-maskable FIQ (NMFI) support is enabled. 8303 * If this bit is set software is not allowed to mask 8304 * FIQs, but is allowed to set CPSR_F to 0. 
8305 */ 8306 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 8307 (val & CPSR_F)) { 8308 qemu_log_mask(LOG_GUEST_ERROR, 8309 "Ignoring attempt to enable CPSR_F flag " 8310 "(non-maskable FIQ [NMFI] support enabled)\n"); 8311 mask &= ~CPSR_F; 8312 } 8313 } 8314 } 8315 8316 env->daif &= ~(CPSR_AIF & mask); 8317 env->daif |= val & CPSR_AIF & mask; 8318 8319 if (write_type != CPSRWriteRaw && 8320 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 8321 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 8322 /* Note that we can only get here in USR mode if this is a 8323 * gdb stub write; for this case we follow the architectural 8324 * behaviour for guest writes in USR mode of ignoring an attempt 8325 * to switch mode. (Those are caught by translate.c for writes 8326 * triggered by guest instructions.) 8327 */ 8328 mask &= ~CPSR_M; 8329 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 8330 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 8331 * v7, and has defined behaviour in v8: 8332 * + leave CPSR.M untouched 8333 * + allow changes to the other CPSR fields 8334 * + set PSTATE.IL 8335 * For user changes via the GDB stub, we don't set PSTATE.IL, 8336 * as this would be unnecessarily harsh for a user error. 8337 */ 8338 mask &= ~CPSR_M; 8339 if (write_type != CPSRWriteByGDBStub && 8340 arm_feature(env, ARM_FEATURE_V8)) { 8341 mask |= CPSR_IL; 8342 val |= CPSR_IL; 8343 } 8344 qemu_log_mask(LOG_GUEST_ERROR, 8345 "Illegal AArch32 mode switch attempt from %s to %s\n", 8346 aarch32_mode_name(env->uncached_cpsr), 8347 aarch32_mode_name(val)); 8348 } else { 8349 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 8350 write_type == CPSRWriteExceptionReturn ? 8351 "Exception return from AArch32" : 8352 "AArch32 mode switch from", 8353 aarch32_mode_name(env->uncached_cpsr), 8354 aarch32_mode_name(val), env->regs[15]); 8355 switch_mode(env, val & CPSR_M); 8356 } 8357 } 8358 mask &= ~CACHED_CPSR_BITS; 8359 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 8360 } 8361 8362 /* Sign/zero extend */ 8363 uint32_t HELPER(sxtb16)(uint32_t x) 8364 { 8365 uint32_t res; 8366 res = (uint16_t)(int8_t)x; 8367 res |= (uint32_t)(int8_t)(x >> 16) << 16; 8368 return res; 8369 } 8370 8371 uint32_t HELPER(uxtb16)(uint32_t x) 8372 { 8373 uint32_t res; 8374 res = (uint16_t)(uint8_t)x; 8375 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 8376 return res; 8377 } 8378 8379 int32_t HELPER(sdiv)(int32_t num, int32_t den) 8380 { 8381 if (den == 0) 8382 return 0; 8383 if (num == INT_MIN && den == -1) 8384 return INT_MIN; 8385 return num / den; 8386 } 8387 8388 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 8389 { 8390 if (den == 0) 8391 return 0; 8392 return num / den; 8393 } 8394 8395 uint32_t HELPER(rbit)(uint32_t x) 8396 { 8397 return revbit32(x); 8398 } 8399 8400 #ifdef CONFIG_USER_ONLY 8401 8402 static void switch_mode(CPUARMState *env, int mode) 8403 { 8404 ARMCPU *cpu = env_archcpu(env); 8405 8406 if (mode != ARM_CPU_MODE_USR) { 8407 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 8408 } 8409 } 8410 8411 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8412 uint32_t cur_el, bool secure) 8413 { 8414 return 1; 8415 } 8416 8417 void aarch64_sync_64_to_32(CPUARMState *env) 8418 { 8419 g_assert_not_reached(); 8420 } 8421 8422 #else 8423 8424 static void switch_mode(CPUARMState *env, int mode) 8425 { 8426 int old_mode; 8427 int i; 8428 8429 old_mode = env->uncached_cpsr & CPSR_M; 8430 if (mode == old_mode) 8431 return; 8432 8433 if 
(old_mode == ARM_CPU_MODE_FIQ) { 8434 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8435 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 8436 } else if (mode == ARM_CPU_MODE_FIQ) { 8437 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8438 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 8439 } 8440 8441 i = bank_number(old_mode); 8442 env->banked_r13[i] = env->regs[13]; 8443 env->banked_spsr[i] = env->spsr; 8444 8445 i = bank_number(mode); 8446 env->regs[13] = env->banked_r13[i]; 8447 env->spsr = env->banked_spsr[i]; 8448 8449 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 8450 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 8451 } 8452 8453 /* Physical Interrupt Target EL Lookup Table 8454 * 8455 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 8456 * 8457 * The below multi-dimensional table is used for looking up the target 8458 * exception level given numerous condition criteria. Specifically, the 8459 * target EL is based on SCR and HCR routing controls as well as the 8460 * currently executing EL and secure state. 8461 * 8462 * Dimensions: 8463 * target_el_table[2][2][2][2][2][4] 8464 * | | | | | +--- Current EL 8465 * | | | | +------ Non-secure(0)/Secure(1) 8466 * | | | +--------- HCR mask override 8467 * | | +------------ SCR exec state control 8468 * | +--------------- SCR mask override 8469 * +------------------ 32-bit(0)/64-bit(1) EL3 8470 * 8471 * The table values are as such: 8472 * 0-3 = EL0-EL3 8473 * -1 = Cannot occur 8474 * 8475 * The ARM ARM target EL table includes entries indicating that an "exception 8476 * is not taken". The two cases where this is applicable are: 8477 * 1) An exception is taken from EL3 but the SCR does not have the exception 8478 * routed to EL3. 8479 * 2) An exception is taken from EL2 but the HCR does not have the exception 8480 * routed to EL2. 8481 * In these two cases, the below table contain a target of EL1. This value is 8482 * returned as it is expected that the consumer of the table data will check 8483 * for "target EL >= current EL" to ensure the exception is not taken. 
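 *
 * Worked example (added for illustration): with a 64-bit EL3 (is64 = 1),
 * SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 1, a physical IRQ
 * taken from non-secure EL0 indexes target_el_table[1][0][1][1][0][0],
 * which is 2, so the interrupt is routed to EL2.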
8484 * 8485 * SCR HCR 8486 * 64 EA AMO From 8487 * BIT IRQ IMO Non-secure Secure 8488 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 8489 */ 8490 static const int8_t target_el_table[2][2][2][2][2][4] = { 8491 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8492 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 8493 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8494 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 8495 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8496 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 8497 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8498 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 8499 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 8500 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 8501 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 8502 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 8503 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8504 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 8505 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8506 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 8507 }; 8508 8509 /* 8510 * Determine the target EL for physical exceptions 8511 */ 8512 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8513 uint32_t cur_el, bool secure) 8514 { 8515 CPUARMState *env = cs->env_ptr; 8516 bool rw; 8517 bool scr; 8518 bool hcr; 8519 int target_el; 8520 /* Is the highest EL AArch64? */ 8521 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 8522 uint64_t hcr_el2; 8523 8524 if (arm_feature(env, ARM_FEATURE_EL3)) { 8525 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 8526 } else { 8527 /* Either EL2 is the highest EL (and so the EL2 register width 8528 * is given by is64); or there is no EL2 or EL3, in which case 8529 * the value of 'rw' does not affect the table lookup anyway. 8530 */ 8531 rw = is64; 8532 } 8533 8534 hcr_el2 = arm_hcr_el2_eff(env); 8535 switch (excp_idx) { 8536 case EXCP_IRQ: 8537 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 8538 hcr = hcr_el2 & HCR_IMO; 8539 break; 8540 case EXCP_FIQ: 8541 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 8542 hcr = hcr_el2 & HCR_FMO; 8543 break; 8544 default: 8545 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 8546 hcr = hcr_el2 & HCR_AMO; 8547 break; 8548 }; 8549 8550 /* 8551 * For these purposes, TGE and AMO/IMO/FMO both force the 8552 * interrupt to EL2. Fold TGE into the bit extracted above. 
8553 */ 8554 hcr |= (hcr_el2 & HCR_TGE) != 0; 8555 8556 /* Perform a table-lookup for the target EL given the current state */ 8557 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 8558 8559 assert(target_el > 0); 8560 8561 return target_el; 8562 } 8563 8564 void arm_log_exception(int idx) 8565 { 8566 if (qemu_loglevel_mask(CPU_LOG_INT)) { 8567 const char *exc = NULL; 8568 static const char * const excnames[] = { 8569 [EXCP_UDEF] = "Undefined Instruction", 8570 [EXCP_SWI] = "SVC", 8571 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 8572 [EXCP_DATA_ABORT] = "Data Abort", 8573 [EXCP_IRQ] = "IRQ", 8574 [EXCP_FIQ] = "FIQ", 8575 [EXCP_BKPT] = "Breakpoint", 8576 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 8577 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 8578 [EXCP_HVC] = "Hypervisor Call", 8579 [EXCP_HYP_TRAP] = "Hypervisor Trap", 8580 [EXCP_SMC] = "Secure Monitor Call", 8581 [EXCP_VIRQ] = "Virtual IRQ", 8582 [EXCP_VFIQ] = "Virtual FIQ", 8583 [EXCP_SEMIHOST] = "Semihosting call", 8584 [EXCP_NOCP] = "v7M NOCP UsageFault", 8585 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 8586 [EXCP_STKOF] = "v8M STKOF UsageFault", 8587 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 8588 [EXCP_LSERR] = "v8M LSERR UsageFault", 8589 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 8590 }; 8591 8592 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 8593 exc = excnames[idx]; 8594 } 8595 if (!exc) { 8596 exc = "unknown"; 8597 } 8598 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 8599 } 8600 } 8601 8602 /* 8603 * Function used to synchronize QEMU's AArch64 register set with AArch32 8604 * register set. This is necessary when switching between AArch32 and AArch64 8605 * execution state. 8606 */ 8607 void aarch64_sync_32_to_64(CPUARMState *env) 8608 { 8609 int i; 8610 uint32_t mode = env->uncached_cpsr & CPSR_M; 8611 8612 /* We can blanket copy R[0:7] to X[0:7] */ 8613 for (i = 0; i < 8; i++) { 8614 env->xregs[i] = env->regs[i]; 8615 } 8616 8617 /* 8618 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 8619 * Otherwise, they come from the banked user regs. 8620 */ 8621 if (mode == ARM_CPU_MODE_FIQ) { 8622 for (i = 8; i < 13; i++) { 8623 env->xregs[i] = env->usr_regs[i - 8]; 8624 } 8625 } else { 8626 for (i = 8; i < 13; i++) { 8627 env->xregs[i] = env->regs[i]; 8628 } 8629 } 8630 8631 /* 8632 * Registers x13-x23 are the various mode SP and FP registers. Registers 8633 * r13 and r14 are only copied if we are in that mode, otherwise we copy 8634 * from the mode banked register. 
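 *
 * For reference, the mapping implemented below is (r13 is the banked SP
 * and r14 the banked LR of each mode):
 *   x13 = SP_usr,  x14 = LR_usr,  x15 = SP_hyp,
 *   x16 = LR_irq,  x17 = SP_irq,  x18 = LR_svc,  x19 = SP_svc,
 *   x20 = LR_abt,  x21 = SP_abt,  x22 = LR_und,  x23 = SP_und.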
8635 */ 8636 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8637 env->xregs[13] = env->regs[13]; 8638 env->xregs[14] = env->regs[14]; 8639 } else { 8640 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 8641 /* HYP is an exception in that it is copied from r14 */ 8642 if (mode == ARM_CPU_MODE_HYP) { 8643 env->xregs[14] = env->regs[14]; 8644 } else { 8645 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 8646 } 8647 } 8648 8649 if (mode == ARM_CPU_MODE_HYP) { 8650 env->xregs[15] = env->regs[13]; 8651 } else { 8652 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 8653 } 8654 8655 if (mode == ARM_CPU_MODE_IRQ) { 8656 env->xregs[16] = env->regs[14]; 8657 env->xregs[17] = env->regs[13]; 8658 } else { 8659 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 8660 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 8661 } 8662 8663 if (mode == ARM_CPU_MODE_SVC) { 8664 env->xregs[18] = env->regs[14]; 8665 env->xregs[19] = env->regs[13]; 8666 } else { 8667 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 8668 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 8669 } 8670 8671 if (mode == ARM_CPU_MODE_ABT) { 8672 env->xregs[20] = env->regs[14]; 8673 env->xregs[21] = env->regs[13]; 8674 } else { 8675 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 8676 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 8677 } 8678 8679 if (mode == ARM_CPU_MODE_UND) { 8680 env->xregs[22] = env->regs[14]; 8681 env->xregs[23] = env->regs[13]; 8682 } else { 8683 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 8684 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 8685 } 8686 8687 /* 8688 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8689 * mode, then we can copy from r8-r14. Otherwise, we copy from the 8690 * FIQ bank for r8-r14. 8691 */ 8692 if (mode == ARM_CPU_MODE_FIQ) { 8693 for (i = 24; i < 31; i++) { 8694 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 8695 } 8696 } else { 8697 for (i = 24; i < 29; i++) { 8698 env->xregs[i] = env->fiq_regs[i - 24]; 8699 } 8700 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 8701 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 8702 } 8703 8704 env->pc = env->regs[15]; 8705 } 8706 8707 /* 8708 * Function used to synchronize QEMU's AArch32 register set with AArch64 8709 * register set. This is necessary when switching between AArch32 and AArch64 8710 * execution state. 8711 */ 8712 void aarch64_sync_64_to_32(CPUARMState *env) 8713 { 8714 int i; 8715 uint32_t mode = env->uncached_cpsr & CPSR_M; 8716 8717 /* We can blanket copy X[0:7] to R[0:7] */ 8718 for (i = 0; i < 8; i++) { 8719 env->regs[i] = env->xregs[i]; 8720 } 8721 8722 /* 8723 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 8724 * Otherwise, we copy x8-x12 into the banked user regs. 8725 */ 8726 if (mode == ARM_CPU_MODE_FIQ) { 8727 for (i = 8; i < 13; i++) { 8728 env->usr_regs[i - 8] = env->xregs[i]; 8729 } 8730 } else { 8731 for (i = 8; i < 13; i++) { 8732 env->regs[i] = env->xregs[i]; 8733 } 8734 } 8735 8736 /* 8737 * Registers r13 & r14 depend on the current mode. 8738 * If we are in a given mode, we copy the corresponding x registers to r13 8739 * and r14. Otherwise, we copy the x register to the banked r13 and r14 8740 * for the mode. 
8741 */ 8742 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8743 env->regs[13] = env->xregs[13]; 8744 env->regs[14] = env->xregs[14]; 8745 } else { 8746 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 8747 8748 /* 8749 * HYP is an exception in that it does not have its own banked r14 but 8750 * shares the USR r14 8751 */ 8752 if (mode == ARM_CPU_MODE_HYP) { 8753 env->regs[14] = env->xregs[14]; 8754 } else { 8755 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 8756 } 8757 } 8758 8759 if (mode == ARM_CPU_MODE_HYP) { 8760 env->regs[13] = env->xregs[15]; 8761 } else { 8762 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 8763 } 8764 8765 if (mode == ARM_CPU_MODE_IRQ) { 8766 env->regs[14] = env->xregs[16]; 8767 env->regs[13] = env->xregs[17]; 8768 } else { 8769 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 8770 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 8771 } 8772 8773 if (mode == ARM_CPU_MODE_SVC) { 8774 env->regs[14] = env->xregs[18]; 8775 env->regs[13] = env->xregs[19]; 8776 } else { 8777 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 8778 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 8779 } 8780 8781 if (mode == ARM_CPU_MODE_ABT) { 8782 env->regs[14] = env->xregs[20]; 8783 env->regs[13] = env->xregs[21]; 8784 } else { 8785 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 8786 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 8787 } 8788 8789 if (mode == ARM_CPU_MODE_UND) { 8790 env->regs[14] = env->xregs[22]; 8791 env->regs[13] = env->xregs[23]; 8792 } else { 8793 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 8794 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 8795 } 8796 8797 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8798 * mode, then we can copy to r8-r14. Otherwise, we copy to the 8799 * FIQ bank for r8-r14. 8800 */ 8801 if (mode == ARM_CPU_MODE_FIQ) { 8802 for (i = 24; i < 31; i++) { 8803 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 8804 } 8805 } else { 8806 for (i = 24; i < 29; i++) { 8807 env->fiq_regs[i - 24] = env->xregs[i]; 8808 } 8809 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 8810 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 8811 } 8812 8813 env->regs[15] = env->pc; 8814 } 8815 8816 static void take_aarch32_exception(CPUARMState *env, int new_mode, 8817 uint32_t mask, uint32_t offset, 8818 uint32_t newpc) 8819 { 8820 int new_el; 8821 8822 /* Change the CPU state so as to actually take the exception. */ 8823 switch_mode(env, new_mode); 8824 new_el = arm_current_el(env); 8825 8826 /* 8827 * For exceptions taken to AArch32 we must clear the SS bit in both 8828 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 8829 */ 8830 env->uncached_cpsr &= ~PSTATE_SS; 8831 env->spsr = cpsr_read(env); 8832 /* Clear IT bits. */ 8833 env->condexec_bits = 0; 8834 /* Switch to the new mode, and to the correct instruction set. 
*/
8835 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
8836 /* Set new mode endianness */
8837 env->uncached_cpsr &= ~CPSR_E;
8838 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
8839 env->uncached_cpsr |= CPSR_E;
8840 }
8841 /* J and IL must always be cleared for exception entry */
8842 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
8843 env->daif |= mask;
8844
8845 if (new_mode == ARM_CPU_MODE_HYP) {
8846 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
8847 env->elr_el[2] = env->regs[15];
8848 } else {
8849 /* CPSR.PAN is normally preserved unless... */
8850 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
8851 switch (new_el) {
8852 case 3:
8853 if (!arm_is_secure_below_el3(env)) {
8854 /* ... the target is EL3, from non-secure state. */
8855 env->uncached_cpsr &= ~CPSR_PAN;
8856 break;
8857 }
8858 /* ... the target is EL3, from secure state ... */
8859 /* fall through */
8860 case 1:
8861 /* ... the target is EL1 and SCTLR.SPAN is 0. */
8862 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
8863 env->uncached_cpsr |= CPSR_PAN;
8864 }
8865 break;
8866 }
8867 }
8868 /*
8869 * this is a lie, as there was no c1_sys on V4T/V5, but who cares
8870 * and we should just guard the thumb mode on V4
8871 */
8872 if (arm_feature(env, ARM_FEATURE_V4T)) {
8873 env->thumb =
8874 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
8875 }
8876 env->regs[14] = env->regs[15] + offset;
8877 }
8878 env->regs[15] = newpc;
8879 arm_rebuild_hflags(env);
8880 }
8881
8882 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
8883 {
8884 /*
8885 * Handle exception entry to Hyp mode; this is sufficiently
8886 * different to entry to other AArch32 modes that we handle it
8887 * separately here.
8888 *
8889 * The vector table entry used is always the 0x14 Hyp mode entry point,
8890 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
8891 * The offset applied to the preferred return address is always zero
8892 * (see DDI0487C.a section G1.12.3).
8893 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
8894 */
8895 uint32_t addr, mask;
8896 ARMCPU *cpu = ARM_CPU(cs);
8897 CPUARMState *env = &cpu->env;
8898
8899 switch (cs->exception_index) {
8900 case EXCP_UDEF:
8901 addr = 0x04;
8902 break;
8903 case EXCP_SWI:
8904 addr = 0x14;
8905 break;
8906 case EXCP_BKPT:
8907 /* Fall through to prefetch abort. */
8908 case EXCP_PREFETCH_ABORT:
8909 env->cp15.ifar_s = env->exception.vaddress;
8910 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
8911 (uint32_t)env->exception.vaddress);
8912 addr = 0x0c;
8913 break;
8914 case EXCP_DATA_ABORT:
8915 env->cp15.dfar_s = env->exception.vaddress;
8916 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
8917 (uint32_t)env->exception.vaddress);
8918 addr = 0x10;
8919 break;
8920 case EXCP_IRQ:
8921 addr = 0x18;
8922 break;
8923 case EXCP_FIQ:
8924 addr = 0x1c;
8925 break;
8926 case EXCP_HVC:
8927 addr = 0x08;
8928 break;
8929 case EXCP_HYP_TRAP:
8930 addr = 0x14;
8931 break;
8932 default:
8933 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8934 }
8935
8936 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
8937 if (!arm_feature(env, ARM_FEATURE_V8)) {
8938 /*
8939 * QEMU syndrome values are v8-style. v7 has the IL bit
8940 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
8941 * If this is a v7 CPU, squash the IL bit in those cases.
8942 */ 8943 if (cs->exception_index == EXCP_PREFETCH_ABORT || 8944 (cs->exception_index == EXCP_DATA_ABORT && 8945 !(env->exception.syndrome & ARM_EL_ISV)) || 8946 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 8947 env->exception.syndrome &= ~ARM_EL_IL; 8948 } 8949 } 8950 env->cp15.esr_el[2] = env->exception.syndrome; 8951 } 8952 8953 if (arm_current_el(env) != 2 && addr < 0x14) { 8954 addr = 0x14; 8955 } 8956 8957 mask = 0; 8958 if (!(env->cp15.scr_el3 & SCR_EA)) { 8959 mask |= CPSR_A; 8960 } 8961 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 8962 mask |= CPSR_I; 8963 } 8964 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 8965 mask |= CPSR_F; 8966 } 8967 8968 addr += env->cp15.hvbar; 8969 8970 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 8971 } 8972 8973 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 8974 { 8975 ARMCPU *cpu = ARM_CPU(cs); 8976 CPUARMState *env = &cpu->env; 8977 uint32_t addr; 8978 uint32_t mask; 8979 int new_mode; 8980 uint32_t offset; 8981 uint32_t moe; 8982 8983 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 8984 switch (syn_get_ec(env->exception.syndrome)) { 8985 case EC_BREAKPOINT: 8986 case EC_BREAKPOINT_SAME_EL: 8987 moe = 1; 8988 break; 8989 case EC_WATCHPOINT: 8990 case EC_WATCHPOINT_SAME_EL: 8991 moe = 10; 8992 break; 8993 case EC_AA32_BKPT: 8994 moe = 3; 8995 break; 8996 case EC_VECTORCATCH: 8997 moe = 5; 8998 break; 8999 default: 9000 moe = 0; 9001 break; 9002 } 9003 9004 if (moe) { 9005 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 9006 } 9007 9008 if (env->exception.target_el == 2) { 9009 arm_cpu_do_interrupt_aarch32_hyp(cs); 9010 return; 9011 } 9012 9013 switch (cs->exception_index) { 9014 case EXCP_UDEF: 9015 new_mode = ARM_CPU_MODE_UND; 9016 addr = 0x04; 9017 mask = CPSR_I; 9018 if (env->thumb) 9019 offset = 2; 9020 else 9021 offset = 4; 9022 break; 9023 case EXCP_SWI: 9024 new_mode = ARM_CPU_MODE_SVC; 9025 addr = 0x08; 9026 mask = CPSR_I; 9027 /* The PC already points to the next instruction. */ 9028 offset = 0; 9029 break; 9030 case EXCP_BKPT: 9031 /* Fall through to prefetch abort. */ 9032 case EXCP_PREFETCH_ABORT: 9033 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 9034 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 9035 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 9036 env->exception.fsr, (uint32_t)env->exception.vaddress); 9037 new_mode = ARM_CPU_MODE_ABT; 9038 addr = 0x0c; 9039 mask = CPSR_A | CPSR_I; 9040 offset = 4; 9041 break; 9042 case EXCP_DATA_ABORT: 9043 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 9044 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 9045 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 9046 env->exception.fsr, 9047 (uint32_t)env->exception.vaddress); 9048 new_mode = ARM_CPU_MODE_ABT; 9049 addr = 0x10; 9050 mask = CPSR_A | CPSR_I; 9051 offset = 8; 9052 break; 9053 case EXCP_IRQ: 9054 new_mode = ARM_CPU_MODE_IRQ; 9055 addr = 0x18; 9056 /* Disable IRQ and imprecise data aborts. */ 9057 mask = CPSR_A | CPSR_I; 9058 offset = 4; 9059 if (env->cp15.scr_el3 & SCR_IRQ) { 9060 /* IRQ routed to monitor mode */ 9061 new_mode = ARM_CPU_MODE_MON; 9062 mask |= CPSR_F; 9063 } 9064 break; 9065 case EXCP_FIQ: 9066 new_mode = ARM_CPU_MODE_FIQ; 9067 addr = 0x1c; 9068 /* Disable FIQ, IRQ and imprecise data aborts. 
*/ 9069 mask = CPSR_A | CPSR_I | CPSR_F; 9070 if (env->cp15.scr_el3 & SCR_FIQ) { 9071 /* FIQ routed to monitor mode */ 9072 new_mode = ARM_CPU_MODE_MON; 9073 } 9074 offset = 4; 9075 break; 9076 case EXCP_VIRQ: 9077 new_mode = ARM_CPU_MODE_IRQ; 9078 addr = 0x18; 9079 /* Disable IRQ and imprecise data aborts. */ 9080 mask = CPSR_A | CPSR_I; 9081 offset = 4; 9082 break; 9083 case EXCP_VFIQ: 9084 new_mode = ARM_CPU_MODE_FIQ; 9085 addr = 0x1c; 9086 /* Disable FIQ, IRQ and imprecise data aborts. */ 9087 mask = CPSR_A | CPSR_I | CPSR_F; 9088 offset = 4; 9089 break; 9090 case EXCP_SMC: 9091 new_mode = ARM_CPU_MODE_MON; 9092 addr = 0x08; 9093 mask = CPSR_A | CPSR_I | CPSR_F; 9094 offset = 0; 9095 break; 9096 default: 9097 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9098 return; /* Never happens. Keep compiler happy. */ 9099 } 9100 9101 if (new_mode == ARM_CPU_MODE_MON) { 9102 addr += env->cp15.mvbar; 9103 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 9104 /* High vectors. When enabled, base address cannot be remapped. */ 9105 addr += 0xffff0000; 9106 } else { 9107 /* ARM v7 architectures provide a vector base address register to remap 9108 * the interrupt vector table. 9109 * This register is only followed in non-monitor mode, and is banked. 9110 * Note: only bits 31:5 are valid. 9111 */ 9112 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 9113 } 9114 9115 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 9116 env->cp15.scr_el3 &= ~SCR_NS; 9117 } 9118 9119 take_aarch32_exception(env, new_mode, mask, offset, addr); 9120 } 9121 9122 /* Handle exception entry to a target EL which is using AArch64 */ 9123 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 9124 { 9125 ARMCPU *cpu = ARM_CPU(cs); 9126 CPUARMState *env = &cpu->env; 9127 unsigned int new_el = env->exception.target_el; 9128 target_ulong addr = env->cp15.vbar_el[new_el]; 9129 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 9130 unsigned int old_mode; 9131 unsigned int cur_el = arm_current_el(env); 9132 9133 /* 9134 * Note that new_el can never be 0. If cur_el is 0, then 9135 * el0_a64 is is_a64(), else el0_a64 is ignored. 
9136 */ 9137 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 9138 9139 if (cur_el < new_el) { 9140 /* Entry vector offset depends on whether the implemented EL 9141 * immediately lower than the target level is using AArch32 or AArch64 9142 */ 9143 bool is_aa64; 9144 uint64_t hcr; 9145 9146 switch (new_el) { 9147 case 3: 9148 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 9149 break; 9150 case 2: 9151 hcr = arm_hcr_el2_eff(env); 9152 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 9153 is_aa64 = (hcr & HCR_RW) != 0; 9154 break; 9155 } 9156 /* fall through */ 9157 case 1: 9158 is_aa64 = is_a64(env); 9159 break; 9160 default: 9161 g_assert_not_reached(); 9162 } 9163 9164 if (is_aa64) { 9165 addr += 0x400; 9166 } else { 9167 addr += 0x600; 9168 } 9169 } else if (pstate_read(env) & PSTATE_SP) { 9170 addr += 0x200; 9171 } 9172 9173 switch (cs->exception_index) { 9174 case EXCP_PREFETCH_ABORT: 9175 case EXCP_DATA_ABORT: 9176 env->cp15.far_el[new_el] = env->exception.vaddress; 9177 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 9178 env->cp15.far_el[new_el]); 9179 /* fall through */ 9180 case EXCP_BKPT: 9181 case EXCP_UDEF: 9182 case EXCP_SWI: 9183 case EXCP_HVC: 9184 case EXCP_HYP_TRAP: 9185 case EXCP_SMC: 9186 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { 9187 /* 9188 * QEMU internal FP/SIMD syndromes from AArch32 include the 9189 * TA and coproc fields which are only exposed if the exception 9190 * is taken to AArch32 Hyp mode. Mask them out to get a valid 9191 * AArch64 format syndrome. 9192 */ 9193 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 9194 } 9195 env->cp15.esr_el[new_el] = env->exception.syndrome; 9196 break; 9197 case EXCP_IRQ: 9198 case EXCP_VIRQ: 9199 addr += 0x80; 9200 break; 9201 case EXCP_FIQ: 9202 case EXCP_VFIQ: 9203 addr += 0x100; 9204 break; 9205 default: 9206 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9207 } 9208 9209 if (is_a64(env)) { 9210 old_mode = pstate_read(env); 9211 aarch64_save_sp(env, arm_current_el(env)); 9212 env->elr_el[new_el] = env->pc; 9213 } else { 9214 old_mode = cpsr_read(env); 9215 env->elr_el[new_el] = env->regs[15]; 9216 9217 aarch64_sync_32_to_64(env); 9218 9219 env->condexec_bits = 0; 9220 } 9221 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 9222 9223 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 9224 env->elr_el[new_el]); 9225 9226 if (cpu_isar_feature(aa64_pan, cpu)) { 9227 /* The value of PSTATE.PAN is normally preserved, except when ... */ 9228 new_mode |= old_mode & PSTATE_PAN; 9229 switch (new_el) { 9230 case 2: 9231 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 9232 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 9233 != (HCR_E2H | HCR_TGE)) { 9234 break; 9235 } 9236 /* fall through */ 9237 case 1: 9238 /* ... the target is EL1 ... */ 9239 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ 9240 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 9241 new_mode |= PSTATE_PAN; 9242 } 9243 break; 9244 } 9245 } 9246 9247 pstate_write(env, PSTATE_DAIF | new_mode); 9248 env->aarch64 = 1; 9249 aarch64_restore_sp(env, new_el); 9250 helper_rebuild_hflags_a64(env, new_el); 9251 9252 env->pc = addr; 9253 9254 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 9255 new_el, env->pc, pstate_read(env)); 9256 } 9257 9258 /* 9259 * Do semihosting call and set the appropriate return value. All the 9260 * permission and validity checks have been done at translate time. 
9261 * 9262 * We only see semihosting exceptions in TCG only as they are not 9263 * trapped to the hypervisor in KVM. 9264 */ 9265 #ifdef CONFIG_TCG 9266 static void handle_semihosting(CPUState *cs) 9267 { 9268 ARMCPU *cpu = ARM_CPU(cs); 9269 CPUARMState *env = &cpu->env; 9270 9271 if (is_a64(env)) { 9272 qemu_log_mask(CPU_LOG_INT, 9273 "...handling as semihosting call 0x%" PRIx64 "\n", 9274 env->xregs[0]); 9275 env->xregs[0] = do_arm_semihosting(env); 9276 env->pc += 4; 9277 } else { 9278 qemu_log_mask(CPU_LOG_INT, 9279 "...handling as semihosting call 0x%x\n", 9280 env->regs[0]); 9281 env->regs[0] = do_arm_semihosting(env); 9282 env->regs[15] += env->thumb ? 2 : 4; 9283 } 9284 } 9285 #endif 9286 9287 /* Handle a CPU exception for A and R profile CPUs. 9288 * Do any appropriate logging, handle PSCI calls, and then hand off 9289 * to the AArch64-entry or AArch32-entry function depending on the 9290 * target exception level's register width. 9291 */ 9292 void arm_cpu_do_interrupt(CPUState *cs) 9293 { 9294 ARMCPU *cpu = ARM_CPU(cs); 9295 CPUARMState *env = &cpu->env; 9296 unsigned int new_el = env->exception.target_el; 9297 9298 assert(!arm_feature(env, ARM_FEATURE_M)); 9299 9300 arm_log_exception(cs->exception_index); 9301 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 9302 new_el); 9303 if (qemu_loglevel_mask(CPU_LOG_INT) 9304 && !excp_is_internal(cs->exception_index)) { 9305 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 9306 syn_get_ec(env->exception.syndrome), 9307 env->exception.syndrome); 9308 } 9309 9310 if (arm_is_psci_call(cpu, cs->exception_index)) { 9311 arm_handle_psci_call(cpu); 9312 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 9313 return; 9314 } 9315 9316 /* 9317 * Semihosting semantics depend on the register width of the code 9318 * that caused the exception, not the target exception level, so 9319 * must be handled here. 9320 */ 9321 #ifdef CONFIG_TCG 9322 if (cs->exception_index == EXCP_SEMIHOST) { 9323 handle_semihosting(cs); 9324 return; 9325 } 9326 #endif 9327 9328 /* Hooks may change global state so BQL should be held, also the 9329 * BQL needs to be held for any modification of 9330 * cs->interrupt_request. 9331 */ 9332 g_assert(qemu_mutex_iothread_locked()); 9333 9334 arm_call_pre_el_change_hook(cpu); 9335 9336 assert(!excp_is_internal(cs->exception_index)); 9337 if (arm_el_is_aa64(env, new_el)) { 9338 arm_cpu_do_interrupt_aarch64(cs); 9339 } else { 9340 arm_cpu_do_interrupt_aarch32(cs); 9341 } 9342 9343 arm_call_el_change_hook(cpu); 9344 9345 if (!kvm_enabled()) { 9346 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 9347 } 9348 } 9349 #endif /* !CONFIG_USER_ONLY */ 9350 9351 /* Return the exception level which controls this address translation regime */ 9352 static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 9353 { 9354 switch (mmu_idx) { 9355 case ARMMMUIdx_E20_0: 9356 case ARMMMUIdx_E20_2: 9357 case ARMMMUIdx_E20_2_PAN: 9358 case ARMMMUIdx_Stage2: 9359 case ARMMMUIdx_E2: 9360 return 2; 9361 case ARMMMUIdx_SE3: 9362 return 3; 9363 case ARMMMUIdx_SE10_0: 9364 return arm_el_is_aa64(env, 3) ? 
1 : 3; 9365 case ARMMMUIdx_SE10_1: 9366 case ARMMMUIdx_SE10_1_PAN: 9367 case ARMMMUIdx_Stage1_E0: 9368 case ARMMMUIdx_Stage1_E1: 9369 case ARMMMUIdx_Stage1_E1_PAN: 9370 case ARMMMUIdx_E10_0: 9371 case ARMMMUIdx_E10_1: 9372 case ARMMMUIdx_E10_1_PAN: 9373 case ARMMMUIdx_MPrivNegPri: 9374 case ARMMMUIdx_MUserNegPri: 9375 case ARMMMUIdx_MPriv: 9376 case ARMMMUIdx_MUser: 9377 case ARMMMUIdx_MSPrivNegPri: 9378 case ARMMMUIdx_MSUserNegPri: 9379 case ARMMMUIdx_MSPriv: 9380 case ARMMMUIdx_MSUser: 9381 return 1; 9382 default: 9383 g_assert_not_reached(); 9384 } 9385 } 9386 9387 uint64_t arm_sctlr(CPUARMState *env, int el) 9388 { 9389 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ 9390 if (el == 0) { 9391 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); 9392 el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1); 9393 } 9394 return env->cp15.sctlr_el[el]; 9395 } 9396 9397 /* Return the SCTLR value which controls this address translation regime */ 9398 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 9399 { 9400 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 9401 } 9402 9403 #ifndef CONFIG_USER_ONLY 9404 9405 /* Return true if the specified stage of address translation is disabled */ 9406 static inline bool regime_translation_disabled(CPUARMState *env, 9407 ARMMMUIdx mmu_idx) 9408 { 9409 if (arm_feature(env, ARM_FEATURE_M)) { 9410 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 9411 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 9412 case R_V7M_MPU_CTRL_ENABLE_MASK: 9413 /* Enabled, but not for HardFault and NMI */ 9414 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 9415 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 9416 /* Enabled for all cases */ 9417 return false; 9418 case 0: 9419 default: 9420 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 9421 * we warned about that in armv7m_nvic.c when the guest set it. 
9422 */ 9423 return true; 9424 } 9425 } 9426 9427 if (mmu_idx == ARMMMUIdx_Stage2) { 9428 /* HCR.DC means HCR.VM behaves as 1 */ 9429 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; 9430 } 9431 9432 if (env->cp15.hcr_el2 & HCR_TGE) { 9433 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 9434 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 9435 return true; 9436 } 9437 } 9438 9439 if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 9440 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 9441 return true; 9442 } 9443 9444 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 9445 } 9446 9447 static inline bool regime_translation_big_endian(CPUARMState *env, 9448 ARMMMUIdx mmu_idx) 9449 { 9450 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 9451 } 9452 9453 /* Return the TTBR associated with this translation regime */ 9454 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 9455 int ttbrn) 9456 { 9457 if (mmu_idx == ARMMMUIdx_Stage2) { 9458 return env->cp15.vttbr_el2; 9459 } 9460 if (ttbrn == 0) { 9461 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 9462 } else { 9463 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 9464 } 9465 } 9466 9467 #endif /* !CONFIG_USER_ONLY */ 9468 9469 /* Return the TCR controlling this translation regime */ 9470 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 9471 { 9472 if (mmu_idx == ARMMMUIdx_Stage2) { 9473 return &env->cp15.vtcr_el2; 9474 } 9475 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 9476 } 9477 9478 /* Convert a possible stage1+2 MMU index into the appropriate 9479 * stage 1 MMU index 9480 */ 9481 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 9482 { 9483 switch (mmu_idx) { 9484 case ARMMMUIdx_E10_0: 9485 return ARMMMUIdx_Stage1_E0; 9486 case ARMMMUIdx_E10_1: 9487 return ARMMMUIdx_Stage1_E1; 9488 case ARMMMUIdx_E10_1_PAN: 9489 return ARMMMUIdx_Stage1_E1_PAN; 9490 default: 9491 return mmu_idx; 9492 } 9493 } 9494 9495 /* Return true if the translation regime is using LPAE format page tables */ 9496 static inline bool regime_using_lpae_format(CPUARMState *env, 9497 ARMMMUIdx mmu_idx) 9498 { 9499 int el = regime_el(env, mmu_idx); 9500 if (el == 2 || arm_el_is_aa64(env, el)) { 9501 return true; 9502 } 9503 if (arm_feature(env, ARM_FEATURE_LPAE) 9504 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 9505 return true; 9506 } 9507 return false; 9508 } 9509 9510 /* Returns true if the stage 1 translation regime is using LPAE format page 9511 * tables. Used when raising alignment exceptions, whose FSR changes depending 9512 * on whether the long or short descriptor format is in use. 
*/ 9513 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 9514 { 9515 mmu_idx = stage_1_mmu_idx(mmu_idx); 9516 9517 return regime_using_lpae_format(env, mmu_idx); 9518 } 9519 9520 #ifndef CONFIG_USER_ONLY 9521 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 9522 { 9523 switch (mmu_idx) { 9524 case ARMMMUIdx_SE10_0: 9525 case ARMMMUIdx_E20_0: 9526 case ARMMMUIdx_Stage1_E0: 9527 case ARMMMUIdx_MUser: 9528 case ARMMMUIdx_MSUser: 9529 case ARMMMUIdx_MUserNegPri: 9530 case ARMMMUIdx_MSUserNegPri: 9531 return true; 9532 default: 9533 return false; 9534 case ARMMMUIdx_E10_0: 9535 case ARMMMUIdx_E10_1: 9536 case ARMMMUIdx_E10_1_PAN: 9537 g_assert_not_reached(); 9538 } 9539 } 9540 9541 /* Translate section/page access permissions to page 9542 * R/W protection flags 9543 * 9544 * @env: CPUARMState 9545 * @mmu_idx: MMU index indicating required translation regime 9546 * @ap: The 3-bit access permissions (AP[2:0]) 9547 * @domain_prot: The 2-bit domain access permissions 9548 */ 9549 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 9550 int ap, int domain_prot) 9551 { 9552 bool is_user = regime_is_user(env, mmu_idx); 9553 9554 if (domain_prot == 3) { 9555 return PAGE_READ | PAGE_WRITE; 9556 } 9557 9558 switch (ap) { 9559 case 0: 9560 if (arm_feature(env, ARM_FEATURE_V7)) { 9561 return 0; 9562 } 9563 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 9564 case SCTLR_S: 9565 return is_user ? 0 : PAGE_READ; 9566 case SCTLR_R: 9567 return PAGE_READ; 9568 default: 9569 return 0; 9570 } 9571 case 1: 9572 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9573 case 2: 9574 if (is_user) { 9575 return PAGE_READ; 9576 } else { 9577 return PAGE_READ | PAGE_WRITE; 9578 } 9579 case 3: 9580 return PAGE_READ | PAGE_WRITE; 9581 case 4: /* Reserved. */ 9582 return 0; 9583 case 5: 9584 return is_user ? 0 : PAGE_READ; 9585 case 6: 9586 return PAGE_READ; 9587 case 7: 9588 if (!arm_feature(env, ARM_FEATURE_V6K)) { 9589 return 0; 9590 } 9591 return PAGE_READ; 9592 default: 9593 g_assert_not_reached(); 9594 } 9595 } 9596 9597 /* Translate section/page access permissions to page 9598 * R/W protection flags. 9599 * 9600 * @ap: The 2-bit simple AP (AP[2:1]) 9601 * @is_user: TRUE if accessing from PL0 9602 */ 9603 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 9604 { 9605 switch (ap) { 9606 case 0: 9607 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9608 case 1: 9609 return PAGE_READ | PAGE_WRITE; 9610 case 2: 9611 return is_user ? 
0 : PAGE_READ; 9612 case 3: 9613 return PAGE_READ; 9614 default: 9615 g_assert_not_reached(); 9616 } 9617 } 9618 9619 static inline int 9620 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 9621 { 9622 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 9623 } 9624 9625 /* Translate S2 section/page access permissions to protection flags 9626 * 9627 * @env: CPUARMState 9628 * @s2ap: The 2-bit stage2 access permissions (S2AP) 9629 * @xn: XN (execute-never) bit 9630 */ 9631 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 9632 { 9633 int prot = 0; 9634 9635 if (s2ap & 1) { 9636 prot |= PAGE_READ; 9637 } 9638 if (s2ap & 2) { 9639 prot |= PAGE_WRITE; 9640 } 9641 if (!xn) { 9642 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 9643 prot |= PAGE_EXEC; 9644 } 9645 } 9646 return prot; 9647 } 9648 9649 /* Translate section/page access permissions to protection flags 9650 * 9651 * @env: CPUARMState 9652 * @mmu_idx: MMU index indicating required translation regime 9653 * @is_aa64: TRUE if AArch64 9654 * @ap: The 2-bit simple AP (AP[2:1]) 9655 * @ns: NS (non-secure) bit 9656 * @xn: XN (execute-never) bit 9657 * @pxn: PXN (privileged execute-never) bit 9658 */ 9659 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 9660 int ap, int ns, int xn, int pxn) 9661 { 9662 bool is_user = regime_is_user(env, mmu_idx); 9663 int prot_rw, user_rw; 9664 bool have_wxn; 9665 int wxn = 0; 9666 9667 assert(mmu_idx != ARMMMUIdx_Stage2); 9668 9669 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 9670 if (is_user) { 9671 prot_rw = user_rw; 9672 } else { 9673 if (user_rw && regime_is_pan(env, mmu_idx)) { 9674 return 0; 9675 } 9676 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 9677 } 9678 9679 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 9680 return prot_rw; 9681 } 9682 9683 /* TODO have_wxn should be replaced with 9684 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 9685 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 9686 * compatible processors have EL2, which is required for [U]WXN. 
9687 */ 9688 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 9689 9690 if (have_wxn) { 9691 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 9692 } 9693 9694 if (is_aa64) { 9695 if (regime_has_2_ranges(mmu_idx) && !is_user) { 9696 xn = pxn || (user_rw & PAGE_WRITE); 9697 } 9698 } else if (arm_feature(env, ARM_FEATURE_V7)) { 9699 switch (regime_el(env, mmu_idx)) { 9700 case 1: 9701 case 3: 9702 if (is_user) { 9703 xn = xn || !(user_rw & PAGE_READ); 9704 } else { 9705 int uwxn = 0; 9706 if (have_wxn) { 9707 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 9708 } 9709 xn = xn || !(prot_rw & PAGE_READ) || pxn || 9710 (uwxn && (user_rw & PAGE_WRITE)); 9711 } 9712 break; 9713 case 2: 9714 break; 9715 } 9716 } else { 9717 xn = wxn = 0; 9718 } 9719 9720 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 9721 return prot_rw; 9722 } 9723 return prot_rw | PAGE_EXEC; 9724 } 9725 9726 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 9727 uint32_t *table, uint32_t address) 9728 { 9729 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 9730 TCR *tcr = regime_tcr(env, mmu_idx); 9731 9732 if (address & tcr->mask) { 9733 if (tcr->raw_tcr & TTBCR_PD1) { 9734 /* Translation table walk disabled for TTBR1 */ 9735 return false; 9736 } 9737 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 9738 } else { 9739 if (tcr->raw_tcr & TTBCR_PD0) { 9740 /* Translation table walk disabled for TTBR0 */ 9741 return false; 9742 } 9743 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 9744 } 9745 *table |= (address >> 18) & 0x3ffc; 9746 return true; 9747 } 9748 9749 /* Translate a S1 pagetable walk through S2 if needed. */ 9750 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 9751 hwaddr addr, MemTxAttrs txattrs, 9752 ARMMMUFaultInfo *fi) 9753 { 9754 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && 9755 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 9756 target_ulong s2size; 9757 hwaddr s2pa; 9758 int s2prot; 9759 int ret; 9760 ARMCacheAttrs cacheattrs = {}; 9761 ARMCacheAttrs *pcacheattrs = NULL; 9762 9763 if (env->cp15.hcr_el2 & HCR_PTW) { 9764 /* 9765 * PTW means we must fault if this S1 walk touches S2 Device 9766 * memory; otherwise we don't care about the attributes and can 9767 * save the S2 translation the effort of computing them. 9768 */ 9769 pcacheattrs = &cacheattrs; 9770 } 9771 9772 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa, 9773 &txattrs, &s2prot, &s2size, fi, pcacheattrs); 9774 if (ret) { 9775 assert(fi->type != ARMFault_None); 9776 fi->s2addr = addr; 9777 fi->stage2 = true; 9778 fi->s1ptw = true; 9779 return ~0; 9780 } 9781 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { 9782 /* Access was to Device memory: generate Permission fault */ 9783 fi->type = ARMFault_Permission; 9784 fi->s2addr = addr; 9785 fi->stage2 = true; 9786 fi->s1ptw = true; 9787 return ~0; 9788 } 9789 addr = s2pa; 9790 } 9791 return addr; 9792 } 9793 9794 /* All loads done in the course of a page table walk go through here. 
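 * They route the walk address through stage 2 translation when that is in
 * use, and on any failure they return 0 with the fault information recorded
 * in *fi (either the stage-2 fault set up by S1_ptw_translate or an
 * ARMFault_SyncExternalOnWalk fault for a failed memory transaction), so
 * callers must check fi->type rather than the returned data.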
*/ 9795 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 9796 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 9797 { 9798 ARMCPU *cpu = ARM_CPU(cs); 9799 CPUARMState *env = &cpu->env; 9800 MemTxAttrs attrs = {}; 9801 MemTxResult result = MEMTX_OK; 9802 AddressSpace *as; 9803 uint32_t data; 9804 9805 attrs.secure = is_secure; 9806 as = arm_addressspace(cs, attrs); 9807 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 9808 if (fi->s1ptw) { 9809 return 0; 9810 } 9811 if (regime_translation_big_endian(env, mmu_idx)) { 9812 data = address_space_ldl_be(as, addr, attrs, &result); 9813 } else { 9814 data = address_space_ldl_le(as, addr, attrs, &result); 9815 } 9816 if (result == MEMTX_OK) { 9817 return data; 9818 } 9819 fi->type = ARMFault_SyncExternalOnWalk; 9820 fi->ea = arm_extabort_type(result); 9821 return 0; 9822 } 9823 9824 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 9825 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 9826 { 9827 ARMCPU *cpu = ARM_CPU(cs); 9828 CPUARMState *env = &cpu->env; 9829 MemTxAttrs attrs = {}; 9830 MemTxResult result = MEMTX_OK; 9831 AddressSpace *as; 9832 uint64_t data; 9833 9834 attrs.secure = is_secure; 9835 as = arm_addressspace(cs, attrs); 9836 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 9837 if (fi->s1ptw) { 9838 return 0; 9839 } 9840 if (regime_translation_big_endian(env, mmu_idx)) { 9841 data = address_space_ldq_be(as, addr, attrs, &result); 9842 } else { 9843 data = address_space_ldq_le(as, addr, attrs, &result); 9844 } 9845 if (result == MEMTX_OK) { 9846 return data; 9847 } 9848 fi->type = ARMFault_SyncExternalOnWalk; 9849 fi->ea = arm_extabort_type(result); 9850 return 0; 9851 } 9852 9853 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 9854 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9855 hwaddr *phys_ptr, int *prot, 9856 target_ulong *page_size, 9857 ARMMMUFaultInfo *fi) 9858 { 9859 CPUState *cs = env_cpu(env); 9860 int level = 1; 9861 uint32_t table; 9862 uint32_t desc; 9863 int type; 9864 int ap; 9865 int domain = 0; 9866 int domain_prot; 9867 hwaddr phys_addr; 9868 uint32_t dacr; 9869 9870 /* Pagetable walk. */ 9871 /* Lookup l1 descriptor. */ 9872 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 9873 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 9874 fi->type = ARMFault_Translation; 9875 goto do_fault; 9876 } 9877 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9878 mmu_idx, fi); 9879 if (fi->type != ARMFault_None) { 9880 goto do_fault; 9881 } 9882 type = (desc & 3); 9883 domain = (desc >> 5) & 0x0f; 9884 if (regime_el(env, mmu_idx) == 1) { 9885 dacr = env->cp15.dacr_ns; 9886 } else { 9887 dacr = env->cp15.dacr_s; 9888 } 9889 domain_prot = (dacr >> (domain * 2)) & 3; 9890 if (type == 0) { 9891 /* Section translation fault. */ 9892 fi->type = ARMFault_Translation; 9893 goto do_fault; 9894 } 9895 if (type != 2) { 9896 level = 2; 9897 } 9898 if (domain_prot == 0 || domain_prot == 2) { 9899 fi->type = ARMFault_Domain; 9900 goto do_fault; 9901 } 9902 if (type == 2) { 9903 /* 1Mb section. */ 9904 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 9905 ap = (desc >> 10) & 3; 9906 *page_size = 1024 * 1024; 9907 } else { 9908 /* Lookup l2 entry. */ 9909 if (type == 1) { 9910 /* Coarse pagetable. */ 9911 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 9912 } else { 9913 /* Fine pagetable. 
*/ 9914 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 9915 } 9916 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 9917 mmu_idx, fi); 9918 if (fi->type != ARMFault_None) { 9919 goto do_fault; 9920 } 9921 switch (desc & 3) { 9922 case 0: /* Page translation fault. */ 9923 fi->type = ARMFault_Translation; 9924 goto do_fault; 9925 case 1: /* 64k page. */ 9926 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 9927 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 9928 *page_size = 0x10000; 9929 break; 9930 case 2: /* 4k page. */ 9931 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9932 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 9933 *page_size = 0x1000; 9934 break; 9935 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 9936 if (type == 1) { 9937 /* ARMv6/XScale extended small page format */ 9938 if (arm_feature(env, ARM_FEATURE_XSCALE) 9939 || arm_feature(env, ARM_FEATURE_V6)) { 9940 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 9941 *page_size = 0x1000; 9942 } else { 9943 /* UNPREDICTABLE in ARMv5; we choose to take a 9944 * page translation fault. 9945 */ 9946 fi->type = ARMFault_Translation; 9947 goto do_fault; 9948 } 9949 } else { 9950 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 9951 *page_size = 0x400; 9952 } 9953 ap = (desc >> 4) & 3; 9954 break; 9955 default: 9956 /* Never happens, but compiler isn't smart enough to tell. */ 9957 abort(); 9958 } 9959 } 9960 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 9961 *prot |= *prot ? PAGE_EXEC : 0; 9962 if (!(*prot & (1 << access_type))) { 9963 /* Access permission fault. */ 9964 fi->type = ARMFault_Permission; 9965 goto do_fault; 9966 } 9967 *phys_ptr = phys_addr; 9968 return false; 9969 do_fault: 9970 fi->domain = domain; 9971 fi->level = level; 9972 return true; 9973 } 9974 9975 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 9976 MMUAccessType access_type, ARMMMUIdx mmu_idx, 9977 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 9978 target_ulong *page_size, ARMMMUFaultInfo *fi) 9979 { 9980 CPUState *cs = env_cpu(env); 9981 int level = 1; 9982 uint32_t table; 9983 uint32_t desc; 9984 uint32_t xn; 9985 uint32_t pxn = 0; 9986 int type; 9987 int ap; 9988 int domain = 0; 9989 int domain_prot; 9990 hwaddr phys_addr; 9991 uint32_t dacr; 9992 bool ns; 9993 9994 /* Pagetable walk. */ 9995 /* Lookup l1 descriptor. */ 9996 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 9997 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 9998 fi->type = ARMFault_Translation; 9999 goto do_fault; 10000 } 10001 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10002 mmu_idx, fi); 10003 if (fi->type != ARMFault_None) { 10004 goto do_fault; 10005 } 10006 type = (desc & 3); 10007 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 10008 /* Section translation fault, or attempt to use the encoding 10009 * which is Reserved on implementations without PXN. 10010 */ 10011 fi->type = ARMFault_Translation; 10012 goto do_fault; 10013 } 10014 if ((type == 1) || !(desc & (1 << 18))) { 10015 /* Page or Section. 
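 * Supersection descriptors (bit 18 set) have no domain field; they are treated as domain 0.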
*/ 10016 domain = (desc >> 5) & 0x0f; 10017 } 10018 if (regime_el(env, mmu_idx) == 1) { 10019 dacr = env->cp15.dacr_ns; 10020 } else { 10021 dacr = env->cp15.dacr_s; 10022 } 10023 if (type == 1) { 10024 level = 2; 10025 } 10026 domain_prot = (dacr >> (domain * 2)) & 3; 10027 if (domain_prot == 0 || domain_prot == 2) { 10028 /* Section or Page domain fault */ 10029 fi->type = ARMFault_Domain; 10030 goto do_fault; 10031 } 10032 if (type != 1) { 10033 if (desc & (1 << 18)) { 10034 /* Supersection. */ 10035 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 10036 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 10037 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 10038 *page_size = 0x1000000; 10039 } else { 10040 /* Section. */ 10041 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10042 *page_size = 0x100000; 10043 } 10044 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 10045 xn = desc & (1 << 4); 10046 pxn = desc & 1; 10047 ns = extract32(desc, 19, 1); 10048 } else { 10049 if (arm_feature(env, ARM_FEATURE_PXN)) { 10050 pxn = (desc >> 2) & 1; 10051 } 10052 ns = extract32(desc, 3, 1); 10053 /* Lookup l2 entry. */ 10054 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10055 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10056 mmu_idx, fi); 10057 if (fi->type != ARMFault_None) { 10058 goto do_fault; 10059 } 10060 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 10061 switch (desc & 3) { 10062 case 0: /* Page translation fault. */ 10063 fi->type = ARMFault_Translation; 10064 goto do_fault; 10065 case 1: /* 64k page. */ 10066 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10067 xn = desc & (1 << 15); 10068 *page_size = 0x10000; 10069 break; 10070 case 2: case 3: /* 4k page. */ 10071 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10072 xn = desc & 1; 10073 *page_size = 0x1000; 10074 break; 10075 default: 10076 /* Never happens, but compiler isn't smart enough to tell. */ 10077 abort(); 10078 } 10079 } 10080 if (domain_prot == 3) { 10081 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10082 } else { 10083 if (pxn && !regime_is_user(env, mmu_idx)) { 10084 xn = 1; 10085 } 10086 if (xn && access_type == MMU_INST_FETCH) { 10087 fi->type = ARMFault_Permission; 10088 goto do_fault; 10089 } 10090 10091 if (arm_feature(env, ARM_FEATURE_V6K) && 10092 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 10093 /* The simplified model uses AP[0] as an access control bit. */ 10094 if ((ap & 1) == 0) { 10095 /* Access flag fault. */ 10096 fi->type = ARMFault_AccessFlag; 10097 goto do_fault; 10098 } 10099 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 10100 } else { 10101 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10102 } 10103 if (*prot && !xn) { 10104 *prot |= PAGE_EXEC; 10105 } 10106 if (!(*prot & (1 << access_type))) { 10107 /* Access permission fault. */ 10108 fi->type = ARMFault_Permission; 10109 goto do_fault; 10110 } 10111 } 10112 if (ns) { 10113 /* The NS bit will (as required by the architecture) have no effect if 10114 * the CPU doesn't support TZ or this is a non-secure translation 10115 * regime, because the attribute will already be non-secure. 
10116 */ 10117 attrs->secure = false; 10118 } 10119 *phys_ptr = phys_addr; 10120 return false; 10121 do_fault: 10122 fi->domain = domain; 10123 fi->level = level; 10124 return true; 10125 } 10126 10127 /* 10128 * check_s2_mmu_setup 10129 * @cpu: ARMCPU 10130 * @is_aa64: True if the translation regime is in AArch64 state 10131 * @startlevel: Suggested starting level 10132 * @inputsize: Bitsize of IPAs 10133 * @stride: Page-table stride (See the ARM ARM) 10134 * 10135 * Returns true if the suggested S2 translation parameters are OK and 10136 * false otherwise. 10137 */ 10138 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 10139 int inputsize, int stride) 10140 { 10141 const int grainsize = stride + 3; 10142 int startsizecheck; 10143 10144 /* Negative levels are never allowed. */ 10145 if (level < 0) { 10146 return false; 10147 } 10148 10149 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 10150 if (startsizecheck < 1 || startsizecheck > stride + 4) { 10151 return false; 10152 } 10153 10154 if (is_aa64) { 10155 CPUARMState *env = &cpu->env; 10156 unsigned int pamax = arm_pamax(cpu); 10157 10158 switch (stride) { 10159 case 13: /* 64KB Pages. */ 10160 if (level == 0 || (level == 1 && pamax <= 42)) { 10161 return false; 10162 } 10163 break; 10164 case 11: /* 16KB Pages. */ 10165 if (level == 0 || (level == 1 && pamax <= 40)) { 10166 return false; 10167 } 10168 break; 10169 case 9: /* 4KB Pages. */ 10170 if (level == 0 && pamax <= 42) { 10171 return false; 10172 } 10173 break; 10174 default: 10175 g_assert_not_reached(); 10176 } 10177 10178 /* Inputsize checks. */ 10179 if (inputsize > pamax && 10180 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 10181 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 10182 return false; 10183 } 10184 } else { 10185 /* AArch32 only supports 4KB pages. Assert on that. */ 10186 assert(stride == 9); 10187 10188 if (level == 0) { 10189 return false; 10190 } 10191 } 10192 return true; 10193 } 10194 10195 /* Translate from the 4-bit stage 2 representation of 10196 * memory attributes (without cache-allocation hints) to 10197 * the 8-bit representation of the stage 1 MAIR registers 10198 * (which includes allocation hints). 
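 * For example, S2 attrs 0b1111 (Outer/Inner Write-Back) become 0xff (Write-Back, RW-allocate in both halves), while HCR_EL2.CD forces the result to 0x44 (Non-cacheable).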
10199 * 10200 * ref: shared/translation/attrs/S2AttrDecode() 10201 * .../S2ConvertAttrsHints() 10202 */ 10203 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 10204 { 10205 uint8_t hiattr = extract32(s2attrs, 2, 2); 10206 uint8_t loattr = extract32(s2attrs, 0, 2); 10207 uint8_t hihint = 0, lohint = 0; 10208 10209 if (hiattr != 0) { /* normal memory */ 10210 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 10211 hiattr = loattr = 1; /* non-cacheable */ 10212 } else { 10213 if (hiattr != 1) { /* Write-through or write-back */ 10214 hihint = 3; /* RW allocate */ 10215 } 10216 if (loattr != 1) { /* Write-through or write-back */ 10217 lohint = 3; /* RW allocate */ 10218 } 10219 } 10220 } 10221 10222 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 10223 } 10224 #endif /* !CONFIG_USER_ONLY */ 10225 10226 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 10227 { 10228 if (regime_has_2_ranges(mmu_idx)) { 10229 return extract64(tcr, 37, 2); 10230 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10231 return 0; /* VTCR_EL2 */ 10232 } else { 10233 return extract32(tcr, 20, 1); 10234 } 10235 } 10236 10237 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 10238 { 10239 if (regime_has_2_ranges(mmu_idx)) { 10240 return extract64(tcr, 51, 2); 10241 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10242 return 0; /* VTCR_EL2 */ 10243 } else { 10244 return extract32(tcr, 29, 1); 10245 } 10246 } 10247 10248 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10249 ARMMMUIdx mmu_idx, bool data) 10250 { 10251 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10252 bool epd, hpd, using16k, using64k; 10253 int select, tsz, tbi; 10254 10255 if (!regime_has_2_ranges(mmu_idx)) { 10256 select = 0; 10257 tsz = extract32(tcr, 0, 6); 10258 using64k = extract32(tcr, 14, 1); 10259 using16k = extract32(tcr, 15, 1); 10260 if (mmu_idx == ARMMMUIdx_Stage2) { 10261 /* VTCR_EL2 */ 10262 hpd = false; 10263 } else { 10264 hpd = extract32(tcr, 24, 1); 10265 } 10266 epd = false; 10267 } else { 10268 /* 10269 * Bit 55 is always between the two regions, and is canonical for 10270 * determining if address tagging is enabled. 10271 */ 10272 select = extract64(va, 55, 1); 10273 if (!select) { 10274 tsz = extract32(tcr, 0, 6); 10275 epd = extract32(tcr, 7, 1); 10276 using64k = extract32(tcr, 14, 1); 10277 using16k = extract32(tcr, 15, 1); 10278 hpd = extract64(tcr, 41, 1); 10279 } else { 10280 int tg = extract32(tcr, 30, 2); 10281 using16k = tg == 1; 10282 using64k = tg == 3; 10283 tsz = extract32(tcr, 16, 6); 10284 epd = extract32(tcr, 23, 1); 10285 hpd = extract64(tcr, 42, 1); 10286 } 10287 } 10288 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ 10289 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ 10290 10291 /* Present TBI as a composite with TBID. 
*/ 10292 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 10293 if (!data) { 10294 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 10295 } 10296 tbi = (tbi >> select) & 1; 10297 10298 return (ARMVAParameters) { 10299 .tsz = tsz, 10300 .select = select, 10301 .tbi = tbi, 10302 .epd = epd, 10303 .hpd = hpd, 10304 .using16k = using16k, 10305 .using64k = using64k, 10306 }; 10307 } 10308 10309 #ifndef CONFIG_USER_ONLY 10310 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 10311 ARMMMUIdx mmu_idx) 10312 { 10313 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10314 uint32_t el = regime_el(env, mmu_idx); 10315 int select, tsz; 10316 bool epd, hpd; 10317 10318 if (mmu_idx == ARMMMUIdx_Stage2) { 10319 /* VTCR */ 10320 bool sext = extract32(tcr, 4, 1); 10321 bool sign = extract32(tcr, 3, 1); 10322 10323 /* 10324 * If the sign-extend bit is not the same as t0sz[3], the result 10325 * is unpredictable. Flag this as a guest error. 10326 */ 10327 if (sign != sext) { 10328 qemu_log_mask(LOG_GUEST_ERROR, 10329 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 10330 } 10331 tsz = sextract32(tcr, 0, 4) + 8; 10332 select = 0; 10333 hpd = false; 10334 epd = false; 10335 } else if (el == 2) { 10336 /* HTCR */ 10337 tsz = extract32(tcr, 0, 3); 10338 select = 0; 10339 hpd = extract64(tcr, 24, 1); 10340 epd = false; 10341 } else { 10342 int t0sz = extract32(tcr, 0, 3); 10343 int t1sz = extract32(tcr, 16, 3); 10344 10345 if (t1sz == 0) { 10346 select = va > (0xffffffffu >> t0sz); 10347 } else { 10348 /* Note that we will detect errors later. */ 10349 select = va >= ~(0xffffffffu >> t1sz); 10350 } 10351 if (!select) { 10352 tsz = t0sz; 10353 epd = extract32(tcr, 7, 1); 10354 hpd = extract64(tcr, 41, 1); 10355 } else { 10356 tsz = t1sz; 10357 epd = extract32(tcr, 23, 1); 10358 hpd = extract64(tcr, 42, 1); 10359 } 10360 /* For aarch32, hpd0 is not enabled without t2e as well. */ 10361 hpd &= extract32(tcr, 6, 1); 10362 } 10363 10364 return (ARMVAParameters) { 10365 .tsz = tsz, 10366 .select = select, 10367 .epd = epd, 10368 .hpd = hpd, 10369 }; 10370 } 10371 10372 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 10373 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10374 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 10375 target_ulong *page_size_ptr, 10376 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10377 { 10378 ARMCPU *cpu = env_archcpu(env); 10379 CPUState *cs = CPU(cpu); 10380 /* Read an LPAE long-descriptor translation table. */ 10381 ARMFaultType fault_type = ARMFault_Translation; 10382 uint32_t level; 10383 ARMVAParameters param; 10384 uint64_t ttbr; 10385 hwaddr descaddr, indexmask, indexmask_grainsize; 10386 uint32_t tableattrs; 10387 target_ulong page_size; 10388 uint32_t attrs; 10389 int32_t stride; 10390 int addrsize, inputsize; 10391 TCR *tcr = regime_tcr(env, mmu_idx); 10392 int ap, ns, xn, pxn; 10393 uint32_t el = regime_el(env, mmu_idx); 10394 uint64_t descaddrmask; 10395 bool aarch64 = arm_el_is_aa64(env, el); 10396 bool guarded = false; 10397 10398 /* TODO: 10399 * This code does not handle the different format TCR for VTCR_EL2. 10400 * This code also does not support shareability levels. 10401 * Attribute and permission bit handling should also be checked when adding 10402 * support for those page table walks. 
10403 */ 10404 if (aarch64) { 10405 param = aa64_va_parameters(env, address, mmu_idx, 10406 access_type != MMU_INST_FETCH); 10407 level = 0; 10408 addrsize = 64 - 8 * param.tbi; 10409 inputsize = 64 - param.tsz; 10410 } else { 10411 param = aa32_va_parameters(env, address, mmu_idx); 10412 level = 1; 10413 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 10414 inputsize = addrsize - param.tsz; 10415 } 10416 10417 /* 10418 * We determined the region when collecting the parameters, but we 10419 * have not yet validated that the address is valid for the region. 10420 * Extract the top bits and verify that they all match select. 10421 * 10422 * For aa32, if inputsize == addrsize, then we have selected the 10423 * region by exclusion in aa32_va_parameters and there is no more 10424 * validation to do here. 10425 */ 10426 if (inputsize < addrsize) { 10427 target_ulong top_bits = sextract64(address, inputsize, 10428 addrsize - inputsize); 10429 if (-top_bits != param.select) { 10430 /* The gap between the two regions is a Translation fault */ 10431 fault_type = ARMFault_Translation; 10432 goto do_fault; 10433 } 10434 } 10435 10436 if (param.using64k) { 10437 stride = 13; 10438 } else if (param.using16k) { 10439 stride = 11; 10440 } else { 10441 stride = 9; 10442 } 10443 10444 /* Note that QEMU ignores shareability and cacheability attributes, 10445 * so we don't need to do anything with the SH, ORGN, IRGN fields 10446 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 10447 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 10448 * implement any ASID-like capability so we can ignore it (instead 10449 * we will always flush the TLB any time the ASID is changed). 10450 */ 10451 ttbr = regime_ttbr(env, mmu_idx, param.select); 10452 10453 /* Here we should have set up all the parameters for the translation: 10454 * inputsize, ttbr, epd, stride, tbi 10455 */ 10456 10457 if (param.epd) { 10458 /* Translation table walk disabled => Translation fault on TLB miss 10459 * Note: This is always 0 on 64-bit EL2 and EL3. 10460 */ 10461 goto do_fault; 10462 } 10463 10464 if (mmu_idx != ARMMMUIdx_Stage2) { 10465 /* The starting level depends on the virtual address size (which can 10466 * be up to 48 bits) and the translation granule size. It indicates 10467 * the number of strides (stride bits at a time) needed to 10468 * consume the bits of the input address. In the pseudocode this is: 10469 * level = 4 - RoundUp((inputsize - grainsize) / stride) 10470 * where their 'inputsize' is our 'inputsize', 'grainsize' is 10471 * our 'stride + 3' and 'stride' is our 'stride'. 10472 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 10473 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 10474 * = 4 - (inputsize - 4) / stride; 10475 */ 10476 level = 4 - (inputsize - 4) / stride; 10477 } else { 10478 /* For stage 2 translations the starting level is specified by the 10479 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 10480 */ 10481 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 10482 uint32_t startlevel; 10483 bool ok; 10484 10485 if (!aarch64 || stride == 9) { 10486 /* AArch32 or 4KB pages */ 10487 startlevel = 2 - sl0; 10488 } else { 10489 /* 16KB or 64KB pages */ 10490 startlevel = 3 - sl0; 10491 } 10492 10493 /* Check that the starting level is valid. 
*/ 10494 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 10495 inputsize, stride); 10496 if (!ok) { 10497 fault_type = ARMFault_Translation; 10498 goto do_fault; 10499 } 10500 level = startlevel; 10501 } 10502 10503 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 10504 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 10505 10506 /* Now we can extract the actual base address from the TTBR */ 10507 descaddr = extract64(ttbr, 0, 48); 10508 descaddr &= ~indexmask; 10509 10510 /* The address field in the descriptor goes up to bit 39 for ARMv7 10511 * but up to bit 47 for ARMv8, but we use the descaddrmask 10512 * up to bit 39 for AArch32, because we don't need other bits in that case 10513 * to construct next descriptor address (anyway they should be all zeroes). 10514 */ 10515 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 10516 ~indexmask_grainsize; 10517 10518 /* Secure accesses start with the page table in secure memory and 10519 * can be downgraded to non-secure at any step. Non-secure accesses 10520 * remain non-secure. We implement this by just ORing in the NSTable/NS 10521 * bits at each step. 10522 */ 10523 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 10524 for (;;) { 10525 uint64_t descriptor; 10526 bool nstable; 10527 10528 descaddr |= (address >> (stride * (4 - level))) & indexmask; 10529 descaddr &= ~7ULL; 10530 nstable = extract32(tableattrs, 4, 1); 10531 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 10532 if (fi->type != ARMFault_None) { 10533 goto do_fault; 10534 } 10535 10536 if (!(descriptor & 1) || 10537 (!(descriptor & 2) && (level == 3))) { 10538 /* Invalid, or the Reserved level 3 encoding */ 10539 goto do_fault; 10540 } 10541 descaddr = descriptor & descaddrmask; 10542 10543 if ((descriptor & 2) && (level < 3)) { 10544 /* Table entry. The top five bits are attributes which may 10545 * propagate down through lower levels of the table (and 10546 * which are all arranged so that 0 means "no effect", so 10547 * we can gather them up by ORing in the bits at each level). 10548 */ 10549 tableattrs |= extract64(descriptor, 59, 5); 10550 level++; 10551 indexmask = indexmask_grainsize; 10552 continue; 10553 } 10554 /* Block entry at level 1 or 2, or page entry at level 3. 10555 * These are basically the same thing, although the number 10556 * of bits we pull in from the vaddr varies. 10557 */ 10558 page_size = (1ULL << ((stride * (4 - level)) + 3)); 10559 descaddr |= (address & (page_size - 1)); 10560 /* Extract attributes from the descriptor */ 10561 attrs = extract64(descriptor, 2, 10) 10562 | (extract64(descriptor, 52, 12) << 10); 10563 10564 if (mmu_idx == ARMMMUIdx_Stage2) { 10565 /* Stage 2 table descriptors do not include any attribute fields */ 10566 break; 10567 } 10568 /* Merge in attributes from table descriptors */ 10569 attrs |= nstable << 3; /* NS */ 10570 guarded = extract64(descriptor, 50, 1); /* GP */ 10571 if (param.hpd) { 10572 /* HPD disables all the table attributes except NSTable. */ 10573 break; 10574 } 10575 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 10576 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 10577 * means "force PL1 access only", which means forcing AP[1] to 0. 10578 */ 10579 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 10580 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 10581 break; 10582 } 10583 /* Here descaddr is the final physical address, and attributes 10584 * are all in attrs. 
10585 */ 10586 fault_type = ARMFault_AccessFlag; 10587 if ((attrs & (1 << 8)) == 0) { 10588 /* Access flag */ 10589 goto do_fault; 10590 } 10591 10592 ap = extract32(attrs, 4, 2); 10593 xn = extract32(attrs, 12, 1); 10594 10595 if (mmu_idx == ARMMMUIdx_Stage2) { 10596 ns = true; 10597 *prot = get_S2prot(env, ap, xn); 10598 } else { 10599 ns = extract32(attrs, 3, 1); 10600 pxn = extract32(attrs, 11, 1); 10601 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 10602 } 10603 10604 fault_type = ARMFault_Permission; 10605 if (!(*prot & (1 << access_type))) { 10606 goto do_fault; 10607 } 10608 10609 if (ns) { 10610 /* The NS bit will (as required by the architecture) have no effect if 10611 * the CPU doesn't support TZ or this is a non-secure translation 10612 * regime, because the attribute will already be non-secure. 10613 */ 10614 txattrs->secure = false; 10615 } 10616 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ 10617 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 10618 txattrs->target_tlb_bit0 = true; 10619 } 10620 10621 if (cacheattrs != NULL) { 10622 if (mmu_idx == ARMMMUIdx_Stage2) { 10623 cacheattrs->attrs = convert_stage2_attrs(env, 10624 extract32(attrs, 0, 4)); 10625 } else { 10626 /* Index into MAIR registers for cache attributes */ 10627 uint8_t attrindx = extract32(attrs, 0, 3); 10628 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 10629 assert(attrindx <= 7); 10630 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 10631 } 10632 cacheattrs->shareability = extract32(attrs, 6, 2); 10633 } 10634 10635 *phys_ptr = descaddr; 10636 *page_size_ptr = page_size; 10637 return false; 10638 10639 do_fault: 10640 fi->type = fault_type; 10641 fi->level = level; 10642 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 10643 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2); 10644 return true; 10645 } 10646 10647 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 10648 ARMMMUIdx mmu_idx, 10649 int32_t address, int *prot) 10650 { 10651 if (!arm_feature(env, ARM_FEATURE_M)) { 10652 *prot = PAGE_READ | PAGE_WRITE; 10653 switch (address) { 10654 case 0xF0000000 ... 0xFFFFFFFF: 10655 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 10656 /* hivecs execing is ok */ 10657 *prot |= PAGE_EXEC; 10658 } 10659 break; 10660 case 0x00000000 ... 0x7FFFFFFF: 10661 *prot |= PAGE_EXEC; 10662 break; 10663 } 10664 } else { 10665 /* Default system address map for M profile cores. 10666 * The architecture specifies which regions are execute-never; 10667 * at the MPU level no other checks are defined. 10668 */ 10669 switch (address) { 10670 case 0x00000000 ... 0x1fffffff: /* ROM */ 10671 case 0x20000000 ... 0x3fffffff: /* SRAM */ 10672 case 0x60000000 ... 0x7fffffff: /* RAM */ 10673 case 0x80000000 ... 0x9fffffff: /* RAM */ 10674 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10675 break; 10676 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 10677 case 0xa0000000 ... 0xbfffffff: /* Device */ 10678 case 0xc0000000 ... 0xdfffffff: /* Device */ 10679 case 0xe0000000 ... 0xffffffff: /* System */ 10680 *prot = PAGE_READ | PAGE_WRITE; 10681 break; 10682 default: 10683 g_assert_not_reached(); 10684 } 10685 } 10686 } 10687 10688 static bool pmsav7_use_background_region(ARMCPU *cpu, 10689 ARMMMUIdx mmu_idx, bool is_user) 10690 { 10691 /* Return true if we should use the default memory map as a 10692 * "background" region if there are no hits against any MPU regions. 
10693 */ 10694 CPUARMState *env = &cpu->env; 10695 10696 if (is_user) { 10697 return false; 10698 } 10699 10700 if (arm_feature(env, ARM_FEATURE_M)) { 10701 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 10702 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 10703 } else { 10704 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 10705 } 10706 } 10707 10708 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 10709 { 10710 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 10711 return arm_feature(env, ARM_FEATURE_M) && 10712 extract32(address, 20, 12) == 0xe00; 10713 } 10714 10715 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 10716 { 10717 /* True if address is in the M profile system region 10718 * 0xe0000000 - 0xffffffff 10719 */ 10720 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 10721 } 10722 10723 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 10724 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10725 hwaddr *phys_ptr, int *prot, 10726 target_ulong *page_size, 10727 ARMMMUFaultInfo *fi) 10728 { 10729 ARMCPU *cpu = env_archcpu(env); 10730 int n; 10731 bool is_user = regime_is_user(env, mmu_idx); 10732 10733 *phys_ptr = address; 10734 *page_size = TARGET_PAGE_SIZE; 10735 *prot = 0; 10736 10737 if (regime_translation_disabled(env, mmu_idx) || 10738 m_is_ppb_region(env, address)) { 10739 /* MPU disabled or M profile PPB access: use default memory map. 10740 * The other case which uses the default memory map in the 10741 * v7M ARM ARM pseudocode is exception vector reads from the vector 10742 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 10743 * which always does a direct read using address_space_ldl(), rather 10744 * than going via this function, so we don't need to check that here. 10745 */ 10746 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10747 } else { /* MPU enabled */ 10748 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 10749 /* region search */ 10750 uint32_t base = env->pmsav7.drbar[n]; 10751 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 10752 uint32_t rmask; 10753 bool srdis = false; 10754 10755 if (!(env->pmsav7.drsr[n] & 0x1)) { 10756 continue; 10757 } 10758 10759 if (!rsize) { 10760 qemu_log_mask(LOG_GUEST_ERROR, 10761 "DRSR[%d]: Rsize field cannot be 0\n", n); 10762 continue; 10763 } 10764 rsize++; 10765 rmask = (1ull << rsize) - 1; 10766 10767 if (base & rmask) { 10768 qemu_log_mask(LOG_GUEST_ERROR, 10769 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 10770 "to DRSR region size, mask = 0x%" PRIx32 "\n", 10771 n, base, rmask); 10772 continue; 10773 } 10774 10775 if (address < base || address > base + rmask) { 10776 /* 10777 * Address not in this region. We must check whether the 10778 * region covers addresses in the same page as our address. 10779 * In that case we must not report a size that covers the 10780 * whole page for a subsequent hit against a different MPU 10781 * region or the background region, because it would result in 10782 * incorrect TLB hits for subsequent accesses to addresses that 10783 * are in this MPU region. 
10784 */ 10785 if (ranges_overlap(base, rmask, 10786 address & TARGET_PAGE_MASK, 10787 TARGET_PAGE_SIZE)) { 10788 *page_size = 1; 10789 } 10790 continue; 10791 } 10792 10793 /* Region matched */ 10794 10795 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 10796 int i, snd; 10797 uint32_t srdis_mask; 10798 10799 rsize -= 3; /* sub region size (power of 2) */ 10800 snd = ((address - base) >> rsize) & 0x7; 10801 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 10802 10803 srdis_mask = srdis ? 0x3 : 0x0; 10804 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 10805 /* This will check in groups of 2, 4 and then 8, whether 10806 * the subregion bits are consistent. rsize is incremented 10807 * back up to give the region size, considering consistent 10808 * adjacent subregions as one region. Stop testing if rsize 10809 * is already big enough for an entire QEMU page. 10810 */ 10811 int snd_rounded = snd & ~(i - 1); 10812 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 10813 snd_rounded + 8, i); 10814 if (srdis_mask ^ srdis_multi) { 10815 break; 10816 } 10817 srdis_mask = (srdis_mask << i) | srdis_mask; 10818 rsize++; 10819 } 10820 } 10821 if (srdis) { 10822 continue; 10823 } 10824 if (rsize < TARGET_PAGE_BITS) { 10825 *page_size = 1 << rsize; 10826 } 10827 break; 10828 } 10829 10830 if (n == -1) { /* no hits */ 10831 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 10832 /* background fault */ 10833 fi->type = ARMFault_Background; 10834 return true; 10835 } 10836 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10837 } else { /* a MPU hit! */ 10838 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 10839 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 10840 10841 if (m_is_system_region(env, address)) { 10842 /* System space is always execute never */ 10843 xn = 1; 10844 } 10845 10846 if (is_user) { /* User mode AP bit decoding */ 10847 switch (ap) { 10848 case 0: 10849 case 1: 10850 case 5: 10851 break; /* no access */ 10852 case 3: 10853 *prot |= PAGE_WRITE; 10854 /* fall through */ 10855 case 2: 10856 case 6: 10857 *prot |= PAGE_READ | PAGE_EXEC; 10858 break; 10859 case 7: 10860 /* for v7M, same as 6; for R profile a reserved value */ 10861 if (arm_feature(env, ARM_FEATURE_M)) { 10862 *prot |= PAGE_READ | PAGE_EXEC; 10863 break; 10864 } 10865 /* fall through */ 10866 default: 10867 qemu_log_mask(LOG_GUEST_ERROR, 10868 "DRACR[%d]: Bad value for AP bits: 0x%" 10869 PRIx32 "\n", n, ap); 10870 } 10871 } else { /* Priv. 
mode AP bits decoding */ 10872 switch (ap) { 10873 case 0: 10874 break; /* no access */ 10875 case 1: 10876 case 2: 10877 case 3: 10878 *prot |= PAGE_WRITE; 10879 /* fall through */ 10880 case 5: 10881 case 6: 10882 *prot |= PAGE_READ | PAGE_EXEC; 10883 break; 10884 case 7: 10885 /* for v7M, same as 6; for R profile a reserved value */ 10886 if (arm_feature(env, ARM_FEATURE_M)) { 10887 *prot |= PAGE_READ | PAGE_EXEC; 10888 break; 10889 } 10890 /* fall through */ 10891 default: 10892 qemu_log_mask(LOG_GUEST_ERROR, 10893 "DRACR[%d]: Bad value for AP bits: 0x%" 10894 PRIx32 "\n", n, ap); 10895 } 10896 } 10897 10898 /* execute never */ 10899 if (xn) { 10900 *prot &= ~PAGE_EXEC; 10901 } 10902 } 10903 } 10904 10905 fi->type = ARMFault_Permission; 10906 fi->level = 1; 10907 return !(*prot & (1 << access_type)); 10908 } 10909 10910 static bool v8m_is_sau_exempt(CPUARMState *env, 10911 uint32_t address, MMUAccessType access_type) 10912 { 10913 /* The architecture specifies that certain address ranges are 10914 * exempt from v8M SAU/IDAU checks. 10915 */ 10916 return 10917 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 10918 (address >= 0xe0000000 && address <= 0xe0002fff) || 10919 (address >= 0xe000e000 && address <= 0xe000efff) || 10920 (address >= 0xe002e000 && address <= 0xe002efff) || 10921 (address >= 0xe0040000 && address <= 0xe0041fff) || 10922 (address >= 0xe00ff000 && address <= 0xe00fffff); 10923 } 10924 10925 void v8m_security_lookup(CPUARMState *env, uint32_t address, 10926 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10927 V8M_SAttributes *sattrs) 10928 { 10929 /* Look up the security attributes for this address. Compare the 10930 * pseudocode SecurityCheck() function. 10931 * We assume the caller has zero-initialized *sattrs. 10932 */ 10933 ARMCPU *cpu = env_archcpu(env); 10934 int r; 10935 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 10936 int idau_region = IREGION_NOTVALID; 10937 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 10938 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 10939 10940 if (cpu->idau) { 10941 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 10942 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 10943 10944 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 10945 &idau_nsc); 10946 } 10947 10948 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 10949 /* 0xf0000000..0xffffffff is always S for insn fetches */ 10950 return; 10951 } 10952 10953 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 10954 sattrs->ns = !regime_is_secure(env, mmu_idx); 10955 return; 10956 } 10957 10958 if (idau_region != IREGION_NOTVALID) { 10959 sattrs->irvalid = true; 10960 sattrs->iregion = idau_region; 10961 } 10962 10963 switch (env->sau.ctrl & 3) { 10964 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 10965 break; 10966 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 10967 sattrs->ns = true; 10968 break; 10969 default: /* SAU.ENABLE == 1 */ 10970 for (r = 0; r < cpu->sau_sregion; r++) { 10971 if (env->sau.rlar[r] & 1) { 10972 uint32_t base = env->sau.rbar[r] & ~0x1f; 10973 uint32_t limit = env->sau.rlar[r] | 0x1f; 10974 10975 if (base <= address && limit >= address) { 10976 if (base > addr_page_base || limit < addr_page_limit) { 10977 sattrs->subpage = true; 10978 } 10979 if (sattrs->srvalid) { 10980 /* If we hit in more than one region then we must report 10981 * as Secure, not NS-Callable, with no valid region 10982 * number info. 
10983 */ 10984 sattrs->ns = false; 10985 sattrs->nsc = false; 10986 sattrs->sregion = 0; 10987 sattrs->srvalid = false; 10988 break; 10989 } else { 10990 if (env->sau.rlar[r] & 2) { 10991 sattrs->nsc = true; 10992 } else { 10993 sattrs->ns = true; 10994 } 10995 sattrs->srvalid = true; 10996 sattrs->sregion = r; 10997 } 10998 } else { 10999 /* 11000 * Address not in this region. We must check whether the 11001 * region covers addresses in the same page as our address. 11002 * In that case we must not report a size that covers the 11003 * whole page for a subsequent hit against a different MPU 11004 * region or the background region, because it would result 11005 * in incorrect TLB hits for subsequent accesses to 11006 * addresses that are in this MPU region. 11007 */ 11008 if (limit >= base && 11009 ranges_overlap(base, limit - base + 1, 11010 addr_page_base, 11011 TARGET_PAGE_SIZE)) { 11012 sattrs->subpage = true; 11013 } 11014 } 11015 } 11016 } 11017 break; 11018 } 11019 11020 /* 11021 * The IDAU will override the SAU lookup results if it specifies 11022 * higher security than the SAU does. 11023 */ 11024 if (!idau_ns) { 11025 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 11026 sattrs->ns = false; 11027 sattrs->nsc = idau_nsc; 11028 } 11029 } 11030 } 11031 11032 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 11033 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11034 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11035 int *prot, bool *is_subpage, 11036 ARMMMUFaultInfo *fi, uint32_t *mregion) 11037 { 11038 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 11039 * that a full phys-to-virt translation does). 11040 * mregion is (if not NULL) set to the region number which matched, 11041 * or -1 if no region number is returned (MPU off, address did not 11042 * hit a region, address hit in multiple regions). 11043 * We set is_subpage to true if the region hit doesn't cover the 11044 * entire TARGET_PAGE the address is within. 11045 */ 11046 ARMCPU *cpu = env_archcpu(env); 11047 bool is_user = regime_is_user(env, mmu_idx); 11048 uint32_t secure = regime_is_secure(env, mmu_idx); 11049 int n; 11050 int matchregion = -1; 11051 bool hit = false; 11052 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11053 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11054 11055 *is_subpage = false; 11056 *phys_ptr = address; 11057 *prot = 0; 11058 if (mregion) { 11059 *mregion = -1; 11060 } 11061 11062 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 11063 * was an exception vector read from the vector table (which is always 11064 * done using the default system address map), because those accesses 11065 * are done in arm_v7m_load_vector(), which always does a direct 11066 * read using address_space_ldl(), rather than going via this function. 11067 */ 11068 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 11069 hit = true; 11070 } else if (m_is_ppb_region(env, address)) { 11071 hit = true; 11072 } else { 11073 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11074 hit = true; 11075 } 11076 11077 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11078 /* region search */ 11079 /* Note that the base address is bits [31:5] from the register 11080 * with bits [4:0] all zeroes, but the limit address is bits 11081 * [31:5] from the register with bits [4:0] all ones. 
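 * For example, RBAR 0x20000000 with RLAR 0x200003e1 (enable bit set) describes the 1KB region 0x20000000..0x200003ff.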
11082 */ 11083 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 11084 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 11085 11086 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 11087 /* Region disabled */ 11088 continue; 11089 } 11090 11091 if (address < base || address > limit) { 11092 /* 11093 * Address not in this region. We must check whether the 11094 * region covers addresses in the same page as our address. 11095 * In that case we must not report a size that covers the 11096 * whole page for a subsequent hit against a different MPU 11097 * region or the background region, because it would result in 11098 * incorrect TLB hits for subsequent accesses to addresses that 11099 * are in this MPU region. 11100 */ 11101 if (limit >= base && 11102 ranges_overlap(base, limit - base + 1, 11103 addr_page_base, 11104 TARGET_PAGE_SIZE)) { 11105 *is_subpage = true; 11106 } 11107 continue; 11108 } 11109 11110 if (base > addr_page_base || limit < addr_page_limit) { 11111 *is_subpage = true; 11112 } 11113 11114 if (matchregion != -1) { 11115 /* Multiple regions match -- always a failure (unlike 11116 * PMSAv7 where highest-numbered-region wins) 11117 */ 11118 fi->type = ARMFault_Permission; 11119 fi->level = 1; 11120 return true; 11121 } 11122 11123 matchregion = n; 11124 hit = true; 11125 } 11126 } 11127 11128 if (!hit) { 11129 /* background fault */ 11130 fi->type = ARMFault_Background; 11131 return true; 11132 } 11133 11134 if (matchregion == -1) { 11135 /* hit using the background region */ 11136 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11137 } else { 11138 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 11139 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 11140 11141 if (m_is_system_region(env, address)) { 11142 /* System space is always execute never */ 11143 xn = 1; 11144 } 11145 11146 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 11147 if (*prot && !xn) { 11148 *prot |= PAGE_EXEC; 11149 } 11150 /* We don't need to look the attribute up in the MAIR0/MAIR1 11151 * registers because that only tells us about cacheability. 11152 */ 11153 if (mregion) { 11154 *mregion = matchregion; 11155 } 11156 } 11157 11158 fi->type = ARMFault_Permission; 11159 fi->level = 1; 11160 return !(*prot & (1 << access_type)); 11161 } 11162 11163 11164 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 11165 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11166 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11167 int *prot, target_ulong *page_size, 11168 ARMMMUFaultInfo *fi) 11169 { 11170 uint32_t secure = regime_is_secure(env, mmu_idx); 11171 V8M_SAttributes sattrs = {}; 11172 bool ret; 11173 bool mpu_is_subpage; 11174 11175 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 11176 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 11177 if (access_type == MMU_INST_FETCH) { 11178 /* Instruction fetches always use the MMU bank and the 11179 * transaction attribute determined by the fetch address, 11180 * regardless of CPU state. This is painful for QEMU 11181 * to handle, because it would mean we need to encode 11182 * into the mmu_idx not just the (user, negpri) information 11183 * for the current security state but also that for the 11184 * other security state, which would balloon the number 11185 * of mmu_idx values needed alarmingly. 
11186 * Fortunately we can avoid this because it's not actually 11187 * possible to arbitrarily execute code from memory with 11188 * the wrong security attribute: it will always generate 11189 * an exception of some kind or another, apart from the 11190 * special case of an NS CPU executing an SG instruction 11191 * in S&NSC memory. So we always just fail the translation 11192 * here and sort things out in the exception handler 11193 * (including possibly emulating an SG instruction). 11194 */ 11195 if (sattrs.ns != !secure) { 11196 if (sattrs.nsc) { 11197 fi->type = ARMFault_QEMU_NSCExec; 11198 } else { 11199 fi->type = ARMFault_QEMU_SFault; 11200 } 11201 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11202 *phys_ptr = address; 11203 *prot = 0; 11204 return true; 11205 } 11206 } else { 11207 /* For data accesses we always use the MMU bank indicated 11208 * by the current CPU state, but the security attributes 11209 * might downgrade a secure access to nonsecure. 11210 */ 11211 if (sattrs.ns) { 11212 txattrs->secure = false; 11213 } else if (!secure) { 11214 /* NS access to S memory must fault. 11215 * Architecturally we should first check whether the 11216 * MPU information for this address indicates that we 11217 * are doing an unaligned access to Device memory, which 11218 * should generate a UsageFault instead. QEMU does not 11219 * currently check for that kind of unaligned access though. 11220 * If we added it we would need to do so as a special case 11221 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 11222 */ 11223 fi->type = ARMFault_QEMU_SFault; 11224 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11225 *phys_ptr = address; 11226 *prot = 0; 11227 return true; 11228 } 11229 } 11230 } 11231 11232 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 11233 txattrs, prot, &mpu_is_subpage, fi, NULL); 11234 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 11235 return ret; 11236 } 11237 11238 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 11239 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11240 hwaddr *phys_ptr, int *prot, 11241 ARMMMUFaultInfo *fi) 11242 { 11243 int n; 11244 uint32_t mask; 11245 uint32_t base; 11246 bool is_user = regime_is_user(env, mmu_idx); 11247 11248 if (regime_translation_disabled(env, mmu_idx)) { 11249 /* MPU disabled. */ 11250 *phys_ptr = address; 11251 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11252 return false; 11253 } 11254 11255 *phys_ptr = address; 11256 for (n = 7; n >= 0; n--) { 11257 base = env->cp15.c6_region[n]; 11258 if ((base & 1) == 0) { 11259 continue; 11260 } 11261 mask = 1 << ((base >> 1) & 0x1f); 11262 /* Keep this shift separate from the above to avoid an 11263 (undefined) << 32. 
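 A region size field of 31 (a 4GB region) would otherwise require 1 << 32.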
*/ 11264 mask = (mask << 1) - 1; 11265 if (((base ^ address) & ~mask) == 0) { 11266 break; 11267 } 11268 } 11269 if (n < 0) { 11270 fi->type = ARMFault_Background; 11271 return true; 11272 } 11273 11274 if (access_type == MMU_INST_FETCH) { 11275 mask = env->cp15.pmsav5_insn_ap; 11276 } else { 11277 mask = env->cp15.pmsav5_data_ap; 11278 } 11279 mask = (mask >> (n * 4)) & 0xf; 11280 switch (mask) { 11281 case 0: 11282 fi->type = ARMFault_Permission; 11283 fi->level = 1; 11284 return true; 11285 case 1: 11286 if (is_user) { 11287 fi->type = ARMFault_Permission; 11288 fi->level = 1; 11289 return true; 11290 } 11291 *prot = PAGE_READ | PAGE_WRITE; 11292 break; 11293 case 2: 11294 *prot = PAGE_READ; 11295 if (!is_user) { 11296 *prot |= PAGE_WRITE; 11297 } 11298 break; 11299 case 3: 11300 *prot = PAGE_READ | PAGE_WRITE; 11301 break; 11302 case 5: 11303 if (is_user) { 11304 fi->type = ARMFault_Permission; 11305 fi->level = 1; 11306 return true; 11307 } 11308 *prot = PAGE_READ; 11309 break; 11310 case 6: 11311 *prot = PAGE_READ; 11312 break; 11313 default: 11314 /* Bad permission. */ 11315 fi->type = ARMFault_Permission; 11316 fi->level = 1; 11317 return true; 11318 } 11319 *prot |= PAGE_EXEC; 11320 return false; 11321 } 11322 11323 /* Combine either inner or outer cacheability attributes for normal 11324 * memory, according to table D4-42 and pseudocode procedure 11325 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 11326 * 11327 * NB: only stage 1 includes allocation hints (RW bits), leading to 11328 * some asymmetry. 11329 */ 11330 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 11331 { 11332 if (s1 == 4 || s2 == 4) { 11333 /* non-cacheable has precedence */ 11334 return 4; 11335 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 11336 /* stage 1 write-through takes precedence */ 11337 return s1; 11338 } else if (extract32(s2, 2, 2) == 2) { 11339 /* stage 2 write-through takes precedence, but the allocation hint 11340 * is still taken from stage 1 11341 */ 11342 return (2 << 2) | extract32(s1, 0, 2); 11343 } else { /* write-back */ 11344 return s1; 11345 } 11346 } 11347 11348 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 11349 * and CombineS1S2Desc() 11350 * 11351 * @s1: Attributes from stage 1 walk 11352 * @s2: Attributes from stage 2 walk 11353 */ 11354 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 11355 { 11356 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 11357 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 11358 ARMCacheAttrs ret; 11359 11360 /* Combine shareability attributes (table D4-43) */ 11361 if (s1.shareability == 2 || s2.shareability == 2) { 11362 /* if either are outer-shareable, the result is outer-shareable */ 11363 ret.shareability = 2; 11364 } else if (s1.shareability == 3 || s2.shareability == 3) { 11365 /* if either are inner-shareable, the result is inner-shareable */ 11366 ret.shareability = 3; 11367 } else { 11368 /* both non-shareable */ 11369 ret.shareability = 0; 11370 } 11371 11372 /* Combine memory type and cacheability attributes */ 11373 if (s1hi == 0 || s2hi == 0) { 11374 /* Device has precedence over normal */ 11375 if (s1lo == 0 || s2lo == 0) { 11376 /* nGnRnE has precedence over anything */ 11377 ret.attrs = 0; 11378 } else if (s1lo == 4 || s2lo == 4) { 11379 /* non-Reordering has precedence over Reordering */ 11380 ret.attrs = 4; /* nGnRE */ 11381 } else if (s1lo == 8 || s2lo == 8) { 11382 /* 
non-Gathering has precedence over Gathering */ 11383 ret.attrs = 8; /* nGRE */ 11384 } else { 11385 ret.attrs = 0xc; /* GRE */ 11386 } 11387 11388 /* Any location for which the resultant memory type is any 11389 * type of Device memory is always treated as Outer Shareable. 11390 */ 11391 ret.shareability = 2; 11392 } else { /* Normal memory */ 11393 /* Outer/inner cacheability combine independently */ 11394 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 11395 | combine_cacheattr_nibble(s1lo, s2lo); 11396 11397 if (ret.attrs == 0x44) { 11398 /* Any location for which the resultant memory type is Normal 11399 * Inner Non-cacheable, Outer Non-cacheable is always treated 11400 * as Outer Shareable. 11401 */ 11402 ret.shareability = 2; 11403 } 11404 } 11405 11406 return ret; 11407 } 11408 11409 11410 /* get_phys_addr - get the physical address for this virtual address 11411 * 11412 * Find the physical address corresponding to the given virtual address, 11413 * by doing a translation table walk on MMU based systems or using the 11414 * MPU state on MPU based systems. 11415 * 11416 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 11417 * prot and page_size may not be filled in, and the populated fsr value provides 11418 * information on why the translation aborted, in the format of a 11419 * DFSR/IFSR fault register, with the following caveats: 11420 * * we honour the short vs long DFSR format differences. 11421 * * the WnR bit is never set (the caller must do this). 11422 * * for PSMAv5 based systems we don't bother to return a full FSR format 11423 * value. 11424 * 11425 * @env: CPUARMState 11426 * @address: virtual address to get physical address for 11427 * @access_type: 0 for read, 1 for write, 2 for execute 11428 * @mmu_idx: MMU index indicating required translation regime 11429 * @phys_ptr: set to the physical address corresponding to the virtual address 11430 * @attrs: set to the memory transaction attributes to use 11431 * @prot: set to the permissions for the page containing phys_ptr 11432 * @page_size: set to the size of the page containing phys_ptr 11433 * @fi: set to fault info if the translation fails 11434 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11435 */ 11436 bool get_phys_addr(CPUARMState *env, target_ulong address, 11437 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11438 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11439 target_ulong *page_size, 11440 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11441 { 11442 if (mmu_idx == ARMMMUIdx_E10_0 || 11443 mmu_idx == ARMMMUIdx_E10_1 || 11444 mmu_idx == ARMMMUIdx_E10_1_PAN) { 11445 /* Call ourselves recursively to do the stage 1 and then stage 2 11446 * translations. 11447 */ 11448 if (arm_feature(env, ARM_FEATURE_EL2)) { 11449 hwaddr ipa; 11450 int s2_prot; 11451 int ret; 11452 ARMCacheAttrs cacheattrs2 = {}; 11453 11454 ret = get_phys_addr(env, address, access_type, 11455 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 11456 prot, page_size, fi, cacheattrs); 11457 11458 /* If S1 fails or S2 is disabled, return early. */ 11459 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 11460 *phys_ptr = ipa; 11461 return ret; 11462 } 11463 11464 /* S1 is done. Now do S2 translation. */ 11465 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, 11466 phys_ptr, attrs, &s2_prot, 11467 page_size, fi, 11468 cacheattrs != NULL ? &cacheattrs2 : NULL); 11469 fi->s2addr = ipa; 11470 /* Combine the S1 and S2 perms. 
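 * The result is the intersection: an access is allowed only if both stages allow it.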
*/ 11471 *prot &= s2_prot; 11472 11473 /* Combine the S1 and S2 cache attributes, if needed */ 11474 if (!ret && cacheattrs != NULL) { 11475 if (env->cp15.hcr_el2 & HCR_DC) { 11476 /* 11477 * HCR.DC forces the first stage attributes to 11478 * Normal Non-Shareable, 11479 * Inner Write-Back Read-Allocate Write-Allocate, 11480 * Outer Write-Back Read-Allocate Write-Allocate. 11481 */ 11482 cacheattrs->attrs = 0xff; 11483 cacheattrs->shareability = 0; 11484 } 11485 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11486 } 11487 11488 return ret; 11489 } else { 11490 /* 11491 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 11492 */ 11493 mmu_idx = stage_1_mmu_idx(mmu_idx); 11494 } 11495 } 11496 11497 /* The page table entries may downgrade secure to non-secure, but 11498 * cannot upgrade an non-secure translation regime's attributes 11499 * to secure. 11500 */ 11501 attrs->secure = regime_is_secure(env, mmu_idx); 11502 attrs->user = regime_is_user(env, mmu_idx); 11503 11504 /* Fast Context Switch Extension. This doesn't exist at all in v8. 11505 * In v7 and earlier it affects all stage 1 translations. 11506 */ 11507 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 11508 && !arm_feature(env, ARM_FEATURE_V8)) { 11509 if (regime_el(env, mmu_idx) == 3) { 11510 address += env->cp15.fcseidr_s; 11511 } else { 11512 address += env->cp15.fcseidr_ns; 11513 } 11514 } 11515 11516 if (arm_feature(env, ARM_FEATURE_PMSA)) { 11517 bool ret; 11518 *page_size = TARGET_PAGE_SIZE; 11519 11520 if (arm_feature(env, ARM_FEATURE_V8)) { 11521 /* PMSAv8 */ 11522 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 11523 phys_ptr, attrs, prot, page_size, fi); 11524 } else if (arm_feature(env, ARM_FEATURE_V7)) { 11525 /* PMSAv7 */ 11526 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 11527 phys_ptr, prot, page_size, fi); 11528 } else { 11529 /* Pre-v7 MPU */ 11530 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 11531 phys_ptr, prot, fi); 11532 } 11533 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 11534 " mmu_idx %u -> %s (prot %c%c%c)\n", 11535 access_type == MMU_DATA_LOAD ? "reading" : 11536 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 11537 (uint32_t)address, mmu_idx, 11538 ret ? "Miss" : "Hit", 11539 *prot & PAGE_READ ? 'r' : '-', 11540 *prot & PAGE_WRITE ? 'w' : '-', 11541 *prot & PAGE_EXEC ? 'x' : '-'); 11542 11543 return ret; 11544 } 11545 11546 /* Definitely a real MMU, not an MPU */ 11547 11548 if (regime_translation_disabled(env, mmu_idx)) { 11549 /* MMU disabled. 
*/ 11550 *phys_ptr = address; 11551 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11552 *page_size = TARGET_PAGE_SIZE; 11553 return 0; 11554 } 11555 11556 if (regime_using_lpae_format(env, mmu_idx)) { 11557 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 11558 phys_ptr, attrs, prot, page_size, 11559 fi, cacheattrs); 11560 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 11561 return get_phys_addr_v6(env, address, access_type, mmu_idx, 11562 phys_ptr, attrs, prot, page_size, fi); 11563 } else { 11564 return get_phys_addr_v5(env, address, access_type, mmu_idx, 11565 phys_ptr, prot, page_size, fi); 11566 } 11567 } 11568 11569 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 11570 MemTxAttrs *attrs) 11571 { 11572 ARMCPU *cpu = ARM_CPU(cs); 11573 CPUARMState *env = &cpu->env; 11574 hwaddr phys_addr; 11575 target_ulong page_size; 11576 int prot; 11577 bool ret; 11578 ARMMMUFaultInfo fi = {}; 11579 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 11580 11581 *attrs = (MemTxAttrs) {}; 11582 11583 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 11584 attrs, &prot, &page_size, &fi, NULL); 11585 11586 if (ret) { 11587 return -1; 11588 } 11589 return phys_addr; 11590 } 11591 11592 #endif 11593 11594 /* Note that signed overflow is undefined in C. The following routines are 11595 careful to use unsigned types where modulo arithmetic is required. 11596 Failure to do so _will_ break on newer gcc. */ 11597 11598 /* Signed saturating arithmetic. */ 11599 11600 /* Perform 16-bit signed saturating addition. */ 11601 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11602 { 11603 uint16_t res; 11604 11605 res = a + b; 11606 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11607 if (a & 0x8000) 11608 res = 0x8000; 11609 else 11610 res = 0x7fff; 11611 } 11612 return res; 11613 } 11614 11615 /* Perform 8-bit signed saturating addition. */ 11616 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11617 { 11618 uint8_t res; 11619 11620 res = a + b; 11621 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11622 if (a & 0x80) 11623 res = 0x80; 11624 else 11625 res = 0x7f; 11626 } 11627 return res; 11628 } 11629 11630 /* Perform 16-bit signed saturating subtraction. */ 11631 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 11632 { 11633 uint16_t res; 11634 11635 res = a - b; 11636 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 11637 if (a & 0x8000) 11638 res = 0x8000; 11639 else 11640 res = 0x7fff; 11641 } 11642 return res; 11643 } 11644 11645 /* Perform 8-bit signed saturating subtraction. */ 11646 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11647 { 11648 uint8_t res; 11649 11650 res = a - b; 11651 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11652 if (a & 0x80) 11653 res = 0x80; 11654 else 11655 res = 0x7f; 11656 } 11657 return res; 11658 } 11659 11660 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11661 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11662 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11663 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11664 #define PFX q 11665 11666 #include "op_addsub.h" 11667 11668 /* Unsigned saturating arithmetic. 
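 * e.g. add16_usat(0xfffe, 5) clamps to 0xffff instead of wrapping to 0x0003, and sub16_usat() clamps at 0.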
*/ 11669 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11670 { 11671 uint16_t res; 11672 res = a + b; 11673 if (res < a) 11674 res = 0xffff; 11675 return res; 11676 } 11677 11678 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11679 { 11680 if (a > b) 11681 return a - b; 11682 else 11683 return 0; 11684 } 11685 11686 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11687 { 11688 uint8_t res; 11689 res = a + b; 11690 if (res < a) 11691 res = 0xff; 11692 return res; 11693 } 11694 11695 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11696 { 11697 if (a > b) 11698 return a - b; 11699 else 11700 return 0; 11701 } 11702 11703 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11704 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11705 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11706 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11707 #define PFX uq 11708 11709 #include "op_addsub.h" 11710 11711 /* Signed modulo arithmetic. */ 11712 #define SARITH16(a, b, n, op) do { \ 11713 int32_t sum; \ 11714 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11715 RESULT(sum, n, 16); \ 11716 if (sum >= 0) \ 11717 ge |= 3 << (n * 2); \ 11718 } while(0) 11719 11720 #define SARITH8(a, b, n, op) do { \ 11721 int32_t sum; \ 11722 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11723 RESULT(sum, n, 8); \ 11724 if (sum >= 0) \ 11725 ge |= 1 << n; \ 11726 } while(0) 11727 11728 11729 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11730 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11731 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11732 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11733 #define PFX s 11734 #define ARITH_GE 11735 11736 #include "op_addsub.h" 11737 11738 /* Unsigned modulo arithmetic. */ 11739 #define ADD16(a, b, n) do { \ 11740 uint32_t sum; \ 11741 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 11742 RESULT(sum, n, 16); \ 11743 if ((sum >> 16) == 1) \ 11744 ge |= 3 << (n * 2); \ 11745 } while(0) 11746 11747 #define ADD8(a, b, n) do { \ 11748 uint32_t sum; \ 11749 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 11750 RESULT(sum, n, 8); \ 11751 if ((sum >> 8) == 1) \ 11752 ge |= 1 << n; \ 11753 } while(0) 11754 11755 #define SUB16(a, b, n) do { \ 11756 uint32_t sum; \ 11757 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 11758 RESULT(sum, n, 16); \ 11759 if ((sum >> 16) == 0) \ 11760 ge |= 3 << (n * 2); \ 11761 } while(0) 11762 11763 #define SUB8(a, b, n) do { \ 11764 uint32_t sum; \ 11765 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 11766 RESULT(sum, n, 8); \ 11767 if ((sum >> 8) == 0) \ 11768 ge |= 1 << n; \ 11769 } while(0) 11770 11771 #define PFX u 11772 #define ARITH_GE 11773 11774 #include "op_addsub.h" 11775 11776 /* Halved signed arithmetic. */ 11777 #define ADD16(a, b, n) \ 11778 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 11779 #define SUB16(a, b, n) \ 11780 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 11781 #define ADD8(a, b, n) \ 11782 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 11783 #define SUB8(a, b, n) \ 11784 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 11785 #define PFX sh 11786 11787 #include "op_addsub.h" 11788 11789 /* Halved unsigned arithmetic. 
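 * Each lane computes (a + b) >> 1 or (a - b) >> 1 in 32-bit arithmetic, so the carry or borrow out of the 8/16-bit lane is preserved before halving.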
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}

/* CRC helpers.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    stl_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
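/*
 * Illustrative only: the translator is expected to call these helpers with
 * 'bytes' equal to the access size of the CRC32B/H/W (or CRC32CB/CH/CW)
 * instruction being emulated, e.g. a CRC32B of the value 0xab would be
 * roughly
 *
 *     crc = helper_crc32(acc, 0xab, 1);
 *
 * with the upper 24 bits of val already zeroed, as required by the comment
 * above.
 */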
/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
int fp_exception_el(CPUARMState *env, int cur_el)
{
#ifndef CONFIG_USER_ONLY
    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* CPACR can cause a NOCP UsageFault taken to current security state */
        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
            return 1;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
            if (!extract32(env->v7m.nsacr, 10, 1)) {
                /* FP insns cause a NOCP UsageFault taken to Secure */
                return 3;
            }
        }

        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     * This register is ignored if E2H+TGE are both set.
     */
    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);

        switch (fpen) {
        case 0:
        case 2:
            if (cur_el == 0 || cur_el == 1) {
                /* Trap to PL1, which might be EL1 or EL3 */
                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                    return 3;
                }
                return 1;
            }
            if (cur_el == 3 && !is_a64(env)) {
                /* Secure PL1 running at EL3 */
                return 3;
            }
            break;
        case 1:
            if (cur_el == 0) {
                return 1;
            }
            break;
        case 3:
            break;
        }
    }

    /*
     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
     * to control non-secure access to the FPU. It doesn't have any
     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
     */
    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
        if (!extract32(env->cp15.nsacr, 10, 1)) {
            /* FP insns act as UNDEF */
            return cur_el == 2 ? 2 : 1;
        }
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }
#endif
    return 0;
}

/* Return the exception level we're running at if this is our mmu_idx */
int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    if (mmu_idx & ARM_MMU_IDX_M) {
        return mmu_idx & ARM_MMU_IDX_M_PRIV;
    }

    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_SE10_0:
        return 0;
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return 1;
    case ARMMMUIdx_E2:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    g_assert_not_reached();
}
#endif
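/*
 * Illustrative only: arm_mmu_idx_to_el() above recovers the exception
 * level from the translation-regime index produced by arm_mmu_idx_el()
 * below, e.g.
 *
 *     arm_mmu_idx_to_el(ARMMMUIdx_E20_0)      == 0
 *     arm_mmu_idx_to_el(ARMMMUIdx_SE10_1_PAN) == 1
 *     arm_mmu_idx_to_el(ARMMMUIdx_E20_2_PAN)  == 2
 *
 * while M-profile indexes simply encode the privilege level directly in
 * the ARM_MMU_IDX_M_PRIV bit.
 */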
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
    }

    /* See ARM pseudo-function ELIsInHost.  */
    switch (el) {
    case 0:
        if (arm_is_secure_below_el3(env)) {
            return ARMMMUIdx_SE10_0;
        }
        if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
            && arm_el_is_aa64(env, 2)) {
            return ARMMMUIdx_E20_0;
        }
        return ARMMMUIdx_E10_0;
    case 1:
        if (arm_is_secure_below_el3(env)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_SE10_1_PAN;
            }
            return ARMMMUIdx_SE10_1;
        }
        if (env->pstate & PSTATE_PAN) {
            return ARMMMUIdx_E10_1_PAN;
        }
        return ARMMMUIdx_E10_1;
    case 2:
        /* TODO: ARMv8.4-SecEL2 */
        /* Note that TGE does not apply at EL2.  */
        if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
            if (env->pstate & PSTATE_PAN) {
                return ARMMMUIdx_E20_2_PAN;
            }
            return ARMMMUIdx_E20_2;
        }
        return ARMMMUIdx_E2;
    case 3:
        return ARMMMUIdx_SE3;
    default:
        g_assert_not_reached();
    }
}

ARMMMUIdx arm_mmu_idx(CPUARMState *env)
{
    return arm_mmu_idx_el(env, arm_current_el(env));
}

int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    return arm_to_core_mmu_idx(arm_mmu_idx(env));
}

#ifndef CONFIG_USER_ONLY
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return stage_1_mmu_idx(arm_mmu_idx(env));
}
#endif

static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
                                      ARMMMUIdx mmu_idx, uint32_t flags)
{
    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
                       arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
    }
    return flags;
}
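/*
 * Illustrative only: the TBFLAG_* fields are bitfields packed into the
 * cached uint32_t hflags value with the FIELD_DP32()/FIELD_EX32() macros,
 * e.g.
 *
 *     flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, core_idx);   // deposit
 *     core_idx = FIELD_EX32(flags, TBFLAG_ANY, MMUIDX);          // extract
 *
 * so the rebuild_hflags_*() functions below only ever deposit into the
 * cached value, and cpu_get_tb_cpu_state() (and the translator) extract
 * from it.
 */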
static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                         ARMMMUIdx mmu_idx, uint32_t flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }
    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = 0;

    if (arm_v7m_is_handler_mode(env)) {
        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
{
    int flags = 0;

    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
                       arm_debug_target_el(env));
    return flags;
}

static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);

    if (arm_el_is_aa64(env, 1)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
    }

    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                   ARMMMUIdx mmu_idx)
{
    uint32_t flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint64_t sctlr;
    int tbii, tbid;

    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses.  */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al.  */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
            /* TODO: ARMv8.4-SecEL2 */
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static uint32_t rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us; we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    uint32_t env_flags_current = env->hflags;
    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);

    if (unlikely(env_flags_current != env_flags_rebuilt)) {
        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
                env_flags_current, env_flags_rebuilt);
        abort();
    }
#endif
}
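/*
 * Illustrative only: the cached hflags are consumed by the generic TCG
 * code through cpu_get_tb_cpu_state() below, roughly as
 *
 *     target_ulong pc, cs_base;
 *     uint32_t flags;
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     // pc, cs_base and flags key the TB lookup and drive the translator
 *
 * which is why any write that changes a flag source (for example an SCTLR
 * or PSTATE update) is expected to call arm_rebuild_hflags() or one of the
 * HELPER(rebuild_hflags_*) entry points above.
 */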
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->hflags;
    uint32_t pstate_for_ss;

    *cs_base = 0;
    assert_hflags_rebuild_correctly(env);

    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
        }
        pstate_for_ss = env->pstate;
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                flags = FIELD_DP32(flags, TBFLAG_A32,
                                   XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
                                   env->vfp.vec_len);
                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
                                   env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
            }
        }

        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
        pstate_for_ss = env->uncached_cpsr;
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
     */
    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
        (pstate_for_ss & PSTATE_SS)) {
        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
    }

    *pflags = flags;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}

/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif
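/*
 * Worked example for aarch64_sve_narrow_vq() above (illustrative only):
 * each predicate word p[j] holds the bits for four vector quadwords
 * (4 * 16 predicate bits), so narrowing to vq == 2 gives vq & 3 == 2 and
 *
 *     pmask = ~(-1ULL << 32) == 0x00000000ffffffffULL
 *
 * i.e. p[0] keeps only the 32 predicate bits that remain architecturally
 * visible, and p[1] onwards are cleared entirely on the later iterations
 * once pmask has been reset to 0.
 */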