1 /* 2 * ARM generic helpers. 3 * 4 * This code is licensed under the GNU GPL v2 or later. 5 * 6 * SPDX-License-Identifier: GPL-2.0-or-later 7 */ 8 9 #include "qemu/osdep.h" 10 #include "qemu/units.h" 11 #include "qemu/log.h" 12 #include "target/arm/idau.h" 13 #include "trace.h" 14 #include "cpu.h" 15 #include "internals.h" 16 #include "exec/helper-proto.h" 17 #include "qemu/host-utils.h" 18 #include "qemu/main-loop.h" 19 #include "qemu/timer.h" 20 #include "qemu/bitops.h" 21 #include "qemu/crc32c.h" 22 #include "qemu/qemu-print.h" 23 #include "exec/exec-all.h" 24 #include <zlib.h> /* For crc32 */ 25 #include "hw/irq.h" 26 #include "semihosting/semihost.h" 27 #include "sysemu/cpus.h" 28 #include "sysemu/cpu-timers.h" 29 #include "sysemu/kvm.h" 30 #include "qemu/range.h" 31 #include "qapi/qapi-commands-machine-target.h" 32 #include "qapi/error.h" 33 #include "qemu/guest-random.h" 34 #ifdef CONFIG_TCG 35 #include "arm_ldst.h" 36 #include "exec/cpu_ldst.h" 37 #include "semihosting/common-semi.h" 38 #endif 39 #include "cpregs.h" 40 41 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ 42 #define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */ 43 44 #ifndef CONFIG_USER_ONLY 45 46 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, 47 MMUAccessType access_type, ARMMMUIdx mmu_idx, 48 bool s1_is_el0, 49 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 50 target_ulong *page_size_ptr, 51 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 52 __attribute__((nonnull)); 53 #endif 54 55 static void switch_mode(CPUARMState *env, int mode); 56 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx); 57 58 static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) 59 { 60 assert(ri->fieldoffset); 61 if (cpreg_field_is_64bit(ri)) { 62 return CPREG_FIELD64(env, ri); 63 } else { 64 return CPREG_FIELD32(env, ri); 65 } 66 } 67 68 static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 69 uint64_t value) 70 { 71 assert(ri->fieldoffset); 72 if (cpreg_field_is_64bit(ri)) { 73 CPREG_FIELD64(env, ri) = value; 74 } else { 75 CPREG_FIELD32(env, ri) = value; 76 } 77 } 78 79 static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) 80 { 81 return (char *)env + ri->fieldoffset; 82 } 83 84 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) 85 { 86 /* Raw read of a coprocessor register (as needed for migration, etc). */ 87 if (ri->type & ARM_CP_CONST) { 88 return ri->resetvalue; 89 } else if (ri->raw_readfn) { 90 return ri->raw_readfn(env, ri); 91 } else if (ri->readfn) { 92 return ri->readfn(env, ri); 93 } else { 94 return raw_read(env, ri); 95 } 96 } 97 98 static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, 99 uint64_t v) 100 { 101 /* Raw write of a coprocessor register (as needed for migration, etc). 102 * Note that constant registers are treated as write-ignored; the 103 * caller should check for success by whether a readback gives the 104 * value written. 105 */ 106 if (ri->type & ARM_CP_CONST) { 107 return; 108 } else if (ri->raw_writefn) { 109 ri->raw_writefn(env, ri, v); 110 } else if (ri->writefn) { 111 ri->writefn(env, ri, v); 112 } else { 113 raw_write(env, ri, v); 114 } 115 } 116 117 static bool raw_accessors_invalid(const ARMCPRegInfo *ri) 118 { 119 /* Return true if the regdef would cause an assertion if you called 120 * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a 121 * program bug for it not to have the NO_RAW flag). 
122 * NB that returning false here doesn't necessarily mean that calling 123 * read/write_raw_cp_reg() is safe, because we can't distinguish "has 124 * read/write access functions which are safe for raw use" from "has 125 * read/write access functions which have side effects but has forgotten 126 * to provide raw access functions". 127 * The tests here line up with the conditions in read/write_raw_cp_reg() 128 * and assertions in raw_read()/raw_write(). 129 */ 130 if ((ri->type & ARM_CP_CONST) || 131 ri->fieldoffset || 132 ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { 133 return false; 134 } 135 return true; 136 } 137 138 bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) 139 { 140 /* Write the coprocessor state from cpu->env to the (index,value) list. */ 141 int i; 142 bool ok = true; 143 144 for (i = 0; i < cpu->cpreg_array_len; i++) { 145 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 146 const ARMCPRegInfo *ri; 147 uint64_t newval; 148 149 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 150 if (!ri) { 151 ok = false; 152 continue; 153 } 154 if (ri->type & ARM_CP_NO_RAW) { 155 continue; 156 } 157 158 newval = read_raw_cp_reg(&cpu->env, ri); 159 if (kvm_sync) { 160 /* 161 * Only sync if the previous list->cpustate sync succeeded. 162 * Rather than tracking the success/failure state for every 163 * item in the list, we just recheck "does the raw write we must 164 * have made in write_list_to_cpustate() read back OK" here. 165 */ 166 uint64_t oldval = cpu->cpreg_values[i]; 167 168 if (oldval == newval) { 169 continue; 170 } 171 172 write_raw_cp_reg(&cpu->env, ri, oldval); 173 if (read_raw_cp_reg(&cpu->env, ri) != oldval) { 174 continue; 175 } 176 177 write_raw_cp_reg(&cpu->env, ri, newval); 178 } 179 cpu->cpreg_values[i] = newval; 180 } 181 return ok; 182 } 183 184 bool write_list_to_cpustate(ARMCPU *cpu) 185 { 186 int i; 187 bool ok = true; 188 189 for (i = 0; i < cpu->cpreg_array_len; i++) { 190 uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); 191 uint64_t v = cpu->cpreg_values[i]; 192 const ARMCPRegInfo *ri; 193 194 ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 195 if (!ri) { 196 ok = false; 197 continue; 198 } 199 if (ri->type & ARM_CP_NO_RAW) { 200 continue; 201 } 202 /* Write value and confirm it reads back as written 203 * (to catch read-only registers and partially read-only 204 * registers where the incoming migration value doesn't match) 205 */ 206 write_raw_cp_reg(&cpu->env, ri, v); 207 if (read_raw_cp_reg(&cpu->env, ri) != v) { 208 ok = false; 209 } 210 } 211 return ok; 212 } 213 214 static void add_cpreg_to_list(gpointer key, gpointer opaque) 215 { 216 ARMCPU *cpu = opaque; 217 uint32_t regidx = (uintptr_t)key; 218 const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); 219 220 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 221 cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); 222 /* The value array need not be initialized at this point */ 223 cpu->cpreg_array_len++; 224 } 225 } 226 227 static void count_cpreg(gpointer key, gpointer opaque) 228 { 229 ARMCPU *cpu = opaque; 230 const ARMCPRegInfo *ri; 231 232 ri = g_hash_table_lookup(cpu->cp_regs, key); 233 234 if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) { 235 cpu->cpreg_array_len++; 236 } 237 } 238 239 static gint cpreg_key_compare(gconstpointer a, gconstpointer b) 240 { 241 uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); 242 uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); 243 244 if (aidx > bidx) { 245 return 1; 246 } 247 if (aidx < bidx) { 
248 return -1; 249 } 250 return 0; 251 } 252 253 void init_cpreg_list(ARMCPU *cpu) 254 { 255 /* Initialise the cpreg_tuples[] array based on the cp_regs hash. 256 * Note that we require cpreg_tuples[] to be sorted by key ID. 257 */ 258 GList *keys; 259 int arraylen; 260 261 keys = g_hash_table_get_keys(cpu->cp_regs); 262 keys = g_list_sort(keys, cpreg_key_compare); 263 264 cpu->cpreg_array_len = 0; 265 266 g_list_foreach(keys, count_cpreg, cpu); 267 268 arraylen = cpu->cpreg_array_len; 269 cpu->cpreg_indexes = g_new(uint64_t, arraylen); 270 cpu->cpreg_values = g_new(uint64_t, arraylen); 271 cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); 272 cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); 273 cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; 274 cpu->cpreg_array_len = 0; 275 276 g_list_foreach(keys, add_cpreg_to_list, cpu); 277 278 assert(cpu->cpreg_array_len == arraylen); 279 280 g_list_free(keys); 281 } 282 283 /* 284 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0. 285 */ 286 static CPAccessResult access_el3_aa32ns(CPUARMState *env, 287 const ARMCPRegInfo *ri, 288 bool isread) 289 { 290 if (!is_a64(env) && arm_current_el(env) == 3 && 291 arm_is_secure_below_el3(env)) { 292 return CP_ACCESS_TRAP_UNCATEGORIZED; 293 } 294 return CP_ACCESS_OK; 295 } 296 297 /* Some secure-only AArch32 registers trap to EL3 if used from 298 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). 299 * Note that an access from Secure EL1 can only happen if EL3 is AArch64. 300 * We assume that the .access field is set to PL1_RW. 301 */ 302 static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, 303 const ARMCPRegInfo *ri, 304 bool isread) 305 { 306 if (arm_current_el(env) == 3) { 307 return CP_ACCESS_OK; 308 } 309 if (arm_is_secure_below_el3(env)) { 310 if (env->cp15.scr_el3 & SCR_EEL2) { 311 return CP_ACCESS_TRAP_EL2; 312 } 313 return CP_ACCESS_TRAP_EL3; 314 } 315 /* This will be EL1 NS and EL2 NS, which just UNDEF */ 316 return CP_ACCESS_TRAP_UNCATEGORIZED; 317 } 318 319 static uint64_t arm_mdcr_el2_eff(CPUARMState *env) 320 { 321 return arm_is_el2_enabled(env) ? env->cp15.mdcr_el2 : 0; 322 } 323 324 /* Check for traps to "powerdown debug" registers, which are controlled 325 * by MDCR.TDOSA 326 */ 327 static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri, 328 bool isread) 329 { 330 int el = arm_current_el(env); 331 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 332 bool mdcr_el2_tdosa = (mdcr_el2 & MDCR_TDOSA) || (mdcr_el2 & MDCR_TDE) || 333 (arm_hcr_el2_eff(env) & HCR_TGE); 334 335 if (el < 2 && mdcr_el2_tdosa) { 336 return CP_ACCESS_TRAP_EL2; 337 } 338 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) { 339 return CP_ACCESS_TRAP_EL3; 340 } 341 return CP_ACCESS_OK; 342 } 343 344 /* Check for traps to "debug ROM" registers, which are controlled 345 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3. 
346 */ 347 static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri, 348 bool isread) 349 { 350 int el = arm_current_el(env); 351 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 352 bool mdcr_el2_tdra = (mdcr_el2 & MDCR_TDRA) || (mdcr_el2 & MDCR_TDE) || 353 (arm_hcr_el2_eff(env) & HCR_TGE); 354 355 if (el < 2 && mdcr_el2_tdra) { 356 return CP_ACCESS_TRAP_EL2; 357 } 358 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 359 return CP_ACCESS_TRAP_EL3; 360 } 361 return CP_ACCESS_OK; 362 } 363 364 /* Check for traps to general debug registers, which are controlled 365 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3. 366 */ 367 static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, 368 bool isread) 369 { 370 int el = arm_current_el(env); 371 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 372 bool mdcr_el2_tda = (mdcr_el2 & MDCR_TDA) || (mdcr_el2 & MDCR_TDE) || 373 (arm_hcr_el2_eff(env) & HCR_TGE); 374 375 if (el < 2 && mdcr_el2_tda) { 376 return CP_ACCESS_TRAP_EL2; 377 } 378 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) { 379 return CP_ACCESS_TRAP_EL3; 380 } 381 return CP_ACCESS_OK; 382 } 383 384 /* Check for traps to performance monitor registers, which are controlled 385 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. 386 */ 387 static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, 388 bool isread) 389 { 390 int el = arm_current_el(env); 391 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 392 393 if (el < 2 && (mdcr_el2 & MDCR_TPM)) { 394 return CP_ACCESS_TRAP_EL2; 395 } 396 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 397 return CP_ACCESS_TRAP_EL3; 398 } 399 return CP_ACCESS_OK; 400 } 401 402 /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ 403 static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, 404 bool isread) 405 { 406 if (arm_current_el(env) == 1) { 407 uint64_t trap = isread ? HCR_TRVM : HCR_TVM; 408 if (arm_hcr_el2_eff(env) & trap) { 409 return CP_ACCESS_TRAP_EL2; 410 } 411 } 412 return CP_ACCESS_OK; 413 } 414 415 /* Check for traps from EL1 due to HCR_EL2.TSW. */ 416 static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri, 417 bool isread) 418 { 419 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) { 420 return CP_ACCESS_TRAP_EL2; 421 } 422 return CP_ACCESS_OK; 423 } 424 425 /* Check for traps from EL1 due to HCR_EL2.TACR. */ 426 static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, 427 bool isread) 428 { 429 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) { 430 return CP_ACCESS_TRAP_EL2; 431 } 432 return CP_ACCESS_OK; 433 } 434 435 /* Check for traps from EL1 due to HCR_EL2.TTLB. */ 436 static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, 437 bool isread) 438 { 439 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { 440 return CP_ACCESS_TRAP_EL2; 441 } 442 return CP_ACCESS_OK; 443 } 444 445 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 446 { 447 ARMCPU *cpu = env_archcpu(env); 448 449 raw_write(env, ri, value); 450 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ 451 } 452 453 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 454 { 455 ARMCPU *cpu = env_archcpu(env); 456 457 if (raw_read(env, ri) != value) { 458 /* Unlike real hardware the qemu TLB uses virtual addresses, 459 * not modified virtual addresses, so this causes a TLB flush. 
460 */ 461 tlb_flush(CPU(cpu)); 462 raw_write(env, ri, value); 463 } 464 } 465 466 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 467 uint64_t value) 468 { 469 ARMCPU *cpu = env_archcpu(env); 470 471 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 472 && !extended_addresses_enabled(env)) { 473 /* For VMSA (when not using the LPAE long descriptor page table 474 * format) this register includes the ASID, so do a TLB flush. 475 * For PMSA it is purely a process ID and no action is needed. 476 */ 477 tlb_flush(CPU(cpu)); 478 } 479 raw_write(env, ri, value); 480 } 481 482 /* IS variants of TLB operations must affect all cores */ 483 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 484 uint64_t value) 485 { 486 CPUState *cs = env_cpu(env); 487 488 tlb_flush_all_cpus_synced(cs); 489 } 490 491 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 492 uint64_t value) 493 { 494 CPUState *cs = env_cpu(env); 495 496 tlb_flush_all_cpus_synced(cs); 497 } 498 499 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 500 uint64_t value) 501 { 502 CPUState *cs = env_cpu(env); 503 504 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 505 } 506 507 static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 508 uint64_t value) 509 { 510 CPUState *cs = env_cpu(env); 511 512 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 513 } 514 515 /* 516 * Non-IS variants of TLB operations are upgraded to 517 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to 518 * force broadcast of these operations. 519 */ 520 static bool tlb_force_broadcast(CPUARMState *env) 521 { 522 return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); 523 } 524 525 static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, 526 uint64_t value) 527 { 528 /* Invalidate all (TLBIALL) */ 529 CPUState *cs = env_cpu(env); 530 531 if (tlb_force_broadcast(env)) { 532 tlb_flush_all_cpus_synced(cs); 533 } else { 534 tlb_flush(cs); 535 } 536 } 537 538 static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, 539 uint64_t value) 540 { 541 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 542 CPUState *cs = env_cpu(env); 543 544 value &= TARGET_PAGE_MASK; 545 if (tlb_force_broadcast(env)) { 546 tlb_flush_page_all_cpus_synced(cs, value); 547 } else { 548 tlb_flush_page(cs, value); 549 } 550 } 551 552 static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, 553 uint64_t value) 554 { 555 /* Invalidate by ASID (TLBIASID) */ 556 CPUState *cs = env_cpu(env); 557 558 if (tlb_force_broadcast(env)) { 559 tlb_flush_all_cpus_synced(cs); 560 } else { 561 tlb_flush(cs); 562 } 563 } 564 565 static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, 566 uint64_t value) 567 { 568 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 569 CPUState *cs = env_cpu(env); 570 571 value &= TARGET_PAGE_MASK; 572 if (tlb_force_broadcast(env)) { 573 tlb_flush_page_all_cpus_synced(cs, value); 574 } else { 575 tlb_flush_page(cs, value); 576 } 577 } 578 579 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 580 uint64_t value) 581 { 582 CPUState *cs = env_cpu(env); 583 584 tlb_flush_by_mmuidx(cs, 585 ARMMMUIdxBit_E10_1 | 586 ARMMMUIdxBit_E10_1_PAN | 587 ARMMMUIdxBit_E10_0); 588 } 589 590 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 591 uint64_t value) 592 { 593 CPUState *cs = env_cpu(env); 594 595 
tlb_flush_by_mmuidx_all_cpus_synced(cs, 596 ARMMMUIdxBit_E10_1 | 597 ARMMMUIdxBit_E10_1_PAN | 598 ARMMMUIdxBit_E10_0); 599 } 600 601 602 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 603 uint64_t value) 604 { 605 CPUState *cs = env_cpu(env); 606 607 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); 608 } 609 610 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 611 uint64_t value) 612 { 613 CPUState *cs = env_cpu(env); 614 615 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); 616 } 617 618 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 619 uint64_t value) 620 { 621 CPUState *cs = env_cpu(env); 622 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 623 624 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); 625 } 626 627 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 628 uint64_t value) 629 { 630 CPUState *cs = env_cpu(env); 631 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 632 633 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 634 ARMMMUIdxBit_E2); 635 } 636 637 static const ARMCPRegInfo cp_reginfo[] = { 638 /* Define the secure and non-secure FCSE identifier CP registers 639 * separately because there is no secure bank in V8 (no _EL3). This allows 640 * the secure register to be properly reset and migrated. There is also no 641 * v8 EL1 version of the register so the non-secure instance stands alone. 642 */ 643 { .name = "FCSEIDR", 644 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 645 .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, 646 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), 647 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 648 { .name = "FCSEIDR_S", 649 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, 650 .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, 651 .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), 652 .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, 653 /* Define the secure and non-secure context identifier CP registers 654 * separately because there is no secure bank in V8 (no _EL3). This allows 655 * the secure register to be properly reset and migrated. In the 656 * non-secure case, the 32-bit register will have reset and migration 657 * disabled during registration as it is handled by the 64-bit instance. 658 */ 659 { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, 660 .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 661 .access = PL1_RW, .accessfn = access_tvm_trvm, 662 .secure = ARM_CP_SECSTATE_NS, 663 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), 664 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 665 { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, 666 .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, 667 .access = PL1_RW, .accessfn = access_tvm_trvm, 668 .secure = ARM_CP_SECSTATE_S, 669 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), 670 .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, 671 }; 672 673 static const ARMCPRegInfo not_v8_cp_reginfo[] = { 674 /* NB: Some of these registers exist in v8 but with more precise 675 * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). 
676 */ 677 /* MMU Domain access control / MPU write buffer control */ 678 { .name = "DACR", 679 .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, 680 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 681 .writefn = dacr_write, .raw_writefn = raw_write, 682 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 683 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 684 /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. 685 * For v6 and v5, these mappings are overly broad. 686 */ 687 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, 688 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 689 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, 690 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 691 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, 692 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 693 { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, 694 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, 695 /* Cache maintenance ops; some of this space may be overridden later. */ 696 { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 697 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 698 .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, 699 }; 700 701 static const ARMCPRegInfo not_v6_cp_reginfo[] = { 702 /* Not all pre-v6 cores implemented this WFI, so this is slightly 703 * over-broad. 704 */ 705 { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, 706 .access = PL1_W, .type = ARM_CP_WFI }, 707 }; 708 709 static const ARMCPRegInfo not_v7_cp_reginfo[] = { 710 /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which 711 * is UNPREDICTABLE; we choose to NOP as most implementations do). 712 */ 713 { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 714 .access = PL1_W, .type = ARM_CP_WFI }, 715 /* L1 cache lockdown. Not architectural in v6 and earlier but in practice 716 * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and 717 * OMAPCP will override this space. 718 */ 719 { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, 720 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), 721 .resetvalue = 0 }, 722 { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, 723 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), 724 .resetvalue = 0 }, 725 /* v6 doesn't have the cache ID registers but Linux reads them anyway */ 726 { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, 727 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 728 .resetvalue = 0 }, 729 /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; 730 * implementing it as RAZ means the "debug architecture version" bits 731 * will read as a reserved value, which should cause Linux to not try 732 * to use the debug hardware. 733 */ 734 { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 735 .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 736 /* MMU TLB control. Note that the wildcarding means we cover not just 737 * the unified TLB ops but also the dside/iside/inner-shareable variants. 
738 */ 739 { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, 740 .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, 741 .type = ARM_CP_NO_RAW }, 742 { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, 743 .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, 744 .type = ARM_CP_NO_RAW }, 745 { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, 746 .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, 747 .type = ARM_CP_NO_RAW }, 748 { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, 749 .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, 750 .type = ARM_CP_NO_RAW }, 751 { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, 752 .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, 753 { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, 754 .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, 755 }; 756 757 static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, 758 uint64_t value) 759 { 760 uint32_t mask = 0; 761 762 /* In ARMv8 most bits of CPACR_EL1 are RES0. */ 763 if (!arm_feature(env, ARM_FEATURE_V8)) { 764 /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. 765 * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. 766 * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. 767 */ 768 if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { 769 /* VFP coprocessor: cp10 & cp11 [23:20] */ 770 mask |= (1 << 31) | (1 << 30) | (0xf << 20); 771 772 if (!arm_feature(env, ARM_FEATURE_NEON)) { 773 /* ASEDIS [31] bit is RAO/WI */ 774 value |= (1 << 31); 775 } 776 777 /* VFPv3 and upwards with NEON implement 32 double precision 778 * registers (D0-D31). 779 */ 780 if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { 781 /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ 782 value |= (1 << 30); 783 } 784 } 785 value &= mask; 786 } 787 788 /* 789 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 790 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 791 */ 792 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 793 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 794 value &= ~(0xf << 20); 795 value |= env->cp15.cpacr_el1 & (0xf << 20); 796 } 797 798 env->cp15.cpacr_el1 = value; 799 } 800 801 static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) 802 { 803 /* 804 * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 805 * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. 806 */ 807 uint64_t value = env->cp15.cpacr_el1; 808 809 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 810 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 811 value &= ~(0xf << 20); 812 } 813 return value; 814 } 815 816 817 static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 818 { 819 /* Call cpacr_write() so that we reset with the correct RAO bits set 820 * for our CPU features. 
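     * (For example, cpacr_write() above forces ASEDIS to read as one when
     * Advanced SIMD is not implemented, and D32DIS when only D0-D15 are
     * present.)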
821 */ 822 cpacr_write(env, ri, 0); 823 } 824 825 static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 826 bool isread) 827 { 828 if (arm_feature(env, ARM_FEATURE_V8)) { 829 /* Check if CPACR accesses are to be trapped to EL2 */ 830 if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) && 831 (env->cp15.cptr_el[2] & CPTR_TCPAC)) { 832 return CP_ACCESS_TRAP_EL2; 833 /* Check if CPACR accesses are to be trapped to EL3 */ 834 } else if (arm_current_el(env) < 3 && 835 (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 836 return CP_ACCESS_TRAP_EL3; 837 } 838 } 839 840 return CP_ACCESS_OK; 841 } 842 843 static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, 844 bool isread) 845 { 846 /* Check if CPTR accesses are set to trap to EL3 */ 847 if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) { 848 return CP_ACCESS_TRAP_EL3; 849 } 850 851 return CP_ACCESS_OK; 852 } 853 854 static const ARMCPRegInfo v6_cp_reginfo[] = { 855 /* prefetch by MVA in v6, NOP in v7 */ 856 { .name = "MVA_prefetch", 857 .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, 858 .access = PL1_W, .type = ARM_CP_NOP }, 859 /* We need to break the TB after ISB to execute self-modifying code 860 * correctly and also to take any pending interrupts immediately. 861 * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. 862 */ 863 { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, 864 .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, 865 { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, 866 .access = PL0_W, .type = ARM_CP_NOP }, 867 { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, 868 .access = PL0_W, .type = ARM_CP_NOP }, 869 { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, 870 .access = PL1_RW, .accessfn = access_tvm_trvm, 871 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), 872 offsetof(CPUARMState, cp15.ifar_ns) }, 873 .resetvalue = 0, }, 874 /* Watchpoint Fault Address Register : should actually only be present 875 * for 1136, 1176, 11MPCore. 876 */ 877 { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, 878 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, 879 { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, 880 .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, 881 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), 882 .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, 883 }; 884 885 typedef struct pm_event { 886 uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ 887 /* If the event is supported on this CPU (used to generate PMCEID[01]) */ 888 bool (*supported)(CPUARMState *); 889 /* 890 * Retrieve the current count of the underlying event. The programmed 891 * counters hold a difference from the return value from this function 892 */ 893 uint64_t (*get_count)(CPUARMState *); 894 /* 895 * Return how many nanoseconds it will take (at a minimum) for count events 896 * to occur. A negative value indicates the counter will never overflow, or 897 * that the counter has otherwise arranged for the overflow bit to be set 898 * and the PMU interrupt to be raised on overflow. 
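 * (With the fixed 1 GHz ARM_CPU_FREQ defined above, cycles_ns_per() below
 * comes out to one nanosecond per cycle.)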
 */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntrs by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself.
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. In user-only
 * mode there is no QEMU_CLOCK_VIRTUAL, so fall back to the host tick count.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return icount_enabled() == 1; /* Precise instruction counting */
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)icount_get_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which never fire on QEMU, the count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
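 * For example, after pmu_init() runs, supported_event_map[0x011] holds the
 * pm_events[] index of the CPU_CYCLES entry when that event is supported,
 * and UNSUPPORTED_EVENT otherwise.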
1022 */ 1023 #define MAX_EVENT_ID 0x3c 1024 #define UNSUPPORTED_EVENT UINT16_MAX 1025 static uint16_t supported_event_map[MAX_EVENT_ID + 1]; 1026 1027 /* 1028 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map 1029 * of ARM event numbers to indices in our pm_events array. 1030 * 1031 * Note: Events in the 0x40XX range are not currently supported. 1032 */ 1033 void pmu_init(ARMCPU *cpu) 1034 { 1035 unsigned int i; 1036 1037 /* 1038 * Empty supported_event_map and cpu->pmceid[01] before adding supported 1039 * events to them 1040 */ 1041 for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { 1042 supported_event_map[i] = UNSUPPORTED_EVENT; 1043 } 1044 cpu->pmceid0 = 0; 1045 cpu->pmceid1 = 0; 1046 1047 for (i = 0; i < ARRAY_SIZE(pm_events); i++) { 1048 const pm_event *cnt = &pm_events[i]; 1049 assert(cnt->number <= MAX_EVENT_ID); 1050 /* We do not currently support events in the 0x40xx range */ 1051 assert(cnt->number <= 0x3f); 1052 1053 if (cnt->supported(&cpu->env)) { 1054 supported_event_map[cnt->number] = i; 1055 uint64_t event_mask = 1ULL << (cnt->number & 0x1f); 1056 if (cnt->number & 0x20) { 1057 cpu->pmceid1 |= event_mask; 1058 } else { 1059 cpu->pmceid0 |= event_mask; 1060 } 1061 } 1062 } 1063 } 1064 1065 /* 1066 * Check at runtime whether a PMU event is supported for the current machine 1067 */ 1068 static bool event_supported(uint16_t number) 1069 { 1070 if (number > MAX_EVENT_ID) { 1071 return false; 1072 } 1073 return supported_event_map[number] != UNSUPPORTED_EVENT; 1074 } 1075 1076 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, 1077 bool isread) 1078 { 1079 /* Performance monitor registers user accessibility is controlled 1080 * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable 1081 * trapping to EL2 or EL3 for other accesses. 
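     * PMUSERENR.EN (bit 0) gates all EL0 accesses here; the finer-grained
     * SW (bit 1), CR (bit 2) and ER (bit 3) enables are checked by the
     * pmreg_access_swinc/_ccntr/_xevcntr/_selr variants below.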
1082 */ 1083 int el = arm_current_el(env); 1084 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 1085 1086 if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { 1087 return CP_ACCESS_TRAP; 1088 } 1089 if (el < 2 && (mdcr_el2 & MDCR_TPM)) { 1090 return CP_ACCESS_TRAP_EL2; 1091 } 1092 if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { 1093 return CP_ACCESS_TRAP_EL3; 1094 } 1095 1096 return CP_ACCESS_OK; 1097 } 1098 1099 static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, 1100 const ARMCPRegInfo *ri, 1101 bool isread) 1102 { 1103 /* ER: event counter read trap control */ 1104 if (arm_feature(env, ARM_FEATURE_V8) 1105 && arm_current_el(env) == 0 1106 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 1107 && isread) { 1108 return CP_ACCESS_OK; 1109 } 1110 1111 return pmreg_access(env, ri, isread); 1112 } 1113 1114 static CPAccessResult pmreg_access_swinc(CPUARMState *env, 1115 const ARMCPRegInfo *ri, 1116 bool isread) 1117 { 1118 /* SW: software increment write trap control */ 1119 if (arm_feature(env, ARM_FEATURE_V8) 1120 && arm_current_el(env) == 0 1121 && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 1122 && !isread) { 1123 return CP_ACCESS_OK; 1124 } 1125 1126 return pmreg_access(env, ri, isread); 1127 } 1128 1129 static CPAccessResult pmreg_access_selr(CPUARMState *env, 1130 const ARMCPRegInfo *ri, 1131 bool isread) 1132 { 1133 /* ER: event counter read trap control */ 1134 if (arm_feature(env, ARM_FEATURE_V8) 1135 && arm_current_el(env) == 0 1136 && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { 1137 return CP_ACCESS_OK; 1138 } 1139 1140 return pmreg_access(env, ri, isread); 1141 } 1142 1143 static CPAccessResult pmreg_access_ccntr(CPUARMState *env, 1144 const ARMCPRegInfo *ri, 1145 bool isread) 1146 { 1147 /* CR: cycle counter read trap control */ 1148 if (arm_feature(env, ARM_FEATURE_V8) 1149 && arm_current_el(env) == 0 1150 && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 1151 && isread) { 1152 return CP_ACCESS_OK; 1153 } 1154 1155 return pmreg_access(env, ri, isread); 1156 } 1157 1158 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using 1159 * the current EL, security state, and register configuration. 
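 * When EL2 is implemented, MDCR_EL2.HPMN partitions the event counters:
 * counters with an index below HPMN (and the cycle counter) are enabled by
 * PMCR.E, while the remaining counters are enabled by MDCR_EL2.HPME.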
1160 */ 1161 static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) 1162 { 1163 uint64_t filter; 1164 bool e, p, u, nsk, nsu, nsh, m; 1165 bool enabled, prohibited, filtered; 1166 bool secure = arm_is_secure(env); 1167 int el = arm_current_el(env); 1168 uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); 1169 uint8_t hpmn = mdcr_el2 & MDCR_HPMN; 1170 1171 if (!arm_feature(env, ARM_FEATURE_PMU)) { 1172 return false; 1173 } 1174 1175 if (!arm_feature(env, ARM_FEATURE_EL2) || 1176 (counter < hpmn || counter == 31)) { 1177 e = env->cp15.c9_pmcr & PMCRE; 1178 } else { 1179 e = mdcr_el2 & MDCR_HPME; 1180 } 1181 enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); 1182 1183 if (!secure) { 1184 if (el == 2 && (counter < hpmn || counter == 31)) { 1185 prohibited = mdcr_el2 & MDCR_HPMD; 1186 } else { 1187 prohibited = false; 1188 } 1189 } else { 1190 prohibited = arm_feature(env, ARM_FEATURE_EL3) && 1191 !(env->cp15.mdcr_el3 & MDCR_SPME); 1192 } 1193 1194 if (prohibited && counter == 31) { 1195 prohibited = env->cp15.c9_pmcr & PMCRDP; 1196 } 1197 1198 if (counter == 31) { 1199 filter = env->cp15.pmccfiltr_el0; 1200 } else { 1201 filter = env->cp15.c14_pmevtyper[counter]; 1202 } 1203 1204 p = filter & PMXEVTYPER_P; 1205 u = filter & PMXEVTYPER_U; 1206 nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); 1207 nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); 1208 nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); 1209 m = arm_el_is_aa64(env, 1) && 1210 arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); 1211 1212 if (el == 0) { 1213 filtered = secure ? u : u != nsu; 1214 } else if (el == 1) { 1215 filtered = secure ? p : p != nsk; 1216 } else if (el == 2) { 1217 filtered = !nsh; 1218 } else { /* EL3 */ 1219 filtered = m != p; 1220 } 1221 1222 if (counter != 31) { 1223 /* 1224 * If not checking PMCCNTR, ensure the counter is setup to an event we 1225 * support 1226 */ 1227 uint16_t event = filter & PMXEVTYPER_EVTCOUNT; 1228 if (!event_supported(event)) { 1229 return false; 1230 } 1231 } 1232 1233 return enabled && !prohibited && !filtered; 1234 } 1235 1236 static void pmu_update_irq(CPUARMState *env) 1237 { 1238 ARMCPU *cpu = env_archcpu(env); 1239 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && 1240 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); 1241 } 1242 1243 /* 1244 * Ensure c15_ccnt is the guest-visible count so that operations such as 1245 * enabling/disabling the counter or filtering, modifying the count itself, 1246 * etc. can be done logically. This is essentially a no-op if the counter is 1247 * not enabled at the time of the call. 1248 */ 1249 static void pmccntr_op_start(CPUARMState *env) 1250 { 1251 uint64_t cycles = cycles_get_count(env); 1252 1253 if (pmu_counter_enabled(env, 31)) { 1254 uint64_t eff_cycles = cycles; 1255 if (env->cp15.c9_pmcr & PMCRD) { 1256 /* Increment once every 64 processor clock cycles */ 1257 eff_cycles /= 64; 1258 } 1259 1260 uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; 1261 1262 uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ 1263 1ull << 63 : 1ull << 31; 1264 if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { 1265 env->cp15.c9_pmovsr |= (1 << 31); 1266 pmu_update_irq(env); 1267 } 1268 1269 env->cp15.c15_ccnt = new_pmccntr; 1270 } 1271 env->cp15.c15_ccnt_delta = cycles; 1272 } 1273 1274 /* 1275 * If PMCCNTR is enabled, recalculate the delta between the clock and the 1276 * guest-visible count. 
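 * (c15_ccnt_delta ends up holding the underlying, possibly divided-by-64,
 * cycle count minus the guest-visible PMCCNTR value, so the next
 * pmccntr_op_start() can recover the guest value as underlying - delta.)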
A call to pmccntr_op_finish should follow every call to 1277 * pmccntr_op_start. 1278 */ 1279 static void pmccntr_op_finish(CPUARMState *env) 1280 { 1281 if (pmu_counter_enabled(env, 31)) { 1282 #ifndef CONFIG_USER_ONLY 1283 /* Calculate when the counter will next overflow */ 1284 uint64_t remaining_cycles = -env->cp15.c15_ccnt; 1285 if (!(env->cp15.c9_pmcr & PMCRLC)) { 1286 remaining_cycles = (uint32_t)remaining_cycles; 1287 } 1288 int64_t overflow_in = cycles_ns_per(remaining_cycles); 1289 1290 if (overflow_in > 0) { 1291 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1292 overflow_in; 1293 ARMCPU *cpu = env_archcpu(env); 1294 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1295 } 1296 #endif 1297 1298 uint64_t prev_cycles = env->cp15.c15_ccnt_delta; 1299 if (env->cp15.c9_pmcr & PMCRD) { 1300 /* Increment once every 64 processor clock cycles */ 1301 prev_cycles /= 64; 1302 } 1303 env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; 1304 } 1305 } 1306 1307 static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) 1308 { 1309 1310 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1311 uint64_t count = 0; 1312 if (event_supported(event)) { 1313 uint16_t event_idx = supported_event_map[event]; 1314 count = pm_events[event_idx].get_count(env); 1315 } 1316 1317 if (pmu_counter_enabled(env, counter)) { 1318 uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; 1319 1320 if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) { 1321 env->cp15.c9_pmovsr |= (1 << counter); 1322 pmu_update_irq(env); 1323 } 1324 env->cp15.c14_pmevcntr[counter] = new_pmevcntr; 1325 } 1326 env->cp15.c14_pmevcntr_delta[counter] = count; 1327 } 1328 1329 static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) 1330 { 1331 if (pmu_counter_enabled(env, counter)) { 1332 #ifndef CONFIG_USER_ONLY 1333 uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; 1334 uint16_t event_idx = supported_event_map[event]; 1335 uint64_t delta = UINT32_MAX - 1336 (uint32_t)env->cp15.c14_pmevcntr[counter] + 1; 1337 int64_t overflow_in = pm_events[event_idx].ns_per_count(delta); 1338 1339 if (overflow_in > 0) { 1340 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1341 overflow_in; 1342 ARMCPU *cpu = env_archcpu(env); 1343 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1344 } 1345 #endif 1346 1347 env->cp15.c14_pmevcntr_delta[counter] -= 1348 env->cp15.c14_pmevcntr[counter]; 1349 } 1350 } 1351 1352 void pmu_op_start(CPUARMState *env) 1353 { 1354 unsigned int i; 1355 pmccntr_op_start(env); 1356 for (i = 0; i < pmu_num_counters(env); i++) { 1357 pmevcntr_op_start(env, i); 1358 } 1359 } 1360 1361 void pmu_op_finish(CPUARMState *env) 1362 { 1363 unsigned int i; 1364 pmccntr_op_finish(env); 1365 for (i = 0; i < pmu_num_counters(env); i++) { 1366 pmevcntr_op_finish(env, i); 1367 } 1368 } 1369 1370 void pmu_pre_el_change(ARMCPU *cpu, void *ignored) 1371 { 1372 pmu_op_start(&cpu->env); 1373 } 1374 1375 void pmu_post_el_change(ARMCPU *cpu, void *ignored) 1376 { 1377 pmu_op_finish(&cpu->env); 1378 } 1379 1380 void arm_pmu_timer_cb(void *opaque) 1381 { 1382 ARMCPU *cpu = opaque; 1383 1384 /* 1385 * Update all the counter values based on the current underlying counts, 1386 * triggering interrupts to be raised, if necessary. pmu_op_finish() also 1387 * has the effect of setting the cpu->pmu_timer to the next earliest time a 1388 * counter may expire. 
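     * (The timer itself is re-armed by pmccntr_op_finish() and
     * pmevcntr_op_finish() via timer_mod_anticipate_ns().)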
1389 */ 1390 pmu_op_start(&cpu->env); 1391 pmu_op_finish(&cpu->env); 1392 } 1393 1394 static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1395 uint64_t value) 1396 { 1397 pmu_op_start(env); 1398 1399 if (value & PMCRC) { 1400 /* The counter has been reset */ 1401 env->cp15.c15_ccnt = 0; 1402 } 1403 1404 if (value & PMCRP) { 1405 unsigned int i; 1406 for (i = 0; i < pmu_num_counters(env); i++) { 1407 env->cp15.c14_pmevcntr[i] = 0; 1408 } 1409 } 1410 1411 env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; 1412 env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK); 1413 1414 pmu_op_finish(env); 1415 } 1416 1417 static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, 1418 uint64_t value) 1419 { 1420 unsigned int i; 1421 for (i = 0; i < pmu_num_counters(env); i++) { 1422 /* Increment a counter's count iff: */ 1423 if ((value & (1 << i)) && /* counter's bit is set */ 1424 /* counter is enabled and not filtered */ 1425 pmu_counter_enabled(env, i) && 1426 /* counter is SW_INCR */ 1427 (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { 1428 pmevcntr_op_start(env, i); 1429 1430 /* 1431 * Detect if this write causes an overflow since we can't predict 1432 * PMSWINC overflows like we can for other events 1433 */ 1434 uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; 1435 1436 if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) { 1437 env->cp15.c9_pmovsr |= (1 << i); 1438 pmu_update_irq(env); 1439 } 1440 1441 env->cp15.c14_pmevcntr[i] = new_pmswinc; 1442 1443 pmevcntr_op_finish(env, i); 1444 } 1445 } 1446 } 1447 1448 static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1449 { 1450 uint64_t ret; 1451 pmccntr_op_start(env); 1452 ret = env->cp15.c15_ccnt; 1453 pmccntr_op_finish(env); 1454 return ret; 1455 } 1456 1457 static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1458 uint64_t value) 1459 { 1460 /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and 1461 * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the 1462 * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are 1463 * accessed. 
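     * (A PMSELR.SEL value of 31 makes PMXEVTYPER alias PMCCFILTR; see
     * pmevtyper_write() and pmevtyper_read() below.)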
1464 */ 1465 env->cp15.c9_pmselr = value & 0x1f; 1466 } 1467 1468 static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1469 uint64_t value) 1470 { 1471 pmccntr_op_start(env); 1472 env->cp15.c15_ccnt = value; 1473 pmccntr_op_finish(env); 1474 } 1475 1476 static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, 1477 uint64_t value) 1478 { 1479 uint64_t cur_val = pmccntr_read(env, NULL); 1480 1481 pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); 1482 } 1483 1484 static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1485 uint64_t value) 1486 { 1487 pmccntr_op_start(env); 1488 env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; 1489 pmccntr_op_finish(env); 1490 } 1491 1492 static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, 1493 uint64_t value) 1494 { 1495 pmccntr_op_start(env); 1496 /* M is not accessible from AArch32 */ 1497 env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | 1498 (value & PMCCFILTR); 1499 pmccntr_op_finish(env); 1500 } 1501 1502 static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) 1503 { 1504 /* M is not visible in AArch32 */ 1505 return env->cp15.pmccfiltr_el0 & PMCCFILTR; 1506 } 1507 1508 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1509 uint64_t value) 1510 { 1511 value &= pmu_counter_mask(env); 1512 env->cp15.c9_pmcnten |= value; 1513 } 1514 1515 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1516 uint64_t value) 1517 { 1518 value &= pmu_counter_mask(env); 1519 env->cp15.c9_pmcnten &= ~value; 1520 } 1521 1522 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1523 uint64_t value) 1524 { 1525 value &= pmu_counter_mask(env); 1526 env->cp15.c9_pmovsr &= ~value; 1527 pmu_update_irq(env); 1528 } 1529 1530 static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1531 uint64_t value) 1532 { 1533 value &= pmu_counter_mask(env); 1534 env->cp15.c9_pmovsr |= value; 1535 pmu_update_irq(env); 1536 } 1537 1538 static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, 1539 uint64_t value, const uint8_t counter) 1540 { 1541 if (counter == 31) { 1542 pmccfiltr_write(env, ri, value); 1543 } else if (counter < pmu_num_counters(env)) { 1544 pmevcntr_op_start(env, counter); 1545 1546 /* 1547 * If this counter's event type is changing, store the current 1548 * underlying count for the new type in c14_pmevcntr_delta[counter] so 1549 * pmevcntr_op_finish has the correct baseline when it converts back to 1550 * a delta. 1551 */ 1552 uint16_t old_event = env->cp15.c14_pmevtyper[counter] & 1553 PMXEVTYPER_EVTCOUNT; 1554 uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; 1555 if (old_event != new_event) { 1556 uint64_t count = 0; 1557 if (event_supported(new_event)) { 1558 uint16_t event_idx = supported_event_map[new_event]; 1559 count = pm_events[event_idx].get_count(env); 1560 } 1561 env->cp15.c14_pmevcntr_delta[counter] = count; 1562 } 1563 1564 env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; 1565 pmevcntr_op_finish(env, counter); 1566 } 1567 /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when 1568 * PMSELR value is equal to or greater than the number of implemented 1569 * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. 
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
         */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only overflow-interrupt bits for implemented counters can be set */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (ri->state == ARM_CP_STATE_AA64) {
        if (arm_feature(env, ARM_FEATURE_AARCH64) &&
            !cpu_isar_feature(aa64_aa32_el1, cpu)) {
            value |= SCR_FW | SCR_AW;   /* these two bits are RES1.
*/ 1755 } 1756 valid_mask &= ~SCR_NET; 1757 1758 if (cpu_isar_feature(aa64_lor, cpu)) { 1759 valid_mask |= SCR_TLOR; 1760 } 1761 if (cpu_isar_feature(aa64_pauth, cpu)) { 1762 valid_mask |= SCR_API | SCR_APK; 1763 } 1764 if (cpu_isar_feature(aa64_sel2, cpu)) { 1765 valid_mask |= SCR_EEL2; 1766 } 1767 if (cpu_isar_feature(aa64_mte, cpu)) { 1768 valid_mask |= SCR_ATA; 1769 } 1770 } else { 1771 valid_mask &= ~(SCR_RW | SCR_ST); 1772 } 1773 1774 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1775 valid_mask &= ~SCR_HCE; 1776 1777 /* On ARMv7, SMD (or SCD as it is called in v7) is only 1778 * supported if EL2 exists. The bit is UNK/SBZP when 1779 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1780 * when EL2 is unavailable. 1781 * On ARMv8, this bit is always available. 1782 */ 1783 if (arm_feature(env, ARM_FEATURE_V7) && 1784 !arm_feature(env, ARM_FEATURE_V8)) { 1785 valid_mask &= ~SCR_SMD; 1786 } 1787 } 1788 1789 /* Clear all-context RES0 bits. */ 1790 value &= valid_mask; 1791 raw_write(env, ri, value); 1792 } 1793 1794 static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 1795 { 1796 /* 1797 * scr_write will set the RES1 bits on an AArch64-only CPU. 1798 * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. 1799 */ 1800 scr_write(env, ri, 0); 1801 } 1802 1803 static CPAccessResult access_aa64_tid2(CPUARMState *env, 1804 const ARMCPRegInfo *ri, 1805 bool isread) 1806 { 1807 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { 1808 return CP_ACCESS_TRAP_EL2; 1809 } 1810 1811 return CP_ACCESS_OK; 1812 } 1813 1814 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1815 { 1816 ARMCPU *cpu = env_archcpu(env); 1817 1818 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1819 * bank 1820 */ 1821 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1822 ri->secure & ARM_CP_SECSTATE_S); 1823 1824 return cpu->ccsidr[index]; 1825 } 1826 1827 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1828 uint64_t value) 1829 { 1830 raw_write(env, ri, value & 0xf); 1831 } 1832 1833 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1834 { 1835 CPUState *cs = env_cpu(env); 1836 bool el1 = arm_current_el(env) == 1; 1837 uint64_t hcr_el2 = el1 ? 
                                  arm_hcr_el2_eff(env) : 0;
    uint64_t ret = 0;

    if (hcr_el2 & HCR_IMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (hcr_el2 & HCR_FMO) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
        return CP_ACCESS_TRAP_EL2;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        return access_aa64_tid1(env, ri, isread);
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
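     * (For category (c) that check amounts to: at EL0, refuse the access
     * while PMUSERENR.EN, bit 0 of env->cp15.c9_pmuserenr as written by
     * pmuserenr_write() above, is clear.)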
1898 */ 1899 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 1900 .access = PL0_RW, .type = ARM_CP_ALIAS, 1901 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1902 .writefn = pmcntenset_write, 1903 .accessfn = pmreg_access, 1904 .raw_writefn = raw_write }, 1905 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 1906 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 1907 .access = PL0_RW, .accessfn = pmreg_access, 1908 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 1909 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 1910 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 1911 .access = PL0_RW, 1912 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1913 .accessfn = pmreg_access, 1914 .writefn = pmcntenclr_write, 1915 .type = ARM_CP_ALIAS }, 1916 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 1917 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 1918 .access = PL0_RW, .accessfn = pmreg_access, 1919 .type = ARM_CP_ALIAS, 1920 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 1921 .writefn = pmcntenclr_write }, 1922 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 1923 .access = PL0_RW, .type = ARM_CP_IO, 1924 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 1925 .accessfn = pmreg_access, 1926 .writefn = pmovsr_write, 1927 .raw_writefn = raw_write }, 1928 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 1929 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 1930 .access = PL0_RW, .accessfn = pmreg_access, 1931 .type = ARM_CP_ALIAS | ARM_CP_IO, 1932 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 1933 .writefn = pmovsr_write, 1934 .raw_writefn = raw_write }, 1935 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 1936 .access = PL0_W, .accessfn = pmreg_access_swinc, 1937 .type = ARM_CP_NO_RAW | ARM_CP_IO, 1938 .writefn = pmswinc_write }, 1939 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 1940 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 1941 .access = PL0_W, .accessfn = pmreg_access_swinc, 1942 .type = ARM_CP_NO_RAW | ARM_CP_IO, 1943 .writefn = pmswinc_write }, 1944 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 1945 .access = PL0_RW, .type = ARM_CP_ALIAS, 1946 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 1947 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 1948 .raw_writefn = raw_write}, 1949 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 1950 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 1951 .access = PL0_RW, .accessfn = pmreg_access_selr, 1952 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 1953 .writefn = pmselr_write, .raw_writefn = raw_write, }, 1954 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 1955 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 1956 .readfn = pmccntr_read, .writefn = pmccntr_write32, 1957 .accessfn = pmreg_access_ccntr }, 1958 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 1959 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 1960 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 1961 .type = ARM_CP_IO, 1962 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 1963 .readfn = pmccntr_read, .writefn = pmccntr_write, 1964 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 1965 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 1966 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
1967 .access = PL0_RW, .accessfn = pmreg_access, 1968 .type = ARM_CP_ALIAS | ARM_CP_IO, 1969 .resetvalue = 0, }, 1970 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 1971 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 1972 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 1973 .access = PL0_RW, .accessfn = pmreg_access, 1974 .type = ARM_CP_IO, 1975 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 1976 .resetvalue = 0, }, 1977 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 1978 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1979 .accessfn = pmreg_access, 1980 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1981 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 1982 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 1983 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1984 .accessfn = pmreg_access, 1985 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 1986 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 1987 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1988 .accessfn = pmreg_access_xevcntr, 1989 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 1990 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 1991 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 1992 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 1993 .accessfn = pmreg_access_xevcntr, 1994 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 1995 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 1996 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 1997 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 1998 .resetvalue = 0, 1999 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2000 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2001 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2002 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2003 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2004 .resetvalue = 0, 2005 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2006 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2007 .access = PL1_RW, .accessfn = access_tpm, 2008 .type = ARM_CP_ALIAS | ARM_CP_IO, 2009 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2010 .resetvalue = 0, 2011 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2012 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2013 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2014 .access = PL1_RW, .accessfn = access_tpm, 2015 .type = ARM_CP_IO, 2016 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2017 .writefn = pmintenset_write, .raw_writefn = raw_write, 2018 .resetvalue = 0x0 }, 2019 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2020 .access = PL1_RW, .accessfn = access_tpm, 2021 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, 2022 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2023 .writefn = pmintenclr_write, }, 2024 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2025 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2026 .access = PL1_RW, .accessfn = access_tpm, 2027 .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, 2028 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2029 .writefn = pmintenclr_write }, 2030 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2031 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2032 .access = PL1_R, 2033 .accessfn = access_aa64_tid2, 2034 .readfn = ccsidr_read, 
      .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW,
      .accessfn = access_aa64_tid2,
      .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST,
      .accessfn = access_aa64_tid1,
      .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
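     * (Concretely: the 32-bit mair0/mair1 fields are declared so that each
     * name always refers to the architecturally correct word of the 64-bit
     * MAIR storage on both little- and big-endian hosts, which is why no
     * offsetoflow32()-style adjustment is needed here.)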
2079 */ 2080 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2081 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2082 .access = PL1_RW, .accessfn = access_tvm_trvm, 2083 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2084 offsetof(CPUARMState, cp15.mair0_ns) }, 2085 .resetfn = arm_cp_reset_ignore }, 2086 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2087 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, 2088 .access = PL1_RW, .accessfn = access_tvm_trvm, 2089 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2090 offsetof(CPUARMState, cp15.mair1_ns) }, 2091 .resetfn = arm_cp_reset_ignore }, 2092 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2093 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2094 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2095 /* 32 bit ITLB invalidates */ 2096 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2097 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2098 .writefn = tlbiall_write }, 2099 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2100 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2101 .writefn = tlbimva_write }, 2102 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2103 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2104 .writefn = tlbiasid_write }, 2105 /* 32 bit DTLB invalidates */ 2106 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2107 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2108 .writefn = tlbiall_write }, 2109 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2110 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2111 .writefn = tlbimva_write }, 2112 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2113 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2114 .writefn = tlbiasid_write }, 2115 /* 32 bit TLB invalidates */ 2116 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2117 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2118 .writefn = tlbiall_write }, 2119 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2120 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2121 .writefn = tlbimva_write }, 2122 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2123 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2124 .writefn = tlbiasid_write }, 2125 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2126 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2127 .writefn = tlbimvaa_write }, 2128 }; 2129 2130 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2131 /* 32 bit TLB invalidates, Inner Shareable */ 2132 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2133 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2134 .writefn = tlbiall_is_write }, 2135 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2136 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2137 .writefn = tlbimva_is_write }, 2138 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2139 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2140 .writefn = tlbiasid_is_write }, 2141 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2142 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = 
access_ttlb, 2143 .writefn = tlbimvaa_is_write }, 2144 }; 2145 2146 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2147 /* PMOVSSET is not implemented in v7 before v7ve */ 2148 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2149 .access = PL0_RW, .accessfn = pmreg_access, 2150 .type = ARM_CP_ALIAS | ARM_CP_IO, 2151 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2152 .writefn = pmovsset_write, 2153 .raw_writefn = raw_write }, 2154 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2155 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2156 .access = PL0_RW, .accessfn = pmreg_access, 2157 .type = ARM_CP_ALIAS | ARM_CP_IO, 2158 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2159 .writefn = pmovsset_write, 2160 .raw_writefn = raw_write }, 2161 }; 2162 2163 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2164 uint64_t value) 2165 { 2166 value &= 1; 2167 env->teecr = value; 2168 } 2169 2170 static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2171 bool isread) 2172 { 2173 /* 2174 * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE 2175 * at all, so we don't need to check whether we're v8A. 2176 */ 2177 if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && 2178 (env->cp15.hstr_el2 & HSTR_TTEE)) { 2179 return CP_ACCESS_TRAP_EL2; 2180 } 2181 return CP_ACCESS_OK; 2182 } 2183 2184 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2185 bool isread) 2186 { 2187 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2188 return CP_ACCESS_TRAP; 2189 } 2190 return teecr_access(env, ri, isread); 2191 } 2192 2193 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2194 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2195 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2196 .resetvalue = 0, 2197 .writefn = teecr_write, .accessfn = teecr_access }, 2198 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2199 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2200 .accessfn = teehbr_access, .resetvalue = 0 }, 2201 }; 2202 2203 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2204 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2205 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2206 .access = PL0_RW, 2207 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2208 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2209 .access = PL0_RW, 2210 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2211 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2212 .resetfn = arm_cp_reset_ignore }, 2213 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2214 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2215 .access = PL0_R|PL1_W, 2216 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2217 .resetvalue = 0}, 2218 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2219 .access = PL0_R|PL1_W, 2220 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2221 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2222 .resetfn = arm_cp_reset_ignore }, 2223 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2224 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2225 .access = PL1_RW, 2226 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2227 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2228 .access = PL1_RW, 2229 .bank_fieldoffsets = { 
offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2230 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2231 .resetvalue = 0 }, 2232 }; 2233 2234 #ifndef CONFIG_USER_ONLY 2235 2236 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2237 bool isread) 2238 { 2239 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2240 * Writable only at the highest implemented exception level. 2241 */ 2242 int el = arm_current_el(env); 2243 uint64_t hcr; 2244 uint32_t cntkctl; 2245 2246 switch (el) { 2247 case 0: 2248 hcr = arm_hcr_el2_eff(env); 2249 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2250 cntkctl = env->cp15.cnthctl_el2; 2251 } else { 2252 cntkctl = env->cp15.c14_cntkctl; 2253 } 2254 if (!extract32(cntkctl, 0, 2)) { 2255 return CP_ACCESS_TRAP; 2256 } 2257 break; 2258 case 1: 2259 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2260 arm_is_secure_below_el3(env)) { 2261 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2262 return CP_ACCESS_TRAP_UNCATEGORIZED; 2263 } 2264 break; 2265 case 2: 2266 case 3: 2267 break; 2268 } 2269 2270 if (!isread && el < arm_highest_el(env)) { 2271 return CP_ACCESS_TRAP_UNCATEGORIZED; 2272 } 2273 2274 return CP_ACCESS_OK; 2275 } 2276 2277 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2278 bool isread) 2279 { 2280 unsigned int cur_el = arm_current_el(env); 2281 bool has_el2 = arm_is_el2_enabled(env); 2282 uint64_t hcr = arm_hcr_el2_eff(env); 2283 2284 switch (cur_el) { 2285 case 0: 2286 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2287 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2288 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2289 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2290 } 2291 2292 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2293 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2294 return CP_ACCESS_TRAP; 2295 } 2296 2297 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ 2298 if (hcr & HCR_E2H) { 2299 if (timeridx == GTIMER_PHYS && 2300 !extract32(env->cp15.cnthctl_el2, 10, 1)) { 2301 return CP_ACCESS_TRAP_EL2; 2302 } 2303 } else { 2304 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2305 if (has_el2 && timeridx == GTIMER_PHYS && 2306 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2307 return CP_ACCESS_TRAP_EL2; 2308 } 2309 } 2310 break; 2311 2312 case 1: 2313 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2314 if (has_el2 && timeridx == GTIMER_PHYS && 2315 (hcr & HCR_E2H 2316 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2317 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2318 return CP_ACCESS_TRAP_EL2; 2319 } 2320 break; 2321 } 2322 return CP_ACCESS_OK; 2323 } 2324 2325 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2326 bool isread) 2327 { 2328 unsigned int cur_el = arm_current_el(env); 2329 bool has_el2 = arm_is_el2_enabled(env); 2330 uint64_t hcr = arm_hcr_el2_eff(env); 2331 2332 switch (cur_el) { 2333 case 0: 2334 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2335 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2336 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2337 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2338 } 2339 2340 /* 2341 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2342 * EL0 if EL0[PV]TEN is zero. 
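         * (EL0PTEN is CNTKCTL_EL1 bit 9 and EL0VTEN is bit 8; with
         * GTIMER_PHYS being 0 and GTIMER_VIRT being 1, "9 - timeridx"
         * below picks the right enable bit.)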
2343 */ 2344 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2345 return CP_ACCESS_TRAP; 2346 } 2347 /* fall through */ 2348 2349 case 1: 2350 if (has_el2 && timeridx == GTIMER_PHYS) { 2351 if (hcr & HCR_E2H) { 2352 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ 2353 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2354 return CP_ACCESS_TRAP_EL2; 2355 } 2356 } else { 2357 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2358 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2359 return CP_ACCESS_TRAP_EL2; 2360 } 2361 } 2362 } 2363 break; 2364 } 2365 return CP_ACCESS_OK; 2366 } 2367 2368 static CPAccessResult gt_pct_access(CPUARMState *env, 2369 const ARMCPRegInfo *ri, 2370 bool isread) 2371 { 2372 return gt_counter_access(env, GTIMER_PHYS, isread); 2373 } 2374 2375 static CPAccessResult gt_vct_access(CPUARMState *env, 2376 const ARMCPRegInfo *ri, 2377 bool isread) 2378 { 2379 return gt_counter_access(env, GTIMER_VIRT, isread); 2380 } 2381 2382 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2383 bool isread) 2384 { 2385 return gt_timer_access(env, GTIMER_PHYS, isread); 2386 } 2387 2388 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2389 bool isread) 2390 { 2391 return gt_timer_access(env, GTIMER_VIRT, isread); 2392 } 2393 2394 static CPAccessResult gt_stimer_access(CPUARMState *env, 2395 const ARMCPRegInfo *ri, 2396 bool isread) 2397 { 2398 /* The AArch64 register view of the secure physical timer is 2399 * always accessible from EL3, and configurably accessible from 2400 * Secure EL1. 2401 */ 2402 switch (arm_current_el(env)) { 2403 case 1: 2404 if (!arm_is_secure(env)) { 2405 return CP_ACCESS_TRAP; 2406 } 2407 if (!(env->cp15.scr_el3 & SCR_ST)) { 2408 return CP_ACCESS_TRAP_EL3; 2409 } 2410 return CP_ACCESS_OK; 2411 case 0: 2412 case 2: 2413 return CP_ACCESS_TRAP; 2414 case 3: 2415 return CP_ACCESS_OK; 2416 default: 2417 g_assert_not_reached(); 2418 } 2419 } 2420 2421 static uint64_t gt_get_countervalue(CPUARMState *env) 2422 { 2423 ARMCPU *cpu = env_archcpu(env); 2424 2425 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2426 } 2427 2428 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2429 { 2430 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2431 2432 if (gt->ctl & 1) { 2433 /* Timer enabled: calculate and set current ISTATUS, irq, and 2434 * reset timer to when ISTATUS next has to change 2435 */ 2436 uint64_t offset = timeridx == GTIMER_VIRT ? 2437 cpu->env.cp15.cntvoff_el2 : 0; 2438 uint64_t count = gt_get_countervalue(&cpu->env); 2439 /* Note that this must be unsigned 64 bit arithmetic: */ 2440 int istatus = count - offset >= gt->cval; 2441 uint64_t nexttick; 2442 int irqstate; 2443 2444 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2445 2446 irqstate = (istatus && !(gt->ctl & 2)); 2447 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2448 2449 if (istatus) { 2450 /* Next transition is when count rolls back over to zero */ 2451 nexttick = UINT64_MAX; 2452 } else { 2453 /* Next transition is when we hit cval */ 2454 nexttick = gt->cval + offset; 2455 } 2456 /* Note that the desired next expiry time might be beyond the 2457 * signed-64-bit range of a QEMUTimer -- in this case we just 2458 * set the timer for as far in the future as possible. When the 2459 * timer expires we will reset the timer for any remaining period. 
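         * (nexttick is in counter ticks; converting it to nanoseconds means
         * multiplying by gt_cntfrq_period_ns(), so any value above
         * INT64_MAX / period would overflow the signed 64-bit expiry time,
         * hence the clamp to INT64_MAX below.)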
2460 */ 2461 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2462 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2463 } else { 2464 timer_mod(cpu->gt_timer[timeridx], nexttick); 2465 } 2466 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2467 } else { 2468 /* Timer disabled: ISTATUS and timer output always clear */ 2469 gt->ctl &= ~4; 2470 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2471 timer_del(cpu->gt_timer[timeridx]); 2472 trace_arm_gt_recalc_disabled(timeridx); 2473 } 2474 } 2475 2476 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2477 int timeridx) 2478 { 2479 ARMCPU *cpu = env_archcpu(env); 2480 2481 timer_del(cpu->gt_timer[timeridx]); 2482 } 2483 2484 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2485 { 2486 return gt_get_countervalue(env); 2487 } 2488 2489 static uint64_t gt_virt_cnt_offset(CPUARMState *env) 2490 { 2491 uint64_t hcr; 2492 2493 switch (arm_current_el(env)) { 2494 case 2: 2495 hcr = arm_hcr_el2_eff(env); 2496 if (hcr & HCR_E2H) { 2497 return 0; 2498 } 2499 break; 2500 case 0: 2501 hcr = arm_hcr_el2_eff(env); 2502 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2503 return 0; 2504 } 2505 break; 2506 } 2507 2508 return env->cp15.cntvoff_el2; 2509 } 2510 2511 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2512 { 2513 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2514 } 2515 2516 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2517 int timeridx, 2518 uint64_t value) 2519 { 2520 trace_arm_gt_cval_write(timeridx, value); 2521 env->cp15.c14_timer[timeridx].cval = value; 2522 gt_recalc_timer(env_archcpu(env), timeridx); 2523 } 2524 2525 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2526 int timeridx) 2527 { 2528 uint64_t offset = 0; 2529 2530 switch (timeridx) { 2531 case GTIMER_VIRT: 2532 case GTIMER_HYPVIRT: 2533 offset = gt_virt_cnt_offset(env); 2534 break; 2535 } 2536 2537 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2538 (gt_get_countervalue(env) - offset)); 2539 } 2540 2541 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2542 int timeridx, 2543 uint64_t value) 2544 { 2545 uint64_t offset = 0; 2546 2547 switch (timeridx) { 2548 case GTIMER_VIRT: 2549 case GTIMER_HYPVIRT: 2550 offset = gt_virt_cnt_offset(env); 2551 break; 2552 } 2553 2554 trace_arm_gt_tval_write(timeridx, value); 2555 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2556 sextract64(value, 0, 32); 2557 gt_recalc_timer(env_archcpu(env), timeridx); 2558 } 2559 2560 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2561 int timeridx, 2562 uint64_t value) 2563 { 2564 ARMCPU *cpu = env_archcpu(env); 2565 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2566 2567 trace_arm_gt_ctl_write(timeridx, value); 2568 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2569 if ((oldval ^ value) & 1) { 2570 /* Enable toggled */ 2571 gt_recalc_timer(cpu, timeridx); 2572 } else if ((oldval ^ value) & 2) { 2573 /* IMASK toggled: don't need to recalculate, 2574 * just set the interrupt line based on ISTATUS 2575 */ 2576 int irqstate = (oldval & 4) && !(value & 2); 2577 2578 trace_arm_gt_imask_toggle(timeridx, irqstate); 2579 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2580 } 2581 } 2582 2583 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2584 { 2585 gt_timer_reset(env, ri, GTIMER_PHYS); 2586 } 2587 2588 static void 
gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2589 uint64_t value) 2590 { 2591 gt_cval_write(env, ri, GTIMER_PHYS, value); 2592 } 2593 2594 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2595 { 2596 return gt_tval_read(env, ri, GTIMER_PHYS); 2597 } 2598 2599 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2600 uint64_t value) 2601 { 2602 gt_tval_write(env, ri, GTIMER_PHYS, value); 2603 } 2604 2605 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2606 uint64_t value) 2607 { 2608 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2609 } 2610 2611 static int gt_phys_redir_timeridx(CPUARMState *env) 2612 { 2613 switch (arm_mmu_idx(env)) { 2614 case ARMMMUIdx_E20_0: 2615 case ARMMMUIdx_E20_2: 2616 case ARMMMUIdx_E20_2_PAN: 2617 case ARMMMUIdx_SE20_0: 2618 case ARMMMUIdx_SE20_2: 2619 case ARMMMUIdx_SE20_2_PAN: 2620 return GTIMER_HYP; 2621 default: 2622 return GTIMER_PHYS; 2623 } 2624 } 2625 2626 static int gt_virt_redir_timeridx(CPUARMState *env) 2627 { 2628 switch (arm_mmu_idx(env)) { 2629 case ARMMMUIdx_E20_0: 2630 case ARMMMUIdx_E20_2: 2631 case ARMMMUIdx_E20_2_PAN: 2632 case ARMMMUIdx_SE20_0: 2633 case ARMMMUIdx_SE20_2: 2634 case ARMMMUIdx_SE20_2_PAN: 2635 return GTIMER_HYPVIRT; 2636 default: 2637 return GTIMER_VIRT; 2638 } 2639 } 2640 2641 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2642 const ARMCPRegInfo *ri) 2643 { 2644 int timeridx = gt_phys_redir_timeridx(env); 2645 return env->cp15.c14_timer[timeridx].cval; 2646 } 2647 2648 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2649 uint64_t value) 2650 { 2651 int timeridx = gt_phys_redir_timeridx(env); 2652 gt_cval_write(env, ri, timeridx, value); 2653 } 2654 2655 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2656 const ARMCPRegInfo *ri) 2657 { 2658 int timeridx = gt_phys_redir_timeridx(env); 2659 return gt_tval_read(env, ri, timeridx); 2660 } 2661 2662 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2663 uint64_t value) 2664 { 2665 int timeridx = gt_phys_redir_timeridx(env); 2666 gt_tval_write(env, ri, timeridx, value); 2667 } 2668 2669 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2670 const ARMCPRegInfo *ri) 2671 { 2672 int timeridx = gt_phys_redir_timeridx(env); 2673 return env->cp15.c14_timer[timeridx].ctl; 2674 } 2675 2676 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2677 uint64_t value) 2678 { 2679 int timeridx = gt_phys_redir_timeridx(env); 2680 gt_ctl_write(env, ri, timeridx, value); 2681 } 2682 2683 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2684 { 2685 gt_timer_reset(env, ri, GTIMER_VIRT); 2686 } 2687 2688 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2689 uint64_t value) 2690 { 2691 gt_cval_write(env, ri, GTIMER_VIRT, value); 2692 } 2693 2694 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2695 { 2696 return gt_tval_read(env, ri, GTIMER_VIRT); 2697 } 2698 2699 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2700 uint64_t value) 2701 { 2702 gt_tval_write(env, ri, GTIMER_VIRT, value); 2703 } 2704 2705 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2706 uint64_t value) 2707 { 2708 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2709 } 2710 2711 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2712 uint64_t value) 2713 { 2714 ARMCPU *cpu = env_archcpu(env); 2715 2716 
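    /* Moving CNTVOFF_EL2 shifts the virtual counter, so the virtual
     * timer's ISTATUS and deadline have to be recomputed below.
     */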
trace_arm_gt_cntvoff_write(value); 2717 raw_write(env, ri, value); 2718 gt_recalc_timer(cpu, GTIMER_VIRT); 2719 } 2720 2721 static uint64_t gt_virt_redir_cval_read(CPUARMState *env, 2722 const ARMCPRegInfo *ri) 2723 { 2724 int timeridx = gt_virt_redir_timeridx(env); 2725 return env->cp15.c14_timer[timeridx].cval; 2726 } 2727 2728 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2729 uint64_t value) 2730 { 2731 int timeridx = gt_virt_redir_timeridx(env); 2732 gt_cval_write(env, ri, timeridx, value); 2733 } 2734 2735 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 2736 const ARMCPRegInfo *ri) 2737 { 2738 int timeridx = gt_virt_redir_timeridx(env); 2739 return gt_tval_read(env, ri, timeridx); 2740 } 2741 2742 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2743 uint64_t value) 2744 { 2745 int timeridx = gt_virt_redir_timeridx(env); 2746 gt_tval_write(env, ri, timeridx, value); 2747 } 2748 2749 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 2750 const ARMCPRegInfo *ri) 2751 { 2752 int timeridx = gt_virt_redir_timeridx(env); 2753 return env->cp15.c14_timer[timeridx].ctl; 2754 } 2755 2756 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2757 uint64_t value) 2758 { 2759 int timeridx = gt_virt_redir_timeridx(env); 2760 gt_ctl_write(env, ri, timeridx, value); 2761 } 2762 2763 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2764 { 2765 gt_timer_reset(env, ri, GTIMER_HYP); 2766 } 2767 2768 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2769 uint64_t value) 2770 { 2771 gt_cval_write(env, ri, GTIMER_HYP, value); 2772 } 2773 2774 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2775 { 2776 return gt_tval_read(env, ri, GTIMER_HYP); 2777 } 2778 2779 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2780 uint64_t value) 2781 { 2782 gt_tval_write(env, ri, GTIMER_HYP, value); 2783 } 2784 2785 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2786 uint64_t value) 2787 { 2788 gt_ctl_write(env, ri, GTIMER_HYP, value); 2789 } 2790 2791 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2792 { 2793 gt_timer_reset(env, ri, GTIMER_SEC); 2794 } 2795 2796 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2797 uint64_t value) 2798 { 2799 gt_cval_write(env, ri, GTIMER_SEC, value); 2800 } 2801 2802 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2803 { 2804 return gt_tval_read(env, ri, GTIMER_SEC); 2805 } 2806 2807 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2808 uint64_t value) 2809 { 2810 gt_tval_write(env, ri, GTIMER_SEC, value); 2811 } 2812 2813 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2814 uint64_t value) 2815 { 2816 gt_ctl_write(env, ri, GTIMER_SEC, value); 2817 } 2818 2819 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2820 { 2821 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 2822 } 2823 2824 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2825 uint64_t value) 2826 { 2827 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 2828 } 2829 2830 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2831 { 2832 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 2833 } 2834 2835 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2836 uint64_t value) 2837 { 2838 gt_tval_write(env, ri, 
GTIMER_HYPVIRT, value); 2839 } 2840 2841 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2842 uint64_t value) 2843 { 2844 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 2845 } 2846 2847 void arm_gt_ptimer_cb(void *opaque) 2848 { 2849 ARMCPU *cpu = opaque; 2850 2851 gt_recalc_timer(cpu, GTIMER_PHYS); 2852 } 2853 2854 void arm_gt_vtimer_cb(void *opaque) 2855 { 2856 ARMCPU *cpu = opaque; 2857 2858 gt_recalc_timer(cpu, GTIMER_VIRT); 2859 } 2860 2861 void arm_gt_htimer_cb(void *opaque) 2862 { 2863 ARMCPU *cpu = opaque; 2864 2865 gt_recalc_timer(cpu, GTIMER_HYP); 2866 } 2867 2868 void arm_gt_stimer_cb(void *opaque) 2869 { 2870 ARMCPU *cpu = opaque; 2871 2872 gt_recalc_timer(cpu, GTIMER_SEC); 2873 } 2874 2875 void arm_gt_hvtimer_cb(void *opaque) 2876 { 2877 ARMCPU *cpu = opaque; 2878 2879 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 2880 } 2881 2882 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 2883 { 2884 ARMCPU *cpu = env_archcpu(env); 2885 2886 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 2887 } 2888 2889 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2890 /* Note that CNTFRQ is purely reads-as-written for the benefit 2891 * of software; writing it doesn't actually change the timer frequency. 2892 * Our reset value matches the fixed frequency we implement the timer at. 2893 */ 2894 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 2895 .type = ARM_CP_ALIAS, 2896 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2897 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2898 }, 2899 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2900 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2901 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2902 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2903 .resetfn = arm_gt_cntfrq_reset, 2904 }, 2905 /* overall control: mostly access permissions */ 2906 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2907 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2908 .access = PL1_RW, 2909 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2910 .resetvalue = 0, 2911 }, 2912 /* per-timer control */ 2913 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2914 .secure = ARM_CP_SECSTATE_NS, 2915 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 2916 .accessfn = gt_ptimer_access, 2917 .fieldoffset = offsetoflow32(CPUARMState, 2918 cp15.c14_timer[GTIMER_PHYS].ctl), 2919 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 2920 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 2921 }, 2922 { .name = "CNTP_CTL_S", 2923 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2924 .secure = ARM_CP_SECSTATE_S, 2925 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 2926 .accessfn = gt_ptimer_access, 2927 .fieldoffset = offsetoflow32(CPUARMState, 2928 cp15.c14_timer[GTIMER_SEC].ctl), 2929 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2930 }, 2931 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2932 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2933 .type = ARM_CP_IO, .access = PL0_RW, 2934 .accessfn = gt_ptimer_access, 2935 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 2936 .resetvalue = 0, 2937 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 2938 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 2939 }, 2940 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2941 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 2942 
.accessfn = gt_vtimer_access, 2943 .fieldoffset = offsetoflow32(CPUARMState, 2944 cp15.c14_timer[GTIMER_VIRT].ctl), 2945 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 2946 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 2947 }, 2948 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2949 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2950 .type = ARM_CP_IO, .access = PL0_RW, 2951 .accessfn = gt_vtimer_access, 2952 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2953 .resetvalue = 0, 2954 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 2955 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 2956 }, 2957 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2958 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2959 .secure = ARM_CP_SECSTATE_NS, 2960 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2961 .accessfn = gt_ptimer_access, 2962 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 2963 }, 2964 { .name = "CNTP_TVAL_S", 2965 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2966 .secure = ARM_CP_SECSTATE_S, 2967 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2968 .accessfn = gt_ptimer_access, 2969 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2970 }, 2971 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2972 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2973 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2974 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2975 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 2976 }, 2977 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2978 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2979 .accessfn = gt_vtimer_access, 2980 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 2981 }, 2982 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2983 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2984 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 2985 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2986 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 2987 }, 2988 /* The counter itself */ 2989 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2990 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2991 .accessfn = gt_pct_access, 2992 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2993 }, 2994 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2995 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2996 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2997 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2998 }, 2999 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3000 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3001 .accessfn = gt_vct_access, 3002 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3003 }, 3004 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3005 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3006 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3007 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3008 }, 3009 /* Comparison value, indicating when the timer goes off */ 3010 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3011 .secure = ARM_CP_SECSTATE_NS, 3012 .access = PL0_RW, 3013 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3014 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3015 
.accessfn = gt_ptimer_access, 3016 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3017 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3018 }, 3019 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3020 .secure = ARM_CP_SECSTATE_S, 3021 .access = PL0_RW, 3022 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3023 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3024 .accessfn = gt_ptimer_access, 3025 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3026 }, 3027 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3028 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3029 .access = PL0_RW, 3030 .type = ARM_CP_IO, 3031 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3032 .resetvalue = 0, .accessfn = gt_ptimer_access, 3033 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3034 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3035 }, 3036 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3037 .access = PL0_RW, 3038 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3039 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3040 .accessfn = gt_vtimer_access, 3041 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3042 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3043 }, 3044 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3045 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3046 .access = PL0_RW, 3047 .type = ARM_CP_IO, 3048 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3049 .resetvalue = 0, .accessfn = gt_vtimer_access, 3050 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3051 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3052 }, 3053 /* Secure timer -- this is actually restricted to only EL3 3054 * and configurably Secure-EL1 via the accessfn. 
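     * (See gt_stimer_access() above: EL3 always succeeds, Secure EL1 is
     * allowed only when SCR_EL3.ST is set, and all other exception levels
     * trap.)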
3055 */ 3056 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3057 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3058 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3059 .accessfn = gt_stimer_access, 3060 .readfn = gt_sec_tval_read, 3061 .writefn = gt_sec_tval_write, 3062 .resetfn = gt_sec_timer_reset, 3063 }, 3064 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3065 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3066 .type = ARM_CP_IO, .access = PL1_RW, 3067 .accessfn = gt_stimer_access, 3068 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3069 .resetvalue = 0, 3070 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3071 }, 3072 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3073 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3074 .type = ARM_CP_IO, .access = PL1_RW, 3075 .accessfn = gt_stimer_access, 3076 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3077 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3078 }, 3079 }; 3080 3081 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 3082 bool isread) 3083 { 3084 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 3085 return CP_ACCESS_TRAP; 3086 } 3087 return CP_ACCESS_OK; 3088 } 3089 3090 #else 3091 3092 /* In user-mode most of the generic timer registers are inaccessible 3093 * however modern kernels (4.12+) allow access to cntvct_el0 3094 */ 3095 3096 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3097 { 3098 ARMCPU *cpu = env_archcpu(env); 3099 3100 /* Currently we have no support for QEMUTimer in linux-user so we 3101 * can't call gt_get_countervalue(env), instead we directly 3102 * call the lower level functions. 3103 */ 3104 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3105 } 3106 3107 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3108 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3109 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3110 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3111 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3112 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 3113 }, 3114 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3115 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3116 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3117 .readfn = gt_virt_cnt_read, 3118 }, 3119 }; 3120 3121 #endif 3122 3123 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3124 { 3125 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3126 raw_write(env, ri, value); 3127 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3128 raw_write(env, ri, value & 0xfffff6ff); 3129 } else { 3130 raw_write(env, ri, value & 0xfffff1ff); 3131 } 3132 } 3133 3134 #ifndef CONFIG_USER_ONLY 3135 /* get_phys_addr() isn't present for user-mode-only targets */ 3136 3137 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3138 bool isread) 3139 { 3140 if (ri->opc2 & 4) { 3141 /* The ATS12NSO* operations must trap to EL3 or EL2 if executed in 3142 * Secure EL1 (which can only happen if EL3 is AArch64). 3143 * They are simply UNDEF if executed from NS EL1. 3144 * They function normally from EL2 or EL3. 
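         * (When Secure EL2 is enabled via SCR_EL3.EEL2 the trap below is
         * taken to EL2 rather than EL3.)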
3145 */ 3146 if (arm_current_el(env) == 1) { 3147 if (arm_is_secure_below_el3(env)) { 3148 if (env->cp15.scr_el3 & SCR_EEL2) { 3149 return CP_ACCESS_TRAP_UNCATEGORIZED_EL2; 3150 } 3151 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 3152 } 3153 return CP_ACCESS_TRAP_UNCATEGORIZED; 3154 } 3155 } 3156 return CP_ACCESS_OK; 3157 } 3158 3159 #ifdef CONFIG_TCG 3160 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3161 MMUAccessType access_type, ARMMMUIdx mmu_idx) 3162 { 3163 hwaddr phys_addr; 3164 target_ulong page_size; 3165 int prot; 3166 bool ret; 3167 uint64_t par64; 3168 bool format64 = false; 3169 MemTxAttrs attrs = {}; 3170 ARMMMUFaultInfo fi = {}; 3171 ARMCacheAttrs cacheattrs = {}; 3172 3173 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 3174 &prot, &page_size, &fi, &cacheattrs); 3175 3176 if (ret) { 3177 /* 3178 * Some kinds of translation fault must cause exceptions rather 3179 * than being reported in the PAR. 3180 */ 3181 int current_el = arm_current_el(env); 3182 int target_el; 3183 uint32_t syn, fsr, fsc; 3184 bool take_exc = false; 3185 3186 if (fi.s1ptw && current_el == 1 3187 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3188 /* 3189 * Synchronous stage 2 fault on an access made as part of the 3190 * translation table walk for AT S1E0* or AT S1E1* insn 3191 * executed from NS EL1. If this is a synchronous external abort 3192 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3193 * to EL3. Otherwise the fault is taken as an exception to EL2, 3194 * and HPFAR_EL2 holds the faulting IPA. 3195 */ 3196 if (fi.type == ARMFault_SyncExternalOnWalk && 3197 (env->cp15.scr_el3 & SCR_EA)) { 3198 target_el = 3; 3199 } else { 3200 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3201 if (arm_is_secure_below_el3(env) && fi.s1ns) { 3202 env->cp15.hpfar_el2 |= HPFAR_NS; 3203 } 3204 target_el = 2; 3205 } 3206 take_exc = true; 3207 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3208 /* 3209 * Synchronous external aborts during a translation table walk 3210 * are taken as Data Abort exceptions. 3211 */ 3212 if (fi.stage2) { 3213 if (current_el == 3) { 3214 target_el = 3; 3215 } else { 3216 target_el = 2; 3217 } 3218 } else { 3219 target_el = exception_target_el(env); 3220 } 3221 take_exc = true; 3222 } 3223 3224 if (take_exc) { 3225 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3226 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3227 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3228 fsr = arm_fi_to_lfsc(&fi); 3229 fsc = extract32(fsr, 0, 6); 3230 } else { 3231 fsr = arm_fi_to_sfsc(&fi); 3232 fsc = 0x3f; 3233 } 3234 /* 3235 * Report exception with ESR indicating a fault due to a 3236 * translation table walk for a cache maintenance instruction. 
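             * (Hence the CM and WnR syndrome arguments below are hard-coded
             * to 1, while FnV stays 0 because the faulting address is
             * valid.)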
3237 */ 3238 syn = syn_data_abort_no_iss(current_el == target_el, 0, 3239 fi.ea, 1, fi.s1ptw, 1, fsc); 3240 env->exception.vaddress = value; 3241 env->exception.fsr = fsr; 3242 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3243 } 3244 } 3245 3246 if (is_a64(env)) { 3247 format64 = true; 3248 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3249 /* 3250 * ATS1Cxx: 3251 * * TTBCR.EAE determines whether the result is returned using the 3252 * 32-bit or the 64-bit PAR format 3253 * * Instructions executed in Hyp mode always use the 64bit format 3254 * 3255 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3256 * * The Non-secure TTBCR.EAE bit is set to 1 3257 * * The implementation includes EL2, and the value of HCR.VM is 1 3258 * 3259 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3260 * 3261 * ATS1Hx always uses the 64bit format. 3262 */ 3263 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3264 3265 if (arm_feature(env, ARM_FEATURE_EL2)) { 3266 if (mmu_idx == ARMMMUIdx_E10_0 || 3267 mmu_idx == ARMMMUIdx_E10_1 || 3268 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3269 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3270 } else { 3271 format64 |= arm_current_el(env) == 2; 3272 } 3273 } 3274 } 3275 3276 if (format64) { 3277 /* Create a 64-bit PAR */ 3278 par64 = (1 << 11); /* LPAE bit always set */ 3279 if (!ret) { 3280 par64 |= phys_addr & ~0xfffULL; 3281 if (!attrs.secure) { 3282 par64 |= (1 << 9); /* NS */ 3283 } 3284 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 3285 par64 |= cacheattrs.shareability << 7; /* SH */ 3286 } else { 3287 uint32_t fsr = arm_fi_to_lfsc(&fi); 3288 3289 par64 |= 1; /* F */ 3290 par64 |= (fsr & 0x3f) << 1; /* FS */ 3291 if (fi.stage2) { 3292 par64 |= (1 << 9); /* S */ 3293 } 3294 if (fi.s1ptw) { 3295 par64 |= (1 << 8); /* PTW */ 3296 } 3297 } 3298 } else { 3299 /* fsr is a DFSR/IFSR value for the short descriptor 3300 * translation table format (with WnR always clear). 3301 * Convert it to a 32-bit PAR. 3302 */ 3303 if (!ret) { 3304 /* We do not set any attribute bits in the PAR */ 3305 if (page_size == (1 << 24) 3306 && arm_feature(env, ARM_FEATURE_V7)) { 3307 par64 = (phys_addr & 0xff000000) | (1 << 1); 3308 } else { 3309 par64 = phys_addr & 0xfffff000; 3310 } 3311 if (!attrs.secure) { 3312 par64 |= (1 << 9); /* NS */ 3313 } 3314 } else { 3315 uint32_t fsr = arm_fi_to_sfsc(&fi); 3316 3317 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3318 ((fsr & 0xf) << 1) | 1; 3319 } 3320 } 3321 return par64; 3322 } 3323 #endif /* CONFIG_TCG */ 3324 3325 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3326 { 3327 #ifdef CONFIG_TCG 3328 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3329 uint64_t par64; 3330 ARMMMUIdx mmu_idx; 3331 int el = arm_current_el(env); 3332 bool secure = arm_is_secure_below_el3(env); 3333 3334 switch (ri->opc2 & 6) { 3335 case 0: 3336 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3337 switch (el) { 3338 case 3: 3339 mmu_idx = ARMMMUIdx_SE3; 3340 break; 3341 case 2: 3342 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ 3343 /* fall through */ 3344 case 1: 3345 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { 3346 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN 3347 : ARMMMUIdx_Stage1_E1_PAN); 3348 } else { 3349 mmu_idx = secure ? 
ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1; 3350 } 3351 break; 3352 default: 3353 g_assert_not_reached(); 3354 } 3355 break; 3356 case 2: 3357 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3358 switch (el) { 3359 case 3: 3360 mmu_idx = ARMMMUIdx_SE10_0; 3361 break; 3362 case 2: 3363 g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ 3364 mmu_idx = ARMMMUIdx_Stage1_E0; 3365 break; 3366 case 1: 3367 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0; 3368 break; 3369 default: 3370 g_assert_not_reached(); 3371 } 3372 break; 3373 case 4: 3374 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3375 mmu_idx = ARMMMUIdx_E10_1; 3376 break; 3377 case 6: 3378 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3379 mmu_idx = ARMMMUIdx_E10_0; 3380 break; 3381 default: 3382 g_assert_not_reached(); 3383 } 3384 3385 par64 = do_ats_write(env, value, access_type, mmu_idx); 3386 3387 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3388 #else 3389 /* Handled by hardware accelerator. */ 3390 g_assert_not_reached(); 3391 #endif /* CONFIG_TCG */ 3392 } 3393 3394 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3395 uint64_t value) 3396 { 3397 #ifdef CONFIG_TCG 3398 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3399 uint64_t par64; 3400 3401 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); 3402 3403 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3404 #else 3405 /* Handled by hardware accelerator. */ 3406 g_assert_not_reached(); 3407 #endif /* CONFIG_TCG */ 3408 } 3409 3410 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3411 bool isread) 3412 { 3413 if (arm_current_el(env) == 3 && 3414 !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { 3415 return CP_ACCESS_TRAP; 3416 } 3417 return CP_ACCESS_OK; 3418 } 3419 3420 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3421 uint64_t value) 3422 { 3423 #ifdef CONFIG_TCG 3424 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3425 ARMMMUIdx mmu_idx; 3426 int secure = arm_is_secure_below_el3(env); 3427 3428 switch (ri->opc2 & 6) { 3429 case 0: 3430 switch (ri->opc1) { 3431 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3432 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { 3433 mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN 3434 : ARMMMUIdx_Stage1_E1_PAN); 3435 } else { 3436 mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1; 3437 } 3438 break; 3439 case 4: /* AT S1E2R, AT S1E2W */ 3440 mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2; 3441 break; 3442 case 6: /* AT S1E3R, AT S1E3W */ 3443 mmu_idx = ARMMMUIdx_SE3; 3444 break; 3445 default: 3446 g_assert_not_reached(); 3447 } 3448 break; 3449 case 2: /* AT S1E0R, AT S1E0W */ 3450 mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0; 3451 break; 3452 case 4: /* AT S12E1R, AT S12E1W */ 3453 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; 3454 break; 3455 case 6: /* AT S12E0R, AT S12E0W */ 3456 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; 3457 break; 3458 default: 3459 g_assert_not_reached(); 3460 } 3461 3462 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3463 #else 3464 /* Handled by hardware accelerator. 
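     * (With KVM or another accelerator the AT* instructions are handled by
     * the hardware/hypervisor, so this TCG-only path must never be
     * reached.)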
*/ 3465 g_assert_not_reached(); 3466 #endif /* CONFIG_TCG */ 3467 } 3468 #endif 3469 3470 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3471 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3472 .access = PL1_RW, .resetvalue = 0, 3473 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3474 offsetoflow32(CPUARMState, cp15.par_ns) }, 3475 .writefn = par_write }, 3476 #ifndef CONFIG_USER_ONLY 3477 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3478 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3479 .access = PL1_W, .accessfn = ats_access, 3480 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 3481 #endif 3482 }; 3483 3484 /* Return basic MPU access permission bits. */ 3485 static uint32_t simple_mpu_ap_bits(uint32_t val) 3486 { 3487 uint32_t ret; 3488 uint32_t mask; 3489 int i; 3490 ret = 0; 3491 mask = 3; 3492 for (i = 0; i < 16; i += 2) { 3493 ret |= (val >> i) & mask; 3494 mask <<= 2; 3495 } 3496 return ret; 3497 } 3498 3499 /* Pad basic MPU access permission bits to extended format. */ 3500 static uint32_t extended_mpu_ap_bits(uint32_t val) 3501 { 3502 uint32_t ret; 3503 uint32_t mask; 3504 int i; 3505 ret = 0; 3506 mask = 3; 3507 for (i = 0; i < 16; i += 2) { 3508 ret |= (val & mask) << i; 3509 mask <<= 2; 3510 } 3511 return ret; 3512 } 3513 3514 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3515 uint64_t value) 3516 { 3517 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3518 } 3519 3520 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3521 { 3522 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3523 } 3524 3525 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3526 uint64_t value) 3527 { 3528 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3529 } 3530 3531 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3532 { 3533 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3534 } 3535 3536 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3537 { 3538 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3539 3540 if (!u32p) { 3541 return 0; 3542 } 3543 3544 u32p += env->pmsav7.rnr[M_REG_NS]; 3545 return *u32p; 3546 } 3547 3548 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3549 uint64_t value) 3550 { 3551 ARMCPU *cpu = env_archcpu(env); 3552 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3553 3554 if (!u32p) { 3555 return; 3556 } 3557 3558 u32p += env->pmsav7.rnr[M_REG_NS]; 3559 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3560 *u32p = value; 3561 } 3562 3563 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3564 uint64_t value) 3565 { 3566 ARMCPU *cpu = env_archcpu(env); 3567 uint32_t nrgs = cpu->pmsav7_dregion; 3568 3569 if (value >= nrgs) { 3570 qemu_log_mask(LOG_GUEST_ERROR, 3571 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3572 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3573 return; 3574 } 3575 3576 raw_write(env, ri, value); 3577 } 3578 3579 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3580 /* Reset for all these registers is handled in arm_cpu_reset(), 3581 * because the PMSAv7 is also used by M-profile CPUs, which do 3582 * not register cpregs but still need the state to be reset. 
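* (Hence each entry below uses arm_cp_reset_ignore as its resetfn.)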
3583 */ 3584 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3585 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3586 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3587 .readfn = pmsav7_read, .writefn = pmsav7_write, 3588 .resetfn = arm_cp_reset_ignore }, 3589 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3590 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3591 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3592 .readfn = pmsav7_read, .writefn = pmsav7_write, 3593 .resetfn = arm_cp_reset_ignore }, 3594 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3595 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3596 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3597 .readfn = pmsav7_read, .writefn = pmsav7_write, 3598 .resetfn = arm_cp_reset_ignore }, 3599 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3600 .access = PL1_RW, 3601 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3602 .writefn = pmsav7_rgnr_write, 3603 .resetfn = arm_cp_reset_ignore }, 3604 }; 3605 3606 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3607 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3608 .access = PL1_RW, .type = ARM_CP_ALIAS, 3609 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3610 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3611 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3612 .access = PL1_RW, .type = ARM_CP_ALIAS, 3613 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3614 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3615 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3616 .access = PL1_RW, 3617 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3618 .resetvalue = 0, }, 3619 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3620 .access = PL1_RW, 3621 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3622 .resetvalue = 0, }, 3623 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3624 .access = PL1_RW, 3625 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3626 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3627 .access = PL1_RW, 3628 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3629 /* Protection region base and size registers */ 3630 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3631 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3632 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3633 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3634 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3635 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3636 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3637 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3638 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3639 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3640 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3641 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3642 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3643 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3644 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3645 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3646 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3647 .fieldoffset = 
offsetof(CPUARMState, cp15.c6_region[5]) }, 3648 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 3649 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3650 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 3651 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 3652 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3653 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 3654 }; 3655 3656 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 3657 uint64_t value) 3658 { 3659 TCR *tcr = raw_ptr(env, ri); 3660 int maskshift = extract32(value, 0, 3); 3661 3662 if (!arm_feature(env, ARM_FEATURE_V8)) { 3663 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 3664 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 3665 * using Long-descriptor translation table format */ 3666 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 3667 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 3668 /* In an implementation that includes the Security Extensions 3669 * TTBCR has additional fields PD0 [4] and PD1 [5] for 3670 * Short-descriptor translation table format. 3671 */ 3672 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 3673 } else { 3674 value &= TTBCR_N; 3675 } 3676 } 3677 3678 /* Update the masks corresponding to the TCR bank being written 3679 * Note that we always calculate mask and base_mask, but 3680 * they are only used for short-descriptor tables (i.e. if EAE is 0); 3681 * for long-descriptor tables the TCR fields are used differently 3682 * and the mask and base_mask values are meaningless. 3683 */ 3684 tcr->raw_tcr = value; 3685 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 3686 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 3687 } 3688 3689 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3690 uint64_t value) 3691 { 3692 ARMCPU *cpu = env_archcpu(env); 3693 TCR *tcr = raw_ptr(env, ri); 3694 3695 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3696 /* With LPAE the TTBCR could result in a change of ASID 3697 * via the TTBCR.A1 bit, so do a TLB flush. 3698 */ 3699 tlb_flush(CPU(cpu)); 3700 } 3701 /* Preserve the high half of TCR_EL1, set via TTBCR2. */ 3702 value = deposit64(tcr->raw_tcr, 0, 32, value); 3703 vmsa_ttbcr_raw_write(env, ri, value); 3704 } 3705 3706 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3707 { 3708 TCR *tcr = raw_ptr(env, ri); 3709 3710 /* Reset both the TCR as well as the masks corresponding to the bank of 3711 * the TCR being reset. 3712 */ 3713 tcr->raw_tcr = 0; 3714 tcr->mask = 0; 3715 tcr->base_mask = 0xffffc000u; 3716 } 3717 3718 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, 3719 uint64_t value) 3720 { 3721 ARMCPU *cpu = env_archcpu(env); 3722 TCR *tcr = raw_ptr(env, ri); 3723 3724 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 3725 tlb_flush(CPU(cpu)); 3726 tcr->raw_tcr = value; 3727 } 3728 3729 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3730 uint64_t value) 3731 { 3732 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 3733 if (cpreg_field_is_64bit(ri) && 3734 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3735 ARMCPU *cpu = env_archcpu(env); 3736 tlb_flush(CPU(cpu)); 3737 } 3738 raw_write(env, ri, value); 3739 } 3740 3741 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3742 uint64_t value) 3743 { 3744 /* 3745 * If we are running with E2&0 regime, then an ASID is active.
3746 * Flush if that might be changing. Note we're not checking 3747 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 3748 * holds the active ASID, only checking the field that might. 3749 */ 3750 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 3751 (arm_hcr_el2_eff(env) & HCR_E2H)) { 3752 uint16_t mask = ARMMMUIdxBit_E20_2 | 3753 ARMMMUIdxBit_E20_2_PAN | 3754 ARMMMUIdxBit_E20_0; 3755 3756 if (arm_is_secure_below_el3(env)) { 3757 mask >>= ARM_MMU_IDX_A_NS; 3758 } 3759 3760 tlb_flush_by_mmuidx(env_cpu(env), mask); 3761 } 3762 raw_write(env, ri, value); 3763 } 3764 3765 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3766 uint64_t value) 3767 { 3768 ARMCPU *cpu = env_archcpu(env); 3769 CPUState *cs = CPU(cpu); 3770 3771 /* 3772 * A change in VMID to the stage2 page table (Stage2) invalidates 3773 * the combined stage 1&2 tlbs (EL10_1 and EL10_0). 3774 */ 3775 if (raw_read(env, ri) != value) { 3776 uint16_t mask = ARMMMUIdxBit_E10_1 | 3777 ARMMMUIdxBit_E10_1_PAN | 3778 ARMMMUIdxBit_E10_0; 3779 3780 if (arm_is_secure_below_el3(env)) { 3781 mask >>= ARM_MMU_IDX_A_NS; 3782 } 3783 3784 tlb_flush_by_mmuidx(cs, mask); 3785 raw_write(env, ri, value); 3786 } 3787 } 3788 3789 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 3790 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3791 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, 3792 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 3793 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 3794 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3795 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 3796 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 3797 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 3798 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 3799 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 3800 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 3801 offsetof(CPUARMState, cp15.dfar_ns) } }, 3802 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 3803 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 3804 .access = PL1_RW, .accessfn = access_tvm_trvm, 3805 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 3806 .resetvalue = 0, }, 3807 }; 3808 3809 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 3810 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 3811 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 3812 .access = PL1_RW, .accessfn = access_tvm_trvm, 3813 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 3814 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 3815 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 3816 .access = PL1_RW, .accessfn = access_tvm_trvm, 3817 .writefn = vmsa_ttbr_write, .resetvalue = 0, 3818 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3819 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 3820 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 3821 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 3822 .access = PL1_RW, .accessfn = access_tvm_trvm, 3823 .writefn = vmsa_ttbr_write, .resetvalue = 0, 3824 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3825 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 3826 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 3827 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3828 .access = PL1_RW, .accessfn = access_tvm_trvm, 3829 .writefn = vmsa_tcr_el12_write, 3830 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 3831 .fieldoffset = 
offsetof(CPUARMState, cp15.tcr_el[1]) }, 3832 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3833 .access = PL1_RW, .accessfn = access_tvm_trvm, 3834 .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 3835 .raw_writefn = vmsa_ttbcr_raw_write, 3836 /* No offsetoflow32 -- pass the entire TCR to writefn/raw_writefn. */ 3837 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.tcr_el[3]), 3838 offsetof(CPUARMState, cp15.tcr_el[1])} }, 3839 }; 3840 3841 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing 3842 * qemu tlbs nor adjusting cached masks. 3843 */ 3844 static const ARMCPRegInfo ttbcr2_reginfo = { 3845 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 3846 .access = PL1_RW, .accessfn = access_tvm_trvm, 3847 .type = ARM_CP_ALIAS, 3848 .bank_fieldoffsets = { 3849 offsetofhigh32(CPUARMState, cp15.tcr_el[3].raw_tcr), 3850 offsetofhigh32(CPUARMState, cp15.tcr_el[1].raw_tcr), 3851 }, 3852 }; 3853 3854 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 3855 uint64_t value) 3856 { 3857 env->cp15.c15_ticonfig = value & 0xe7; 3858 /* The OS_TYPE bit in this register changes the reported CPUID! */ 3859 env->cp15.c0_cpuid = (value & (1 << 5)) ? 3860 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 3861 } 3862 3863 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 3864 uint64_t value) 3865 { 3866 env->cp15.c15_threadid = value & 0xffff; 3867 } 3868 3869 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 3870 uint64_t value) 3871 { 3872 /* Wait-for-interrupt (deprecated) */ 3873 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 3874 } 3875 3876 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 3877 uint64_t value) 3878 { 3879 /* On OMAP there are registers indicating the max/min index of dcache lines 3880 * containing a dirty line; cache flush operations have to reset these. 
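* Since QEMU does not model the cache there are never any dirty lines, so a flush simply restores the reset values that the IMAX (0x000) and IMIN (0xff0) reginfo entries below also use.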
3881 */ 3882 env->cp15.c15_i_max = 0x000; 3883 env->cp15.c15_i_min = 0xff0; 3884 } 3885 3886 static const ARMCPRegInfo omap_cp_reginfo[] = { 3887 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 3888 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 3889 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 3890 .resetvalue = 0, }, 3891 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 3892 .access = PL1_RW, .type = ARM_CP_NOP }, 3893 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 3894 .access = PL1_RW, 3895 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 3896 .writefn = omap_ticonfig_write }, 3897 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 3898 .access = PL1_RW, 3899 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 3900 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 3901 .access = PL1_RW, .resetvalue = 0xff0, 3902 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 3903 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 3904 .access = PL1_RW, 3905 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 3906 .writefn = omap_threadid_write }, 3907 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 3908 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3909 .type = ARM_CP_NO_RAW, 3910 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 3911 /* TODO: Peripheral port remap register: 3912 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 3913 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 3914 * when MMU is off. 3915 */ 3916 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 3917 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 3918 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 3919 .writefn = omap_cachemaint_write }, 3920 { .name = "C9", .cp = 15, .crn = 9, 3921 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 3922 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 3923 }; 3924 3925 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3926 uint64_t value) 3927 { 3928 env->cp15.c15_cpar = value & 0x3fff; 3929 } 3930 3931 static const ARMCPRegInfo xscale_cp_reginfo[] = { 3932 { .name = "XSCALE_CPAR", 3933 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3934 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 3935 .writefn = xscale_cpar_write, }, 3936 { .name = "XSCALE_AUXCR", 3937 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 3938 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 3939 .resetvalue = 0, }, 3940 /* XScale specific cache-lockdown: since we have no cache we NOP these 3941 * and hope the guest does not really rely on cache behaviour. 
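* (Hence the ARM_CP_NOP entries below: lock and unlock requests are simply accepted and ignored.)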
*/ 3943 { .name = "XSCALE_LOCK_ICACHE_LINE", 3944 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 3945 .access = PL1_W, .type = ARM_CP_NOP }, 3946 { .name = "XSCALE_UNLOCK_ICACHE", 3947 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 3948 .access = PL1_W, .type = ARM_CP_NOP }, 3949 { .name = "XSCALE_DCACHE_LOCK", 3950 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 3951 .access = PL1_RW, .type = ARM_CP_NOP }, 3952 { .name = "XSCALE_UNLOCK_DCACHE", 3953 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 3954 .access = PL1_W, .type = ARM_CP_NOP }, 3955 }; 3956 3957 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 3958 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 3959 * implementation of this implementation-defined space. 3960 * Ideally this should eventually disappear in favour of actually 3961 * implementing the correct behaviour for all cores. 3962 */ 3963 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 3964 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 3965 .access = PL1_RW, 3966 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 3967 .resetvalue = 0 }, 3968 }; 3969 3970 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 3971 /* Cache status: RAZ because we have no cache so it's always clean */ 3972 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 3973 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3974 .resetvalue = 0 }, 3975 }; 3976 3977 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 3978 /* We never have a block transfer operation in progress */ 3979 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 3980 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3981 .resetvalue = 0 }, 3982 /* The cache ops themselves: these all NOP for QEMU */ 3983 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 3984 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3985 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 3986 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3987 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 3988 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3989 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 3990 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3991 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 3992 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3993 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 3994 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3995 }; 3996 3997 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 3998 /* The cache test-and-clean instructions always return (1 << 30) 3999 * to indicate that there are no dirty cache lines.
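* Bit 30 is the Z flag position when the result is written to the CPSR
* flags, so a guest polling loop of the usual form, for example
*     loop:  mrc p15, 0, r15, c7, c10, 3   @ test and clean DCache
*            bne loop
* terminates on its first iteration (the mnemonic here is illustrative;
* the key point is that we always report the cache as clean).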
4000 */ 4001 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 4002 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4003 .resetvalue = (1 << 30) }, 4004 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 4005 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4006 .resetvalue = (1 << 30) }, 4007 }; 4008 4009 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 4010 /* Ignore ReadBuffer accesses */ 4011 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 4012 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4013 .access = PL1_RW, .resetvalue = 0, 4014 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 4015 }; 4016 4017 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4018 { 4019 unsigned int cur_el = arm_current_el(env); 4020 4021 if (arm_is_el2_enabled(env) && cur_el == 1) { 4022 return env->cp15.vpidr_el2; 4023 } 4024 return raw_read(env, ri); 4025 } 4026 4027 static uint64_t mpidr_read_val(CPUARMState *env) 4028 { 4029 ARMCPU *cpu = env_archcpu(env); 4030 uint64_t mpidr = cpu->mp_affinity; 4031 4032 if (arm_feature(env, ARM_FEATURE_V7MP)) { 4033 mpidr |= (1U << 31); 4034 /* Cores which are uniprocessor (non-coherent) 4035 * but still implement the MP extensions set 4036 * bit 30. (For instance, Cortex-R5). 4037 */ 4038 if (cpu->mp_is_up) { 4039 mpidr |= (1u << 30); 4040 } 4041 } 4042 return mpidr; 4043 } 4044 4045 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4046 { 4047 unsigned int cur_el = arm_current_el(env); 4048 4049 if (arm_is_el2_enabled(env) && cur_el == 1) { 4050 return env->cp15.vmpidr_el2; 4051 } 4052 return mpidr_read_val(env); 4053 } 4054 4055 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4056 /* NOP AMAIR0/1 */ 4057 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4058 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4059 .access = PL1_RW, .accessfn = access_tvm_trvm, 4060 .type = ARM_CP_CONST, .resetvalue = 0 }, 4061 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4062 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4063 .access = PL1_RW, .accessfn = access_tvm_trvm, 4064 .type = ARM_CP_CONST, .resetvalue = 0 }, 4065 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4066 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4067 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4068 offsetof(CPUARMState, cp15.par_ns)} }, 4069 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4070 .access = PL1_RW, .accessfn = access_tvm_trvm, 4071 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4072 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4073 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4074 .writefn = vmsa_ttbr_write, }, 4075 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4076 .access = PL1_RW, .accessfn = access_tvm_trvm, 4077 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4078 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4079 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4080 .writefn = vmsa_ttbr_write, }, 4081 }; 4082 4083 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4084 { 4085 return vfp_get_fpcr(env); 4086 } 4087 4088 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4089 uint64_t value) 4090 { 4091 vfp_set_fpcr(env, value); 4092 } 4093 4094 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4095 { 4096 return vfp_get_fpsr(env); 4097 } 4098 4099 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4100 uint64_t value) 4101 { 4102 vfp_set_fpsr(env, 
value); 4103 } 4104 4105 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4106 bool isread) 4107 { 4108 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4109 return CP_ACCESS_TRAP; 4110 } 4111 return CP_ACCESS_OK; 4112 } 4113 4114 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4115 uint64_t value) 4116 { 4117 env->daif = value & PSTATE_DAIF; 4118 } 4119 4120 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4121 { 4122 return env->pstate & PSTATE_PAN; 4123 } 4124 4125 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4126 uint64_t value) 4127 { 4128 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4129 } 4130 4131 static const ARMCPRegInfo pan_reginfo = { 4132 .name = "PAN", .state = ARM_CP_STATE_AA64, 4133 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4134 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4135 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4136 }; 4137 4138 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4139 { 4140 return env->pstate & PSTATE_UAO; 4141 } 4142 4143 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4144 uint64_t value) 4145 { 4146 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4147 } 4148 4149 static const ARMCPRegInfo uao_reginfo = { 4150 .name = "UAO", .state = ARM_CP_STATE_AA64, 4151 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4152 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4153 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4154 }; 4155 4156 static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri) 4157 { 4158 return env->pstate & PSTATE_DIT; 4159 } 4160 4161 static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri, 4162 uint64_t value) 4163 { 4164 env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); 4165 } 4166 4167 static const ARMCPRegInfo dit_reginfo = { 4168 .name = "DIT", .state = ARM_CP_STATE_AA64, 4169 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5, 4170 .type = ARM_CP_NO_RAW, .access = PL0_RW, 4171 .readfn = aa64_dit_read, .writefn = aa64_dit_write 4172 }; 4173 4174 static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri) 4175 { 4176 return env->pstate & PSTATE_SSBS; 4177 } 4178 4179 static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri, 4180 uint64_t value) 4181 { 4182 env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); 4183 } 4184 4185 static const ARMCPRegInfo ssbs_reginfo = { 4186 .name = "SSBS", .state = ARM_CP_STATE_AA64, 4187 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6, 4188 .type = ARM_CP_NO_RAW, .access = PL0_RW, 4189 .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write 4190 }; 4191 4192 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, 4193 const ARMCPRegInfo *ri, 4194 bool isread) 4195 { 4196 /* Cache invalidate/clean to Point of Coherency or Persistence... */ 4197 switch (arm_current_el(env)) { 4198 case 0: 4199 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4200 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4201 return CP_ACCESS_TRAP; 4202 } 4203 /* fall through */ 4204 case 1: 4205 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. 
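Note the fall through from the EL0 case above: an EL0 access that passes the SCTLR_EL1.UCI check is still subject to this HCR_EL2.TPCP trap.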
*/ 4206 if (arm_hcr_el2_eff(env) & HCR_TPCP) { 4207 return CP_ACCESS_TRAP_EL2; 4208 } 4209 break; 4210 } 4211 return CP_ACCESS_OK; 4212 } 4213 4214 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, 4215 const ARMCPRegInfo *ri, 4216 bool isread) 4217 { 4218 /* Cache invalidate/clean to Point of Unification... */ 4219 switch (arm_current_el(env)) { 4220 case 0: 4221 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4222 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4223 return CP_ACCESS_TRAP; 4224 } 4225 /* fall through */ 4226 case 1: 4227 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ 4228 if (arm_hcr_el2_eff(env) & HCR_TPU) { 4229 return CP_ACCESS_TRAP_EL2; 4230 } 4231 break; 4232 } 4233 return CP_ACCESS_OK; 4234 } 4235 4236 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 4237 * Page D4-1736 (DDI0487A.b) 4238 */ 4239 4240 static int vae1_tlbmask(CPUARMState *env) 4241 { 4242 uint64_t hcr = arm_hcr_el2_eff(env); 4243 uint16_t mask; 4244 4245 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4246 mask = ARMMMUIdxBit_E20_2 | 4247 ARMMMUIdxBit_E20_2_PAN | 4248 ARMMMUIdxBit_E20_0; 4249 } else { 4250 mask = ARMMMUIdxBit_E10_1 | 4251 ARMMMUIdxBit_E10_1_PAN | 4252 ARMMMUIdxBit_E10_0; 4253 } 4254 4255 if (arm_is_secure_below_el3(env)) { 4256 mask >>= ARM_MMU_IDX_A_NS; 4257 } 4258 4259 return mask; 4260 } 4261 4262 /* Return 56 if TBI is enabled, 64 otherwise. */ 4263 static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, 4264 uint64_t addr) 4265 { 4266 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 4267 int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 4268 int select = extract64(addr, 55, 1); 4269 4270 return (tbi >> select) & 1 ? 56 : 64; 4271 } 4272 4273 static int vae1_tlbbits(CPUARMState *env, uint64_t addr) 4274 { 4275 uint64_t hcr = arm_hcr_el2_eff(env); 4276 ARMMMUIdx mmu_idx; 4277 4278 /* Only the regime of the mmu_idx below is significant. */ 4279 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4280 mmu_idx = ARMMMUIdx_E20_0; 4281 } else { 4282 mmu_idx = ARMMMUIdx_E10_0; 4283 } 4284 4285 if (arm_is_secure_below_el3(env)) { 4286 mmu_idx &= ~ARM_MMU_IDX_A_NS; 4287 } 4288 4289 return tlbbits_for_regime(env, mmu_idx, addr); 4290 } 4291 4292 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4293 uint64_t value) 4294 { 4295 CPUState *cs = env_cpu(env); 4296 int mask = vae1_tlbmask(env); 4297 4298 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4299 } 4300 4301 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4302 uint64_t value) 4303 { 4304 CPUState *cs = env_cpu(env); 4305 int mask = vae1_tlbmask(env); 4306 4307 if (tlb_force_broadcast(env)) { 4308 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4309 } else { 4310 tlb_flush_by_mmuidx(cs, mask); 4311 } 4312 } 4313 4314 static int alle1_tlbmask(CPUARMState *env) 4315 { 4316 /* 4317 * Note that the 'ALL' scope must invalidate both stage 1 and 4318 * stage 2 translations, whereas most other scopes only invalidate 4319 * stage 1 translations. 
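* In this implementation stage 2 results are only cached as part of the combined stage 1+2 entries in the EL1&0 mmu indexes, so flushing those indexes covers both stages.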
4320 */ 4321 if (arm_is_secure_below_el3(env)) { 4322 return ARMMMUIdxBit_SE10_1 | 4323 ARMMMUIdxBit_SE10_1_PAN | 4324 ARMMMUIdxBit_SE10_0; 4325 } else { 4326 return ARMMMUIdxBit_E10_1 | 4327 ARMMMUIdxBit_E10_1_PAN | 4328 ARMMMUIdxBit_E10_0; 4329 } 4330 } 4331 4332 static int e2_tlbmask(CPUARMState *env) 4333 { 4334 if (arm_is_secure_below_el3(env)) { 4335 return ARMMMUIdxBit_SE20_0 | 4336 ARMMMUIdxBit_SE20_2 | 4337 ARMMMUIdxBit_SE20_2_PAN | 4338 ARMMMUIdxBit_SE2; 4339 } else { 4340 return ARMMMUIdxBit_E20_0 | 4341 ARMMMUIdxBit_E20_2 | 4342 ARMMMUIdxBit_E20_2_PAN | 4343 ARMMMUIdxBit_E2; 4344 } 4345 } 4346 4347 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4348 uint64_t value) 4349 { 4350 CPUState *cs = env_cpu(env); 4351 int mask = alle1_tlbmask(env); 4352 4353 tlb_flush_by_mmuidx(cs, mask); 4354 } 4355 4356 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4357 uint64_t value) 4358 { 4359 CPUState *cs = env_cpu(env); 4360 int mask = e2_tlbmask(env); 4361 4362 tlb_flush_by_mmuidx(cs, mask); 4363 } 4364 4365 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4366 uint64_t value) 4367 { 4368 ARMCPU *cpu = env_archcpu(env); 4369 CPUState *cs = CPU(cpu); 4370 4371 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); 4372 } 4373 4374 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4375 uint64_t value) 4376 { 4377 CPUState *cs = env_cpu(env); 4378 int mask = alle1_tlbmask(env); 4379 4380 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4381 } 4382 4383 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4384 uint64_t value) 4385 { 4386 CPUState *cs = env_cpu(env); 4387 int mask = e2_tlbmask(env); 4388 4389 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4390 } 4391 4392 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4393 uint64_t value) 4394 { 4395 CPUState *cs = env_cpu(env); 4396 4397 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); 4398 } 4399 4400 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4401 uint64_t value) 4402 { 4403 /* Invalidate by VA, EL2 4404 * Currently handles both VAE2 and VALE2, since we don't support 4405 * flush-last-level-only. 4406 */ 4407 CPUState *cs = env_cpu(env); 4408 int mask = e2_tlbmask(env); 4409 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4410 4411 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4412 } 4413 4414 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4415 uint64_t value) 4416 { 4417 /* Invalidate by VA, EL3 4418 * Currently handles both VAE3 and VALE3, since we don't support 4419 * flush-last-level-only. 4420 */ 4421 ARMCPU *cpu = env_archcpu(env); 4422 CPUState *cs = CPU(cpu); 4423 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4424 4425 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); 4426 } 4427 4428 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4429 uint64_t value) 4430 { 4431 CPUState *cs = env_cpu(env); 4432 int mask = vae1_tlbmask(env); 4433 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4434 int bits = vae1_tlbbits(env, pageaddr); 4435 4436 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); 4437 } 4438 4439 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4440 uint64_t value) 4441 { 4442 /* Invalidate by VA, EL1&0 (AArch64 version). 
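* The operand has the architectural TLBI VAE1 layout: VA[55:12] in bits [43:0], recovered below via 'value << 12' plus sextract64(); the ASID field in bits [63:48] is discarded by that extraction.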
4443 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 4444 * since we don't support flush-for-specific-ASID-only or 4445 * flush-last-level-only. 4446 */ 4447 CPUState *cs = env_cpu(env); 4448 int mask = vae1_tlbmask(env); 4449 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4450 int bits = vae1_tlbbits(env, pageaddr); 4451 4452 if (tlb_force_broadcast(env)) { 4453 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); 4454 } else { 4455 tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); 4456 } 4457 } 4458 4459 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4460 uint64_t value) 4461 { 4462 CPUState *cs = env_cpu(env); 4463 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4464 bool secure = arm_is_secure_below_el3(env); 4465 int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2; 4466 int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2, 4467 pageaddr); 4468 4469 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); 4470 } 4471 4472 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4473 uint64_t value) 4474 { 4475 CPUState *cs = env_cpu(env); 4476 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4477 int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr); 4478 4479 tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, 4480 ARMMMUIdxBit_SE3, bits); 4481 } 4482 4483 #ifdef TARGET_AARCH64 4484 typedef struct { 4485 uint64_t base; 4486 uint64_t length; 4487 } TLBIRange; 4488 4489 static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx, 4490 uint64_t value) 4491 { 4492 unsigned int page_size_granule, page_shift, num, scale, exponent; 4493 /* Extract one bit to represent the va selector in use. */ 4494 uint64_t select = sextract64(value, 36, 1); 4495 ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true); 4496 TLBIRange ret = { }; 4497 4498 page_size_granule = extract64(value, 46, 2); 4499 4500 /* The granule encoded in value must match the granule in use. */ 4501 if (page_size_granule != (param.using64k ? 3 : param.using16k ? 2 : 1)) { 4502 qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n", 4503 page_size_granule); 4504 return ret; 4505 } 4506 4507 page_shift = (page_size_granule - 1) * 2 + 12; 4508 num = extract64(value, 39, 5); 4509 scale = extract64(value, 44, 2); 4510 exponent = (5 * scale) + 1; 4511 4512 ret.length = (num + 1) << (exponent + page_shift); 4513 4514 if (param.select) { 4515 ret.base = sextract64(value, 0, 37); 4516 } else { 4517 ret.base = extract64(value, 0, 37); 4518 } 4519 if (param.ds) { 4520 /* 4521 * With DS=1, BaseADDR is always shifted 16 so that it is able 4522 * to address all 52 va bits. The input address is perforce 4523 * aligned on a 64k boundary regardless of translation granule. 
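* As an illustration of the decode above: a 4k granule
* (page_size_granule 1, so page_shift 12) with scale 1 and num 3 gives
* exponent 5 * 1 + 1 = 6 and a length of (3 + 1) << (6 + 12) bytes,
* i.e. 1MB. DS=1 only changes this final shift of the base address.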
4524 */ 4525 page_shift = 16; 4526 } 4527 ret.base <<= page_shift; 4528 4529 return ret; 4530 } 4531 4532 static void do_rvae_write(CPUARMState *env, uint64_t value, 4533 int idxmap, bool synced) 4534 { 4535 ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); 4536 TLBIRange range; 4537 int bits; 4538 4539 range = tlbi_aa64_get_range(env, one_idx, value); 4540 bits = tlbbits_for_regime(env, one_idx, range.base); 4541 4542 if (synced) { 4543 tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), 4544 range.base, 4545 range.length, 4546 idxmap, 4547 bits); 4548 } else { 4549 tlb_flush_range_by_mmuidx(env_cpu(env), range.base, 4550 range.length, idxmap, bits); 4551 } 4552 } 4553 4554 static void tlbi_aa64_rvae1_write(CPUARMState *env, 4555 const ARMCPRegInfo *ri, 4556 uint64_t value) 4557 { 4558 /* 4559 * Invalidate by VA range, EL1&0. 4560 * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, 4561 * since we don't support flush-for-specific-ASID-only or 4562 * flush-last-level-only. 4563 */ 4564 4565 do_rvae_write(env, value, vae1_tlbmask(env), 4566 tlb_force_broadcast(env)); 4567 } 4568 4569 static void tlbi_aa64_rvae1is_write(CPUARMState *env, 4570 const ARMCPRegInfo *ri, 4571 uint64_t value) 4572 { 4573 /* 4574 * Invalidate by VA range, Inner/Outer Shareable EL1&0. 4575 * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, 4576 * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support 4577 * flush-for-specific-ASID-only, flush-last-level-only or inner/outer 4578 * shareable specific flushes. 4579 */ 4580 4581 do_rvae_write(env, value, vae1_tlbmask(env), true); 4582 } 4583 4584 static int vae2_tlbmask(CPUARMState *env) 4585 { 4586 return (arm_is_secure_below_el3(env) 4587 ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2); 4588 } 4589 4590 static void tlbi_aa64_rvae2_write(CPUARMState *env, 4591 const ARMCPRegInfo *ri, 4592 uint64_t value) 4593 { 4594 /* 4595 * Invalidate by VA range, EL2. 4596 * Currently handles all of RVAE2 and RVALE2, 4597 * since we don't support flush-for-specific-ASID-only or 4598 * flush-last-level-only. 4599 */ 4600 4601 do_rvae_write(env, value, vae2_tlbmask(env), 4602 tlb_force_broadcast(env)); 4603 4604 4605 } 4606 4607 static void tlbi_aa64_rvae2is_write(CPUARMState *env, 4608 const ARMCPRegInfo *ri, 4609 uint64_t value) 4610 { 4611 /* 4612 * Invalidate by VA range, Inner/Outer Shareable, EL2. 4613 * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, 4614 * since we don't support flush-for-specific-ASID-only, 4615 * flush-last-level-only or inner/outer shareable specific flushes. 4616 */ 4617 4618 do_rvae_write(env, value, vae2_tlbmask(env), true); 4619 4620 } 4621 4622 static void tlbi_aa64_rvae3_write(CPUARMState *env, 4623 const ARMCPRegInfo *ri, 4624 uint64_t value) 4625 { 4626 /* 4627 * Invalidate by VA range, EL3. 4628 * Currently handles all of RVAE3 and RVALE3, 4629 * since we don't support flush-for-specific-ASID-only or 4630 * flush-last-level-only. 4631 */ 4632 4633 do_rvae_write(env, value, ARMMMUIdxBit_SE3, 4634 tlb_force_broadcast(env)); 4635 } 4636 4637 static void tlbi_aa64_rvae3is_write(CPUARMState *env, 4638 const ARMCPRegInfo *ri, 4639 uint64_t value) 4640 { 4641 /* 4642 * Invalidate by VA range, EL3, Inner/Outer Shareable. 4643 * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, 4644 * since we don't support flush-for-specific-ASID-only, 4645 * flush-last-level-only or inner/outer specific flushes. 
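* (As with the other Inner/Outer Shareable forms, the flush is always broadcast to all CPUs, hence the 'true' synced argument passed to do_rvae_write() below.)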
4646 */ 4647 4648 do_rvae_write(env, value, ARMMMUIdxBit_SE3, true); 4649 } 4650 #endif 4651 4652 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4653 bool isread) 4654 { 4655 int cur_el = arm_current_el(env); 4656 4657 if (cur_el < 2) { 4658 uint64_t hcr = arm_hcr_el2_eff(env); 4659 4660 if (cur_el == 0) { 4661 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4662 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4663 return CP_ACCESS_TRAP_EL2; 4664 } 4665 } else { 4666 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4667 return CP_ACCESS_TRAP; 4668 } 4669 if (hcr & HCR_TDZ) { 4670 return CP_ACCESS_TRAP_EL2; 4671 } 4672 } 4673 } else if (hcr & HCR_TDZ) { 4674 return CP_ACCESS_TRAP_EL2; 4675 } 4676 } 4677 return CP_ACCESS_OK; 4678 } 4679 4680 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4681 { 4682 ARMCPU *cpu = env_archcpu(env); 4683 int dzp_bit = 1 << 4; 4684 4685 /* DZP indicates whether DC ZVA access is allowed */ 4686 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4687 dzp_bit = 0; 4688 } 4689 return cpu->dcz_blocksize | dzp_bit; 4690 } 4691 4692 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4693 bool isread) 4694 { 4695 if (!(env->pstate & PSTATE_SP)) { 4696 /* Access to SP_EL0 is undefined if it's being used as 4697 * the stack pointer. 4698 */ 4699 return CP_ACCESS_TRAP_UNCATEGORIZED; 4700 } 4701 return CP_ACCESS_OK; 4702 } 4703 4704 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4705 { 4706 return env->pstate & PSTATE_SP; 4707 } 4708 4709 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4710 { 4711 update_spsel(env, val); 4712 } 4713 4714 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4715 uint64_t value) 4716 { 4717 ARMCPU *cpu = env_archcpu(env); 4718 4719 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4720 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4721 value &= ~SCTLR_M; 4722 } 4723 4724 /* ??? Lots of these bits are not implemented. */ 4725 4726 if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { 4727 if (ri->opc1 == 6) { /* SCTLR_EL3 */ 4728 value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA); 4729 } else { 4730 value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF | 4731 SCTLR_ATA0 | SCTLR_ATA); 4732 } 4733 } 4734 4735 if (raw_read(env, ri) == value) { 4736 /* Skip the TLB flush if nothing actually changed; Linux likes 4737 * to do a lot of pointless SCTLR writes. 4738 */ 4739 return; 4740 } 4741 4742 raw_write(env, ri, value); 4743 4744 /* This may enable/disable the MMU, so do a TLB flush. */ 4745 tlb_flush(CPU(cpu)); 4746 4747 if (ri->type & ARM_CP_SUPPRESS_TB_END) { 4748 /* 4749 * Normally we would always end the TB on an SCTLR write; see the 4750 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 4751 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 4752 * of hflags from the translator, so do it here. 4753 */ 4754 arm_rebuild_hflags(env); 4755 } 4756 } 4757 4758 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4759 uint64_t value) 4760 { 4761 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4762 } 4763 4764 static const ARMCPRegInfo v8_cp_reginfo[] = { 4765 /* Minimal set of EL0-visible registers. This will need to be expanded 4766 * significantly for system emulation of AArch64 CPUs. 
4767 */ 4768 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4769 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4770 .access = PL0_RW, .type = ARM_CP_NZCV }, 4771 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4772 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4773 .type = ARM_CP_NO_RAW, 4774 .access = PL0_RW, .accessfn = aa64_daif_access, 4775 .fieldoffset = offsetof(CPUARMState, daif), 4776 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4777 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4778 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4779 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4780 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4781 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4782 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4783 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4784 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4785 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4786 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4787 .access = PL0_R, .type = ARM_CP_NO_RAW, 4788 .readfn = aa64_dczid_read }, 4789 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4790 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4791 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4792 #ifndef CONFIG_USER_ONLY 4793 /* Avoid overhead of an access check that always passes in user-mode */ 4794 .accessfn = aa64_zva_access, 4795 #endif 4796 }, 4797 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4798 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4799 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4800 /* Cache ops: all NOPs since we don't emulate caches */ 4801 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4802 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4803 .access = PL1_W, .type = ARM_CP_NOP, 4804 .accessfn = aa64_cacheop_pou_access }, 4805 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4806 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4807 .access = PL1_W, .type = ARM_CP_NOP, 4808 .accessfn = aa64_cacheop_pou_access }, 4809 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4810 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4811 .access = PL0_W, .type = ARM_CP_NOP, 4812 .accessfn = aa64_cacheop_pou_access }, 4813 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4814 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4815 .access = PL1_W, .accessfn = aa64_cacheop_poc_access, 4816 .type = ARM_CP_NOP }, 4817 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4818 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4819 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4820 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4821 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4822 .access = PL0_W, .type = ARM_CP_NOP, 4823 .accessfn = aa64_cacheop_poc_access }, 4824 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4825 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4826 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4827 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4828 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4829 .access = PL0_W, .type = ARM_CP_NOP, 4830 .accessfn = aa64_cacheop_pou_access }, 4831 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4832 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4833 .access = PL0_W, .type = ARM_CP_NOP, 4834 .accessfn = aa64_cacheop_poc_access }, 4835 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4836 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4837 
.access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4838 /* TLBI operations */ 4839 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4840 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4841 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4842 .writefn = tlbi_aa64_vmalle1is_write }, 4843 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4844 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4845 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4846 .writefn = tlbi_aa64_vae1is_write }, 4847 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4848 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4849 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4850 .writefn = tlbi_aa64_vmalle1is_write }, 4851 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4852 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4853 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4854 .writefn = tlbi_aa64_vae1is_write }, 4855 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4856 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4857 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4858 .writefn = tlbi_aa64_vae1is_write }, 4859 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4860 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4861 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4862 .writefn = tlbi_aa64_vae1is_write }, 4863 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4864 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4865 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4866 .writefn = tlbi_aa64_vmalle1_write }, 4867 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4868 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4869 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4870 .writefn = tlbi_aa64_vae1_write }, 4871 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4872 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4873 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4874 .writefn = tlbi_aa64_vmalle1_write }, 4875 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4876 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 4877 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4878 .writefn = tlbi_aa64_vae1_write }, 4879 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4880 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4881 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4882 .writefn = tlbi_aa64_vae1_write }, 4883 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4884 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4885 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4886 .writefn = tlbi_aa64_vae1_write }, 4887 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4888 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4889 .access = PL2_W, .type = ARM_CP_NOP }, 4890 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4891 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4892 .access = PL2_W, .type = ARM_CP_NOP }, 4893 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4894 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4895 .access = PL2_W, .type = ARM_CP_NO_RAW, 4896 .writefn = tlbi_aa64_alle1is_write }, 4897 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4898 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 4899 .access = PL2_W, .type = ARM_CP_NO_RAW, 4900 .writefn = 
tlbi_aa64_alle1is_write }, 4901 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4902 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4903 .access = PL2_W, .type = ARM_CP_NOP }, 4904 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4905 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4906 .access = PL2_W, .type = ARM_CP_NOP }, 4907 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4908 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4909 .access = PL2_W, .type = ARM_CP_NO_RAW, 4910 .writefn = tlbi_aa64_alle1_write }, 4911 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4912 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4913 .access = PL2_W, .type = ARM_CP_NO_RAW, 4914 .writefn = tlbi_aa64_alle1is_write }, 4915 #ifndef CONFIG_USER_ONLY 4916 /* 64 bit address translation operations */ 4917 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4918 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4919 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4920 .writefn = ats_write64 }, 4921 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4922 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4923 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4924 .writefn = ats_write64 }, 4925 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4926 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4927 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4928 .writefn = ats_write64 }, 4929 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4930 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4931 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4932 .writefn = ats_write64 }, 4933 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4934 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4935 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4936 .writefn = ats_write64 }, 4937 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4938 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4939 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4940 .writefn = ats_write64 }, 4941 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4942 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4943 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4944 .writefn = ats_write64 }, 4945 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 4946 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4947 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4948 .writefn = ats_write64 }, 4949 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4950 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4951 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4952 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4953 .writefn = ats_write64 }, 4954 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4955 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4956 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4957 .writefn = ats_write64 }, 4958 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4959 .type = ARM_CP_ALIAS, 4960 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4961 .access = PL1_RW, .resetvalue = 0, 4962 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4963 .writefn = par_write }, 4964 #endif 4965 /* TLB invalidate last level of translation table walk */ 4966 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4967 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4968 .writefn = tlbimva_is_write }, 4969 { .name = 
"TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4970 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4971 .writefn = tlbimvaa_is_write }, 4972 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4973 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4974 .writefn = tlbimva_write }, 4975 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4976 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4977 .writefn = tlbimvaa_write }, 4978 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4979 .type = ARM_CP_NO_RAW, .access = PL2_W, 4980 .writefn = tlbimva_hyp_write }, 4981 { .name = "TLBIMVALHIS", 4982 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4983 .type = ARM_CP_NO_RAW, .access = PL2_W, 4984 .writefn = tlbimva_hyp_is_write }, 4985 { .name = "TLBIIPAS2", 4986 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4987 .type = ARM_CP_NOP, .access = PL2_W }, 4988 { .name = "TLBIIPAS2IS", 4989 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4990 .type = ARM_CP_NOP, .access = PL2_W }, 4991 { .name = "TLBIIPAS2L", 4992 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4993 .type = ARM_CP_NOP, .access = PL2_W }, 4994 { .name = "TLBIIPAS2LIS", 4995 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4996 .type = ARM_CP_NOP, .access = PL2_W }, 4997 /* 32 bit cache operations */ 4998 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4999 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5000 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 5001 .type = ARM_CP_NOP, .access = PL1_W }, 5002 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 5003 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5004 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 5005 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5006 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 5007 .type = ARM_CP_NOP, .access = PL1_W }, 5008 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 5009 .type = ARM_CP_NOP, .access = PL1_W }, 5010 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 5011 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5012 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 5013 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5014 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 5015 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5016 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 5017 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5018 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 5019 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 5020 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 5021 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 5022 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 5023 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 5024 /* MMU Domain access control / MPU write buffer control */ 5025 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 5026 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue 
= 0, 5027 .writefn = dacr_write, .raw_writefn = raw_write, 5028 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 5029 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 5030 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 5031 .type = ARM_CP_ALIAS, 5032 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 5033 .access = PL1_RW, 5034 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 5035 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 5036 .type = ARM_CP_ALIAS, 5037 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 5038 .access = PL1_RW, 5039 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 5040 /* We rely on the access checks not allowing the guest to write to the 5041 * state field when SPSel indicates that it's being used as the stack 5042 * pointer. 5043 */ 5044 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 5045 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 5046 .access = PL1_RW, .accessfn = sp_el0_access, 5047 .type = ARM_CP_ALIAS, 5048 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 5049 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 5050 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 5051 .access = PL2_RW, .type = ARM_CP_ALIAS, 5052 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 5053 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 5054 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 5055 .type = ARM_CP_NO_RAW, 5056 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 5057 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 5058 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 5059 .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_FPU, 5060 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, 5061 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 5062 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 5063 .access = PL2_RW, .resetvalue = 0, 5064 .writefn = dacr_write, .raw_writefn = raw_write, 5065 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 5066 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 5067 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 5068 .access = PL2_RW, .resetvalue = 0, 5069 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 5070 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 5071 .type = ARM_CP_ALIAS, 5072 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 5073 .access = PL2_RW, 5074 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 5075 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 5076 .type = ARM_CP_ALIAS, 5077 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 5078 .access = PL2_RW, 5079 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 5080 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 5081 .type = ARM_CP_ALIAS, 5082 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 5083 .access = PL2_RW, 5084 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 5085 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 5086 .type = ARM_CP_ALIAS, 5087 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 5088 .access = PL2_RW, 5089 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 5090 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 5091 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 5092 .resetvalue = 0, 5093 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 5094 { .name = "SDCR", .type = ARM_CP_ALIAS, 5095 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 5096 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5097 .writefn = sdcr_write, 5098 .fieldoffset = 
offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 5099 }; 5100 5101 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ 5102 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 5103 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5104 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5105 .access = PL2_RW, 5106 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 5107 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 5108 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5109 .access = PL2_RW, 5110 .type = ARM_CP_CONST, .resetvalue = 0 }, 5111 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5112 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5113 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5114 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5115 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5116 .access = PL2_RW, 5117 .type = ARM_CP_CONST, .resetvalue = 0 }, 5118 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5119 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5120 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5121 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5122 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5123 .access = PL2_RW, .type = ARM_CP_CONST, 5124 .resetvalue = 0 }, 5125 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5126 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5127 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5128 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5129 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5130 .access = PL2_RW, .type = ARM_CP_CONST, 5131 .resetvalue = 0 }, 5132 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5133 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 5134 .access = PL2_RW, .type = ARM_CP_CONST, 5135 .resetvalue = 0 }, 5136 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5137 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5138 .access = PL2_RW, .type = ARM_CP_CONST, 5139 .resetvalue = 0 }, 5140 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5141 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5142 .access = PL2_RW, .type = ARM_CP_CONST, 5143 .resetvalue = 0 }, 5144 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5145 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5146 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5147 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 5148 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5149 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5150 .type = ARM_CP_CONST, .resetvalue = 0 }, 5151 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5152 .cp = 15, .opc1 = 6, .crm = 2, 5153 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5154 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 5155 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5156 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5157 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5158 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5159 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5160 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5161 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5162 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5163 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5164 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5165 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5166 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5167 { .name = "HTTBR", .cp = 15, .opc1 = 4, 
.crm = 2, 5168 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5169 .resetvalue = 0 }, 5170 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5171 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5172 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5173 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5174 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5175 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5176 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5177 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5178 .resetvalue = 0 }, 5179 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5180 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5181 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5182 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5183 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5184 .resetvalue = 0 }, 5185 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5186 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5187 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5188 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5189 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5190 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5191 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5192 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5193 .access = PL2_RW, .accessfn = access_tda, 5194 .type = ARM_CP_CONST, .resetvalue = 0 }, 5195 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 5196 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5197 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5198 .type = ARM_CP_CONST, .resetvalue = 0 }, 5199 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5200 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5201 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5202 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5203 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5204 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5205 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5206 .type = ARM_CP_CONST, 5207 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5208 .access = PL2_RW, .resetvalue = 0 }, 5209 }; 5210 5211 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 5212 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 5213 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5214 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5215 .access = PL2_RW, 5216 .type = ARM_CP_CONST, .resetvalue = 0 }, 5217 }; 5218 5219 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) 5220 { 5221 ARMCPU *cpu = env_archcpu(env); 5222 5223 if (arm_feature(env, ARM_FEATURE_V8)) { 5224 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ 5225 } else { 5226 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ 5227 } 5228 5229 if (arm_feature(env, ARM_FEATURE_EL3)) { 5230 valid_mask &= ~HCR_HCD; 5231 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5232 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 5233 * However, if we're using the SMC PSCI conduit then QEMU is 5234 * effectively acting like EL3 firmware and so the guest at 5235 * EL2 should retain the ability to prevent EL1 from being 5236 * able to make SMC calls into the ersatz firmware, so in 5237 * that case HCR.TSC should be read/write. 
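 * (Conversely, with the HVC conduit or with PSCI disabled there is no
 * ersatz firmware to protect, so HCR.TSC is left RES0 as architected,
 * which is what the masking below implements.)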
5238 */ 5239 valid_mask &= ~HCR_TSC; 5240 } 5241 5242 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5243 if (cpu_isar_feature(aa64_vh, cpu)) { 5244 valid_mask |= HCR_E2H; 5245 } 5246 if (cpu_isar_feature(aa64_lor, cpu)) { 5247 valid_mask |= HCR_TLOR; 5248 } 5249 if (cpu_isar_feature(aa64_pauth, cpu)) { 5250 valid_mask |= HCR_API | HCR_APK; 5251 } 5252 if (cpu_isar_feature(aa64_mte, cpu)) { 5253 valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5; 5254 } 5255 } 5256 5257 /* Clear RES0 bits. */ 5258 value &= valid_mask; 5259 5260 /* 5261 * These bits change the MMU setup: 5262 * HCR_VM enables stage 2 translation 5263 * HCR_PTW forbids certain page-table setups 5264 * HCR_DC disables stage1 and enables stage2 translation 5265 * HCR_DCT enables tagging on (disabled) stage1 translation 5266 */ 5267 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) { 5268 tlb_flush(CPU(cpu)); 5269 } 5270 env->cp15.hcr_el2 = value; 5271 5272 /* 5273 * Updates to VI and VF require us to update the status of 5274 * virtual interrupts, which are the logical OR of these bits 5275 * and the state of the input lines from the GIC. (This requires 5276 * that we have the iothread lock, which is done by marking the 5277 * reginfo structs as ARM_CP_IO.) 5278 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 5279 * possible for it to be taken immediately, because VIRQ and 5280 * VFIQ are masked unless running at EL0 or EL1, and HCR 5281 * can only be written at EL2. 5282 */ 5283 g_assert(qemu_mutex_iothread_locked()); 5284 arm_cpu_update_virq(cpu); 5285 arm_cpu_update_vfiq(cpu); 5286 } 5287 5288 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5289 { 5290 do_hcr_write(env, value, 0); 5291 } 5292 5293 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5294 uint64_t value) 5295 { 5296 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 5297 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5298 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); 5299 } 5300 5301 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5302 uint64_t value) 5303 { 5304 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5305 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5306 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); 5307 } 5308 5309 /* 5310 * Return the effective value of HCR_EL2. 5311 * Bits that are not included here: 5312 * RW (read from SCR_EL3.RW as needed) 5313 */ 5314 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5315 { 5316 uint64_t ret = env->cp15.hcr_el2; 5317 5318 if (!arm_is_el2_enabled(env)) { 5319 /* 5320 * "This register has no effect if EL2 is not enabled in the 5321 * current Security state". This is ARMv8.4-SecEL2 speak for 5322 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 5323 * 5324 * Prior to that, the language was "In an implementation that 5325 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 5326 * as if this field is 0 for all purposes other than a direct 5327 * read or write access of HCR_EL2". With lots of enumeration 5328 * on a per-field basis. In current QEMU, this condition 5329 * is arm_is_secure_below_el3. 5330 * 5331 * Since the v8.4 language applies to the entire register, and 5332 * appears to be backward compatible, use that. 5333 */ 5334 return 0; 5335 } 5336 5337 /* 5338 * For a cpu that supports both aarch64 and aarch32, we can set bits 5339 * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32. 5340 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
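 * (For example, HCR_EL2.E2H can be set from AArch64 EL3, but it is not
 * a valid AArch32 HCR2 bit, so it is treated as 0 below when EL2 is
 * AArch32.)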
5341 */ 5342 if (!arm_el_is_aa64(env, 2)) { 5343 uint64_t aa32_valid; 5344 5345 /* 5346 * These bits are up-to-date as of ARMv8.6. 5347 * For HCR, it's easiest to list just the 2 bits that are invalid. 5348 * For HCR2, list those that are valid. 5349 */ 5350 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); 5351 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | 5352 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); 5353 ret &= aa32_valid; 5354 } 5355 5356 if (ret & HCR_TGE) { 5357 /* These bits are up-to-date as of ARMv8.6. */ 5358 if (ret & HCR_E2H) { 5359 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 5360 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 5361 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 5362 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | 5363 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | 5364 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); 5365 } else { 5366 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 5367 } 5368 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 5369 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 5370 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 5371 HCR_TLOR); 5372 } 5373 5374 return ret; 5375 } 5376 5377 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 5378 uint64_t value) 5379 { 5380 /* 5381 * For A-profile AArch32 EL3, if NSACR.CP10 5382 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5383 */ 5384 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5385 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5386 value &= ~(0x3 << 10); 5387 value |= env->cp15.cptr_el[2] & (0x3 << 10); 5388 } 5389 env->cp15.cptr_el[2] = value; 5390 } 5391 5392 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 5393 { 5394 /* 5395 * For A-profile AArch32 EL3, if NSACR.CP10 5396 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 
5397 */ 5398 uint64_t value = env->cp15.cptr_el[2]; 5399 5400 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5401 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5402 value |= 0x3 << 10; 5403 } 5404 return value; 5405 } 5406 5407 static const ARMCPRegInfo el2_cp_reginfo[] = { 5408 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5409 .type = ARM_CP_IO, 5410 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5411 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5412 .writefn = hcr_write }, 5413 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5414 .type = ARM_CP_ALIAS | ARM_CP_IO, 5415 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5416 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5417 .writefn = hcr_writelow }, 5418 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5419 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5420 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5421 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5422 .type = ARM_CP_ALIAS, 5423 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5424 .access = PL2_RW, 5425 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5426 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5427 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5428 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5429 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5430 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5431 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5432 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5433 .type = ARM_CP_ALIAS, 5434 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5435 .access = PL2_RW, 5436 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5437 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5438 .type = ARM_CP_ALIAS, 5439 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5440 .access = PL2_RW, 5441 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5442 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5443 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5444 .access = PL2_RW, .writefn = vbar_write, 5445 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5446 .resetvalue = 0 }, 5447 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 5448 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5449 .access = PL3_RW, .type = ARM_CP_ALIAS, 5450 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5451 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5452 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5453 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5454 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5455 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5456 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5457 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5458 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5459 .resetvalue = 0 }, 5460 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5461 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5462 .access = PL2_RW, .type = ARM_CP_ALIAS, 5463 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5464 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5465 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5466 .access = PL2_RW, .type = ARM_CP_CONST, 5467 .resetvalue = 0 }, 5468 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5469 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5470 .cp = 15, .opc1 = 4, .crn = 10, 
.crm = 3, .opc2 = 1, 5471 .access = PL2_RW, .type = ARM_CP_CONST, 5472 .resetvalue = 0 }, 5473 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5474 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5475 .access = PL2_RW, .type = ARM_CP_CONST, 5476 .resetvalue = 0 }, 5477 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5478 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5479 .access = PL2_RW, .type = ARM_CP_CONST, 5480 .resetvalue = 0 }, 5481 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5482 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5483 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5484 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ 5485 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5486 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5487 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5488 .type = ARM_CP_ALIAS, 5489 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5490 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5491 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5492 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5493 .access = PL2_RW, 5494 /* no .writefn needed as this can't cause an ASID change; 5495 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 5496 */ 5497 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5498 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5499 .cp = 15, .opc1 = 6, .crm = 2, 5500 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5501 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5502 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5503 .writefn = vttbr_write }, 5504 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5505 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5506 .access = PL2_RW, .writefn = vttbr_write, 5507 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5508 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5509 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5510 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5511 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5512 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5513 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5514 .access = PL2_RW, .resetvalue = 0, 5515 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 5516 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5517 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5518 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, 5519 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5520 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5521 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5522 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5523 { .name = "TLBIALLNSNH", 5524 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5525 .type = ARM_CP_NO_RAW, .access = PL2_W, 5526 .writefn = tlbiall_nsnh_write }, 5527 { .name = "TLBIALLNSNHIS", 5528 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5529 .type = ARM_CP_NO_RAW, .access = PL2_W, 5530 .writefn = tlbiall_nsnh_is_write }, 5531 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5532 .type = ARM_CP_NO_RAW, .access = PL2_W, 5533 .writefn = tlbiall_hyp_write }, 5534 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5535 .type = ARM_CP_NO_RAW, .access = PL2_W, 5536 .writefn = tlbiall_hyp_is_write }, 5537 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5538 .type = ARM_CP_NO_RAW, .access = PL2_W, 5539 .writefn 
= tlbimva_hyp_write }, 5540 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5541 .type = ARM_CP_NO_RAW, .access = PL2_W, 5542 .writefn = tlbimva_hyp_is_write }, 5543 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 5544 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5545 .type = ARM_CP_NO_RAW, .access = PL2_W, 5546 .writefn = tlbi_aa64_alle2_write }, 5547 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 5548 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5549 .type = ARM_CP_NO_RAW, .access = PL2_W, 5550 .writefn = tlbi_aa64_vae2_write }, 5551 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 5552 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5553 .access = PL2_W, .type = ARM_CP_NO_RAW, 5554 .writefn = tlbi_aa64_vae2_write }, 5555 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 5556 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5557 .access = PL2_W, .type = ARM_CP_NO_RAW, 5558 .writefn = tlbi_aa64_alle2is_write }, 5559 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 5560 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5561 .type = ARM_CP_NO_RAW, .access = PL2_W, 5562 .writefn = tlbi_aa64_vae2is_write }, 5563 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 5564 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5565 .access = PL2_W, .type = ARM_CP_NO_RAW, 5566 .writefn = tlbi_aa64_vae2is_write }, 5567 #ifndef CONFIG_USER_ONLY 5568 /* Unlike the other EL2-related AT operations, these must 5569 * UNDEF from EL3 if EL2 is not implemented, which is why we 5570 * define them here rather than with the rest of the AT ops. 5571 */ 5572 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5573 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5574 .access = PL2_W, .accessfn = at_s1e2_access, 5575 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5576 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5577 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5578 .access = PL2_W, .accessfn = at_s1e2_access, 5579 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5580 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5581 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 5582 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5583 * to behave as if SCR.NS was 1. 5584 */ 5585 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5586 .access = PL2_W, 5587 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5588 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5589 .access = PL2_W, 5590 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5591 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5592 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5593 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5594 * reset values as IMPDEF. We choose to reset to 3 to comply with 5595 * both ARMv7 and ARMv8. 
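 * (A reset value of 3 sets the two low control bits, i.e. the EL1/PL1
 * physical counter and physical timer access enables, so those accesses
 * are not trapped out of reset.)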
5596 */ 5597 .access = PL2_RW, .resetvalue = 3, 5598 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5599 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5600 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5601 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5602 .writefn = gt_cntvoff_write, 5603 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5604 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5605 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5606 .writefn = gt_cntvoff_write, 5607 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5608 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5609 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5610 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5611 .type = ARM_CP_IO, .access = PL2_RW, 5612 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5613 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5614 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5615 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5616 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5617 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5618 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5619 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5620 .resetfn = gt_hyp_timer_reset, 5621 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5622 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5623 .type = ARM_CP_IO, 5624 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5625 .access = PL2_RW, 5626 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5627 .resetvalue = 0, 5628 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5629 #endif 5630 /* The only field of MDCR_EL2 that has a defined architectural reset value 5631 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N. 
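 * In QEMU PMCR_EL0.N is PMCR_NUM_COUNTERS, which is why that constant is
 * used as the resetvalue below.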
5632 */ 5633 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5634 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5635 .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS, 5636 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 5637 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5638 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5639 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5640 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5641 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5642 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5643 .access = PL2_RW, 5644 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5645 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5646 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5647 .access = PL2_RW, 5648 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5649 }; 5650 5651 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5652 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5653 .type = ARM_CP_ALIAS | ARM_CP_IO, 5654 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5655 .access = PL2_RW, 5656 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5657 .writefn = hcr_writehigh }, 5658 }; 5659 5660 static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, 5661 bool isread) 5662 { 5663 if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) { 5664 return CP_ACCESS_OK; 5665 } 5666 return CP_ACCESS_TRAP_UNCATEGORIZED; 5667 } 5668 5669 static const ARMCPRegInfo el2_sec_cp_reginfo[] = { 5670 { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64, 5671 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0, 5672 .access = PL2_RW, .accessfn = sel2_access, 5673 .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) }, 5674 { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64, 5675 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2, 5676 .access = PL2_RW, .accessfn = sel2_access, 5677 .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, 5678 }; 5679 5680 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5681 bool isread) 5682 { 5683 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5684 * At Secure EL1 it traps to EL3 or EL2. 5685 */ 5686 if (arm_current_el(env) == 3) { 5687 return CP_ACCESS_OK; 5688 } 5689 if (arm_is_secure_below_el3(env)) { 5690 if (env->cp15.scr_el3 & SCR_EEL2) { 5691 return CP_ACCESS_TRAP_EL2; 5692 } 5693 return CP_ACCESS_TRAP_EL3; 5694 } 5695 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. 
*/ 5696 if (isread) { 5697 return CP_ACCESS_OK; 5698 } 5699 return CP_ACCESS_TRAP_UNCATEGORIZED; 5700 } 5701 5702 static const ARMCPRegInfo el3_cp_reginfo[] = { 5703 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 5704 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 5705 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 5706 .resetfn = scr_reset, .writefn = scr_write }, 5707 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 5708 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 5709 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5710 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 5711 .writefn = scr_write }, 5712 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 5713 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 5714 .access = PL3_RW, .resetvalue = 0, 5715 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 5716 { .name = "SDER", 5717 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 5718 .access = PL3_RW, .resetvalue = 0, 5719 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 5720 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5721 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5722 .writefn = vbar_write, .resetvalue = 0, 5723 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 5724 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 5725 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 5726 .access = PL3_RW, .resetvalue = 0, 5727 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 5728 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 5729 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 5730 .access = PL3_RW, 5731 /* no .writefn needed as this can't cause an ASID change; 5732 * we must provide a .raw_writefn and .resetfn because we handle 5733 * reset and migration for the AArch32 TTBCR(S), which might be 5734 * using mask and base_mask. 
5735 */ 5736 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 5737 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 5738 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 5739 .type = ARM_CP_ALIAS, 5740 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 5741 .access = PL3_RW, 5742 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 5743 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 5744 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 5745 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 5746 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 5747 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5748 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5749 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5750 .type = ARM_CP_ALIAS, 5751 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5752 .access = PL3_RW, 5753 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5754 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5755 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5756 .access = PL3_RW, .writefn = vbar_write, 5757 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5758 .resetvalue = 0 }, 5759 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5760 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5761 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5762 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5763 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5764 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5765 .access = PL3_RW, .resetvalue = 0, 5766 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5767 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5768 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5769 .access = PL3_RW, .type = ARM_CP_CONST, 5770 .resetvalue = 0 }, 5771 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5772 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5773 .access = PL3_RW, .type = ARM_CP_CONST, 5774 .resetvalue = 0 }, 5775 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5776 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5777 .access = PL3_RW, .type = ARM_CP_CONST, 5778 .resetvalue = 0 }, 5779 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 5780 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 5781 .access = PL3_W, .type = ARM_CP_NO_RAW, 5782 .writefn = tlbi_aa64_alle3is_write }, 5783 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 5784 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 5785 .access = PL3_W, .type = ARM_CP_NO_RAW, 5786 .writefn = tlbi_aa64_vae3is_write }, 5787 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 5788 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 5789 .access = PL3_W, .type = ARM_CP_NO_RAW, 5790 .writefn = tlbi_aa64_vae3is_write }, 5791 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 5792 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 5793 .access = PL3_W, .type = ARM_CP_NO_RAW, 5794 .writefn = tlbi_aa64_alle3_write }, 5795 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 5796 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 5797 .access = PL3_W, .type = ARM_CP_NO_RAW, 5798 .writefn = tlbi_aa64_vae3_write }, 5799 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 5800 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 5801 .access = PL3_W, .type = ARM_CP_NO_RAW, 5802 .writefn = tlbi_aa64_vae3_write }, 5803 }; 5804 5805 #ifndef CONFIG_USER_ONLY 5806 /* Test if system register redirection is to occur in the current 
state. */ 5807 static bool redirect_for_e2h(CPUARMState *env) 5808 { 5809 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 5810 } 5811 5812 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 5813 { 5814 CPReadFn *readfn; 5815 5816 if (redirect_for_e2h(env)) { 5817 /* Switch to the saved EL2 version of the register. */ 5818 ri = ri->opaque; 5819 readfn = ri->readfn; 5820 } else { 5821 readfn = ri->orig_readfn; 5822 } 5823 if (readfn == NULL) { 5824 readfn = raw_read; 5825 } 5826 return readfn(env, ri); 5827 } 5828 5829 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 5830 uint64_t value) 5831 { 5832 CPWriteFn *writefn; 5833 5834 if (redirect_for_e2h(env)) { 5835 /* Switch to the saved EL2 version of the register. */ 5836 ri = ri->opaque; 5837 writefn = ri->writefn; 5838 } else { 5839 writefn = ri->orig_writefn; 5840 } 5841 if (writefn == NULL) { 5842 writefn = raw_write; 5843 } 5844 writefn(env, ri, value); 5845 } 5846 5847 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 5848 { 5849 struct E2HAlias { 5850 uint32_t src_key, dst_key, new_key; 5851 const char *src_name, *dst_name, *new_name; 5852 bool (*feature)(const ARMISARegisters *id); 5853 }; 5854 5855 #define K(op0, op1, crn, crm, op2) \ 5856 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 5857 5858 static const struct E2HAlias aliases[] = { 5859 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 5860 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 5861 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 5862 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 5863 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 5864 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 5865 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 5866 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 5867 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 5868 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 5869 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 5870 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 5871 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 5872 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 5873 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 5874 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 5875 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 5876 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 5877 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 5878 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 5879 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 5880 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 5881 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 5882 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 5883 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 5884 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 5885 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 5886 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 5887 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 5888 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 5889 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 5890 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, 5891 5892 /* 5893 * Note that redirection of ZCR is mentioned in the description 5894 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 5895 * not in the summary table. 
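 * These two entries also use the optional ->feature hook, so the ZCR and
 * TFSR aliases are only registered when SVE or MTE is actually present.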
5896 */ 5897 { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), 5898 "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, 5899 5900 { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), 5901 "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte }, 5902 5903 /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ 5904 /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ 5905 }; 5906 #undef K 5907 5908 size_t i; 5909 5910 for (i = 0; i < ARRAY_SIZE(aliases); i++) { 5911 const struct E2HAlias *a = &aliases[i]; 5912 ARMCPRegInfo *src_reg, *dst_reg, *new_reg; 5913 bool ok; 5914 5915 if (a->feature && !a->feature(&cpu->isar)) { 5916 continue; 5917 } 5918 5919 src_reg = g_hash_table_lookup(cpu->cp_regs, 5920 (gpointer)(uintptr_t)a->src_key); 5921 dst_reg = g_hash_table_lookup(cpu->cp_regs, 5922 (gpointer)(uintptr_t)a->dst_key); 5923 g_assert(src_reg != NULL); 5924 g_assert(dst_reg != NULL); 5925 5926 /* Cross-compare names to detect typos in the keys. */ 5927 g_assert(strcmp(src_reg->name, a->src_name) == 0); 5928 g_assert(strcmp(dst_reg->name, a->dst_name) == 0); 5929 5930 /* None of the core system registers use opaque; we will. */ 5931 g_assert(src_reg->opaque == NULL); 5932 5933 /* Create alias before redirection so we dup the right data. */ 5934 new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); 5935 5936 new_reg->name = a->new_name; 5937 new_reg->type |= ARM_CP_ALIAS; 5938 /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ 5939 new_reg->access &= PL2_RW | PL3_RW; 5940 5941 ok = g_hash_table_insert(cpu->cp_regs, 5942 (gpointer)(uintptr_t)a->new_key, new_reg); 5943 g_assert(ok); 5944 5945 src_reg->opaque = dst_reg; 5946 src_reg->orig_readfn = src_reg->readfn ?: raw_read; 5947 src_reg->orig_writefn = src_reg->writefn ?: raw_write; 5948 if (!src_reg->raw_readfn) { 5949 src_reg->raw_readfn = raw_read; 5950 } 5951 if (!src_reg->raw_writefn) { 5952 src_reg->raw_writefn = raw_write; 5953 } 5954 src_reg->readfn = el2_e2h_read; 5955 src_reg->writefn = el2_e2h_write; 5956 } 5957 } 5958 #endif 5959 5960 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 5961 bool isread) 5962 { 5963 int cur_el = arm_current_el(env); 5964 5965 if (cur_el < 2) { 5966 uint64_t hcr = arm_hcr_el2_eff(env); 5967 5968 if (cur_el == 0) { 5969 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 5970 if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { 5971 return CP_ACCESS_TRAP_EL2; 5972 } 5973 } else { 5974 if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 5975 return CP_ACCESS_TRAP; 5976 } 5977 if (hcr & HCR_TID2) { 5978 return CP_ACCESS_TRAP_EL2; 5979 } 5980 } 5981 } else if (hcr & HCR_TID2) { 5982 return CP_ACCESS_TRAP_EL2; 5983 } 5984 } 5985 5986 if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) { 5987 return CP_ACCESS_TRAP_EL2; 5988 } 5989 5990 return CP_ACCESS_OK; 5991 } 5992 5993 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 5994 uint64_t value) 5995 { 5996 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 5997 * read via a bit in OSLSR_EL1. 5998 */ 5999 int oslock; 6000 6001 if (ri->state == ARM_CP_STATE_AA32) { 6002 oslock = (value == 0xC5ACCE55); 6003 } else { 6004 oslock = value & 1; 6005 } 6006 6007 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 6008 } 6009 6010 static const ARMCPRegInfo debug_cp_reginfo[] = { 6011 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 6012 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 6013 * unlike DBGDRAR it is never accessible from EL0. 
6014 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 6015 * accessor. 6016 */ 6017 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 6018 .access = PL0_R, .accessfn = access_tdra, 6019 .type = ARM_CP_CONST, .resetvalue = 0 }, 6020 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 6021 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 6022 .access = PL1_R, .accessfn = access_tdra, 6023 .type = ARM_CP_CONST, .resetvalue = 0 }, 6024 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 6025 .access = PL0_R, .accessfn = access_tdra, 6026 .type = ARM_CP_CONST, .resetvalue = 0 }, 6027 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 6028 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 6029 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 6030 .access = PL1_RW, .accessfn = access_tda, 6031 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 6032 .resetvalue = 0 }, 6033 /* 6034 * MDCCSR_EL0[30:29] map to EDSCR[30:29]. Simply RAZ as the external 6035 * Debug Communication Channel is not implemented. 6036 */ 6037 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64, 6038 .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0, 6039 .access = PL0_R, .accessfn = access_tda, 6040 .type = ARM_CP_CONST, .resetvalue = 0 }, 6041 /* 6042 * DBGDSCRint[15,12,5:2] map to MDSCR_EL1[15,12,5:2]. Map all bits as 6043 * it is unlikely a guest will care. 6044 * We don't implement the configurable EL0 access. 6045 */ 6046 { .name = "DBGDSCRint", .state = ARM_CP_STATE_AA32, 6047 .cp = 14, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 6048 .type = ARM_CP_ALIAS, 6049 .access = PL1_R, .accessfn = access_tda, 6050 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 6051 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 6052 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 6053 .access = PL1_W, .type = ARM_CP_NO_RAW, 6054 .accessfn = access_tdosa, 6055 .writefn = oslar_write }, 6056 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 6057 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 6058 .access = PL1_R, .resetvalue = 10, 6059 .accessfn = access_tdosa, 6060 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 6061 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 6062 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 6063 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 6064 .access = PL1_RW, .accessfn = access_tdosa, 6065 .type = ARM_CP_NOP }, 6066 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 6067 * implement vector catch debug events yet. 6068 */ 6069 { .name = "DBGVCR", 6070 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 6071 .access = PL1_RW, .accessfn = access_tda, 6072 .type = ARM_CP_NOP }, 6073 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 6074 * to save and restore a 32-bit guest's DBGVCR) 6075 */ 6076 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 6077 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 6078 .access = PL2_RW, .accessfn = access_tda, 6079 .type = ARM_CP_NOP }, 6080 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 6081 * Channel but Linux may try to access this register. The 32-bit 6082 * alias is DBGDCCINT. 
6083 */ 6084 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 6085 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 6086 .access = PL1_RW, .accessfn = access_tda, 6087 .type = ARM_CP_NOP }, 6088 }; 6089 6090 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 6091 /* 64 bit access versions of the (dummy) debug registers */ 6092 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 6093 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6094 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 6095 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6096 }; 6097 6098 /* Return the exception level to which exceptions should be taken 6099 * via SVEAccessTrap. If an exception should be routed through 6100 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 6101 * take care of raising that exception. 6102 * C.f. the ARM pseudocode function CheckSVEEnabled. 6103 */ 6104 int sve_exception_el(CPUARMState *env, int el) 6105 { 6106 #ifndef CONFIG_USER_ONLY 6107 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 6108 6109 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 6110 /* Check CPACR.ZEN. */ 6111 switch (extract32(env->cp15.cpacr_el1, 16, 2)) { 6112 case 1: 6113 if (el != 0) { 6114 break; 6115 } 6116 /* fall through */ 6117 case 0: 6118 case 2: 6119 /* route_to_el2 */ 6120 return hcr_el2 & HCR_TGE ? 2 : 1; 6121 } 6122 6123 /* Check CPACR.FPEN. */ 6124 switch (extract32(env->cp15.cpacr_el1, 20, 2)) { 6125 case 1: 6126 if (el != 0) { 6127 break; 6128 } 6129 /* fall through */ 6130 case 0: 6131 case 2: 6132 return 0; 6133 } 6134 } 6135 6136 /* 6137 * CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). 6138 */ 6139 if (el <= 2) { 6140 if (hcr_el2 & HCR_E2H) { 6141 /* Check CPTR_EL2.ZEN. */ 6142 switch (extract32(env->cp15.cptr_el[2], 16, 2)) { 6143 case 1: 6144 if (el != 0 || !(hcr_el2 & HCR_TGE)) { 6145 break; 6146 } 6147 /* fall through */ 6148 case 0: 6149 case 2: 6150 return 2; 6151 } 6152 6153 /* Check CPTR_EL2.FPEN. */ 6154 switch (extract32(env->cp15.cptr_el[2], 20, 2)) { 6155 case 1: 6156 if (el == 2 || !(hcr_el2 & HCR_TGE)) { 6157 break; 6158 } 6159 /* fall through */ 6160 case 0: 6161 case 2: 6162 return 0; 6163 } 6164 } else if (arm_is_el2_enabled(env)) { 6165 if (env->cp15.cptr_el[2] & CPTR_TZ) { 6166 return 2; 6167 } 6168 if (env->cp15.cptr_el[2] & CPTR_TFP) { 6169 return 0; 6170 } 6171 } 6172 } 6173 6174 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 6175 if (arm_feature(env, ARM_FEATURE_EL3) 6176 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 6177 return 3; 6178 } 6179 #endif 6180 return 0; 6181 } 6182 6183 uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) 6184 { 6185 uint32_t end_len; 6186 6187 start_len = MIN(start_len, ARM_MAX_VQ - 1); 6188 end_len = start_len; 6189 6190 if (!test_bit(start_len, cpu->sve_vq_map)) { 6191 end_len = find_last_bit(cpu->sve_vq_map, start_len); 6192 assert(end_len < start_len); 6193 } 6194 return end_len; 6195 } 6196 6197 /* 6198 * Given that SVE is enabled, return the vector length for EL. 
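 * The result is in ZCR.LEN form (vector quadwords minus 1), clamped by
 * the ZCR value of each controlling EL and by the supported-VQ map.
 * As a rough worked example: with sve_max_vq = 4, ZCR_EL1.LEN = 1 and
 * neither EL2 nor EL3 present, the raw length is MIN(3, 1) = 1; if VQ 2
 * is in sve_vq_map the function returns 1, i.e. a 256-bit vector length.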
6199 */ 6200 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 6201 { 6202 ARMCPU *cpu = env_archcpu(env); 6203 uint32_t zcr_len = cpu->sve_max_vq - 1; 6204 6205 if (el <= 1 && 6206 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 6207 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 6208 } 6209 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { 6210 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 6211 } 6212 if (arm_feature(env, ARM_FEATURE_EL3)) { 6213 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 6214 } 6215 6216 return aarch64_sve_zcr_get_valid_len(cpu, zcr_len); 6217 } 6218 6219 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6220 uint64_t value) 6221 { 6222 int cur_el = arm_current_el(env); 6223 int old_len = sve_zcr_len_for_el(env, cur_el); 6224 int new_len; 6225 6226 /* Bits other than [3:0] are RAZ/WI. */ 6227 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); 6228 raw_write(env, ri, value & 0xf); 6229 6230 /* 6231 * Because we arrived here, we know both FP and SVE are enabled; 6232 * otherwise we would have trapped access to the ZCR_ELn register. 6233 */ 6234 new_len = sve_zcr_len_for_el(env, cur_el); 6235 if (new_len < old_len) { 6236 aarch64_sve_narrow_vq(env, new_len + 1); 6237 } 6238 } 6239 6240 static const ARMCPRegInfo zcr_el1_reginfo = { 6241 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 6242 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 6243 .access = PL1_RW, .type = ARM_CP_SVE, 6244 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 6245 .writefn = zcr_write, .raw_writefn = raw_write 6246 }; 6247 6248 static const ARMCPRegInfo zcr_el2_reginfo = { 6249 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6250 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6251 .access = PL2_RW, .type = ARM_CP_SVE, 6252 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 6253 .writefn = zcr_write, .raw_writefn = raw_write 6254 }; 6255 6256 static const ARMCPRegInfo zcr_no_el2_reginfo = { 6257 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6258 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6259 .access = PL2_RW, .type = ARM_CP_SVE, 6260 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 6261 }; 6262 6263 static const ARMCPRegInfo zcr_el3_reginfo = { 6264 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 6265 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 6266 .access = PL3_RW, .type = ARM_CP_SVE, 6267 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 6268 .writefn = zcr_write, .raw_writefn = raw_write 6269 }; 6270 6271 void hw_watchpoint_update(ARMCPU *cpu, int n) 6272 { 6273 CPUARMState *env = &cpu->env; 6274 vaddr len = 0; 6275 vaddr wvr = env->cp15.dbgwvr[n]; 6276 uint64_t wcr = env->cp15.dbgwcr[n]; 6277 int mask; 6278 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 6279 6280 if (env->cpu_watchpoint[n]) { 6281 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 6282 env->cpu_watchpoint[n] = NULL; 6283 } 6284 6285 if (!FIELD_EX64(wcr, DBGWCR, E)) { 6286 /* E bit clear : watchpoint disabled */ 6287 return; 6288 } 6289 6290 switch (FIELD_EX64(wcr, DBGWCR, LSC)) { 6291 case 0: 6292 /* LSC 00 is reserved and must behave as if the wp is disabled */ 6293 return; 6294 case 1: 6295 flags |= BP_MEM_READ; 6296 break; 6297 case 2: 6298 flags |= BP_MEM_WRITE; 6299 break; 6300 case 3: 6301 flags |= BP_MEM_ACCESS; 6302 break; 6303 } 6304 6305 /* Attempts to use both MASK and BAS fields simultaneously are 6306 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 6307 * thus generating a 
watchpoint for every byte in the masked region. 6308 */ 6309 mask = FIELD_EX64(wcr, DBGWCR, MASK); 6310 if (mask == 1 || mask == 2) { 6311 /* Reserved values of MASK; we must act as if the mask value was 6312 * some non-reserved value, or as if the watchpoint were disabled. 6313 * We choose the latter. 6314 */ 6315 return; 6316 } else if (mask) { 6317 /* Watchpoint covers an aligned area up to 2GB in size */ 6318 len = 1ULL << mask; 6319 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 6320 * whether the watchpoint fires when the unmasked bits match; we opt 6321 * to generate the exceptions. 6322 */ 6323 wvr &= ~(len - 1); 6324 } else { 6325 /* Watchpoint covers bytes defined by the byte address select bits */ 6326 int bas = FIELD_EX64(wcr, DBGWCR, BAS); 6327 int basstart; 6328 6329 if (extract64(wvr, 2, 1)) { 6330 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 6331 * ignored, and BAS[3:0] define which bytes to watch. 6332 */ 6333 bas &= 0xf; 6334 } 6335 6336 if (bas == 0) { 6337 /* This must act as if the watchpoint is disabled */ 6338 return; 6339 } 6340 6341 /* The BAS bits are supposed to be programmed to indicate a contiguous 6342 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 6343 * we fire for each byte in the word/doubleword addressed by the WVR. 6344 * We choose to ignore any non-zero bits after the first range of 1s. 6345 */ 6346 basstart = ctz32(bas); 6347 len = cto32(bas >> basstart); 6348 wvr += basstart; 6349 } 6350 6351 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 6352 &env->cpu_watchpoint[n]); 6353 } 6354 6355 void hw_watchpoint_update_all(ARMCPU *cpu) 6356 { 6357 int i; 6358 CPUARMState *env = &cpu->env; 6359 6360 /* Completely clear out existing QEMU watchpoints and our array, to 6361 * avoid possible stale entries following migration load. 6362 */ 6363 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 6364 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 6365 6366 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 6367 hw_watchpoint_update(cpu, i); 6368 } 6369 } 6370 6371 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6372 uint64_t value) 6373 { 6374 ARMCPU *cpu = env_archcpu(env); 6375 int i = ri->crm; 6376 6377 /* 6378 * Bits [1:0] are RES0. 6379 * 6380 * It is IMPLEMENTATION DEFINED whether [63:49] ([63:53] with FEAT_LVA) 6381 * are hardwired to the value of bit [48] ([52] with FEAT_LVA), or if 6382 * they contain the value written. It is CONSTRAINED UNPREDICTABLE 6383 * whether the RESS bits are ignored when comparing an address. 6384 * 6385 * Therefore we are allowed to compare the entire register, which lets 6386 * us avoid considering whether or not FEAT_LVA is actually enabled. 
6387 */ 6388 value &= ~3ULL; 6389 6390 raw_write(env, ri, value); 6391 hw_watchpoint_update(cpu, i); 6392 } 6393 6394 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6395 uint64_t value) 6396 { 6397 ARMCPU *cpu = env_archcpu(env); 6398 int i = ri->crm; 6399 6400 raw_write(env, ri, value); 6401 hw_watchpoint_update(cpu, i); 6402 } 6403 6404 void hw_breakpoint_update(ARMCPU *cpu, int n) 6405 { 6406 CPUARMState *env = &cpu->env; 6407 uint64_t bvr = env->cp15.dbgbvr[n]; 6408 uint64_t bcr = env->cp15.dbgbcr[n]; 6409 vaddr addr; 6410 int bt; 6411 int flags = BP_CPU; 6412 6413 if (env->cpu_breakpoint[n]) { 6414 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 6415 env->cpu_breakpoint[n] = NULL; 6416 } 6417 6418 if (!extract64(bcr, 0, 1)) { 6419 /* E bit clear : breakpoint disabled */ 6420 return; 6421 } 6422 6423 bt = extract64(bcr, 20, 4); 6424 6425 switch (bt) { 6426 case 4: /* unlinked address mismatch (reserved if AArch64) */ 6427 case 5: /* linked address mismatch (reserved if AArch64) */ 6428 qemu_log_mask(LOG_UNIMP, 6429 "arm: address mismatch breakpoint types not implemented\n"); 6430 return; 6431 case 0: /* unlinked address match */ 6432 case 1: /* linked address match */ 6433 { 6434 /* 6435 * Bits [1:0] are RES0. 6436 * 6437 * It is IMPLEMENTATION DEFINED whether bits [63:49] 6438 * ([63:53] for FEAT_LVA) are hardwired to a copy of the sign bit 6439 * of the VA field ([48] or [52] for FEAT_LVA), or whether the 6440 * value is read as written. It is CONSTRAINED UNPREDICTABLE 6441 * whether the RESS bits are ignored when comparing an address. 6442 * Therefore we are allowed to compare the entire register, which 6443 * lets us avoid considering whether FEAT_LVA is actually enabled. 6444 * 6445 * The BAS field is used to allow setting breakpoints on 16-bit 6446 * wide instructions; it is CONSTRAINED UNPREDICTABLE whether 6447 * a bp will fire if the addresses covered by the bp and the addresses 6448 * covered by the insn overlap but the insn doesn't start at the 6449 * start of the bp address range. We choose to require the insn and 6450 * the bp to have the same address. The constraints on writing to 6451 * BAS enforced in dbgbcr_write mean we have only four cases: 6452 * 0b0000 => no breakpoint 6453 * 0b0011 => breakpoint on addr 6454 * 0b1100 => breakpoint on addr + 2 6455 * 0b1111 => breakpoint on addr 6456 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 6457 */ 6458 int bas = extract64(bcr, 5, 4); 6459 addr = bvr & ~3ULL; 6460 if (bas == 0) { 6461 return; 6462 } 6463 if (bas == 0xc) { 6464 addr += 2; 6465 } 6466 break; 6467 } 6468 case 2: /* unlinked context ID match */ 6469 case 8: /* unlinked VMID match (reserved if no EL2) */ 6470 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 6471 qemu_log_mask(LOG_UNIMP, 6472 "arm: unlinked context breakpoint types not implemented\n"); 6473 return; 6474 case 9: /* linked VMID match (reserved if no EL2) */ 6475 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 6476 case 3: /* linked context ID match */ 6477 default: 6478 /* We must generate no events for Linked context matches (unless 6479 * they are linked to by some other bp/wp, which is handled in 6480 * updates for the linking bp/wp). We choose to also generate no events 6481 * for reserved values.
6482 */ 6483 return; 6484 } 6485 6486 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 6487 } 6488 6489 void hw_breakpoint_update_all(ARMCPU *cpu) 6490 { 6491 int i; 6492 CPUARMState *env = &cpu->env; 6493 6494 /* Completely clear out existing QEMU breakpoints and our array, to 6495 * avoid possible stale entries following migration load. 6496 */ 6497 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 6498 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 6499 6500 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 6501 hw_breakpoint_update(cpu, i); 6502 } 6503 } 6504 6505 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6506 uint64_t value) 6507 { 6508 ARMCPU *cpu = env_archcpu(env); 6509 int i = ri->crm; 6510 6511 raw_write(env, ri, value); 6512 hw_breakpoint_update(cpu, i); 6513 } 6514 6515 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6516 uint64_t value) 6517 { 6518 ARMCPU *cpu = env_archcpu(env); 6519 int i = ri->crm; 6520 6521 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 6522 * copy of BAS[0]. 6523 */ 6524 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 6525 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 6526 6527 raw_write(env, ri, value); 6528 hw_breakpoint_update(cpu, i); 6529 } 6530 6531 static void define_debug_regs(ARMCPU *cpu) 6532 { 6533 /* Define v7 and v8 architectural debug registers. 6534 * These are just dummy implementations for now. 6535 */ 6536 int i; 6537 int wrps, brps, ctx_cmps; 6538 6539 /* 6540 * The Arm ARM says DBGDIDR is optional and deprecated if EL1 cannot 6541 * use AArch32. Given that bit 15 is RES1, if the value is 0 then 6542 * the register must not exist for this cpu. 6543 */ 6544 if (cpu->isar.dbgdidr != 0) { 6545 ARMCPRegInfo dbgdidr = { 6546 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, 6547 .opc1 = 0, .opc2 = 0, 6548 .access = PL0_R, .accessfn = access_tda, 6549 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 6550 }; 6551 define_one_arm_cp_reg(cpu, &dbgdidr); 6552 } 6553 6554 /* Note that all these register fields hold "number of Xs minus 1". 
*/ 6555 brps = arm_num_brps(cpu); 6556 wrps = arm_num_wrps(cpu); 6557 ctx_cmps = arm_num_ctx_cmps(cpu); 6558 6559 assert(ctx_cmps <= brps); 6560 6561 define_arm_cp_regs(cpu, debug_cp_reginfo); 6562 6563 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 6564 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 6565 } 6566 6567 for (i = 0; i < brps; i++) { 6568 ARMCPRegInfo dbgregs[] = { 6569 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 6570 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 6571 .access = PL1_RW, .accessfn = access_tda, 6572 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 6573 .writefn = dbgbvr_write, .raw_writefn = raw_write 6574 }, 6575 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 6576 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 6577 .access = PL1_RW, .accessfn = access_tda, 6578 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 6579 .writefn = dbgbcr_write, .raw_writefn = raw_write 6580 }, 6581 }; 6582 define_arm_cp_regs(cpu, dbgregs); 6583 } 6584 6585 for (i = 0; i < wrps; i++) { 6586 ARMCPRegInfo dbgregs[] = { 6587 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 6588 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 6589 .access = PL1_RW, .accessfn = access_tda, 6590 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 6591 .writefn = dbgwvr_write, .raw_writefn = raw_write 6592 }, 6593 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 6594 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 6595 .access = PL1_RW, .accessfn = access_tda, 6596 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 6597 .writefn = dbgwcr_write, .raw_writefn = raw_write 6598 }, 6599 }; 6600 define_arm_cp_regs(cpu, dbgregs); 6601 } 6602 } 6603 6604 static void define_pmu_regs(ARMCPU *cpu) 6605 { 6606 /* 6607 * v7 performance monitor control register: same implementor 6608 * field as main ID register, and we implement four counters in 6609 * addition to the cycle count register. 
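 * As a sketch of the resulting reset value, assuming the QEMU default of
 * PMCR_NUM_COUNTERS == 4: PMCR_EL0.N reads as 4, the IMP field mirrors
 * MIDR[31:24], the LC bit is set, and all other fields reset to zero.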
6610 */ 6611 unsigned int i, pmcrn = PMCR_NUM_COUNTERS; 6612 ARMCPRegInfo pmcr = { 6613 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6614 .access = PL0_RW, 6615 .type = ARM_CP_IO | ARM_CP_ALIAS, 6616 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6617 .accessfn = pmreg_access, .writefn = pmcr_write, 6618 .raw_writefn = raw_write, 6619 }; 6620 ARMCPRegInfo pmcr64 = { 6621 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6622 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6623 .access = PL0_RW, .accessfn = pmreg_access, 6624 .type = ARM_CP_IO, 6625 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6626 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | 6627 PMCRLC, 6628 .writefn = pmcr_write, .raw_writefn = raw_write, 6629 }; 6630 define_one_arm_cp_reg(cpu, &pmcr); 6631 define_one_arm_cp_reg(cpu, &pmcr64); 6632 for (i = 0; i < pmcrn; i++) { 6633 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6634 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6635 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6636 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6637 ARMCPRegInfo pmev_regs[] = { 6638 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6639 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6640 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6641 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6642 .accessfn = pmreg_access_xevcntr }, 6643 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6644 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6645 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr, 6646 .type = ARM_CP_IO, 6647 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6648 .raw_readfn = pmevcntr_rawread, 6649 .raw_writefn = pmevcntr_rawwrite }, 6650 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6651 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6652 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6653 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6654 .accessfn = pmreg_access }, 6655 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6656 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6657 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6658 .type = ARM_CP_IO, 6659 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6660 .raw_writefn = pmevtyper_rawwrite }, 6661 }; 6662 define_arm_cp_regs(cpu, pmev_regs); 6663 g_free(pmevcntr_name); 6664 g_free(pmevcntr_el0_name); 6665 g_free(pmevtyper_name); 6666 g_free(pmevtyper_el0_name); 6667 } 6668 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { 6669 ARMCPRegInfo v81_pmu_regs[] = { 6670 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6671 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6672 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6673 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6674 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6675 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6676 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6677 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6678 }; 6679 define_arm_cp_regs(cpu, v81_pmu_regs); 6680 } 6681 if (cpu_isar_feature(any_pmu_8_4, cpu)) { 6682 static const ARMCPRegInfo v84_pmmir = { 6683 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, 6684 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, 6685 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6686 .resetvalue = 0 6687 }; 6688 define_one_arm_cp_reg(cpu, 
&v84_pmmir); 6689 } 6690 } 6691 6692 /* We don't know until after realize whether there's a GICv3 6693 * attached, and that is what registers the gicv3 sysregs. 6694 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 6695 * at runtime. 6696 */ 6697 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 6698 { 6699 ARMCPU *cpu = env_archcpu(env); 6700 uint64_t pfr1 = cpu->isar.id_pfr1; 6701 6702 if (env->gicv3state) { 6703 pfr1 |= 1 << 28; 6704 } 6705 return pfr1; 6706 } 6707 6708 #ifndef CONFIG_USER_ONLY 6709 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 6710 { 6711 ARMCPU *cpu = env_archcpu(env); 6712 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 6713 6714 if (env->gicv3state) { 6715 pfr0 |= 1 << 24; 6716 } 6717 return pfr0; 6718 } 6719 #endif 6720 6721 /* Shared logic between LORID and the rest of the LOR* registers. 6722 * Secure state exclusion has already been dealt with. 6723 */ 6724 static CPAccessResult access_lor_ns(CPUARMState *env, 6725 const ARMCPRegInfo *ri, bool isread) 6726 { 6727 int el = arm_current_el(env); 6728 6729 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 6730 return CP_ACCESS_TRAP_EL2; 6731 } 6732 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 6733 return CP_ACCESS_TRAP_EL3; 6734 } 6735 return CP_ACCESS_OK; 6736 } 6737 6738 static CPAccessResult access_lor_other(CPUARMState *env, 6739 const ARMCPRegInfo *ri, bool isread) 6740 { 6741 if (arm_is_secure_below_el3(env)) { 6742 /* Access denied in secure mode. */ 6743 return CP_ACCESS_TRAP; 6744 } 6745 return access_lor_ns(env, ri, isread); 6746 } 6747 6748 /* 6749 * A trivial implementation of ARMv8.1-LOR leaves all of these 6750 * registers fixed at 0, which indicates that there are zero 6751 * supported Limited Ordering regions. 
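 * In particular LORID_EL1 reads as zero, so a guest probing for LOR
 * support sees no regions and has nothing to program through
 * LORSA_EL1, LOREA_EL1, LORN_EL1 or LORC_EL1.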
6752 */ 6753 static const ARMCPRegInfo lor_reginfo[] = { 6754 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6755 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6756 .access = PL1_RW, .accessfn = access_lor_other, 6757 .type = ARM_CP_CONST, .resetvalue = 0 }, 6758 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6759 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6760 .access = PL1_RW, .accessfn = access_lor_other, 6761 .type = ARM_CP_CONST, .resetvalue = 0 }, 6762 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6763 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6764 .access = PL1_RW, .accessfn = access_lor_other, 6765 .type = ARM_CP_CONST, .resetvalue = 0 }, 6766 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6767 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6768 .access = PL1_RW, .accessfn = access_lor_other, 6769 .type = ARM_CP_CONST, .resetvalue = 0 }, 6770 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6771 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6772 .access = PL1_R, .accessfn = access_lor_ns, 6773 .type = ARM_CP_CONST, .resetvalue = 0 }, 6774 }; 6775 6776 #ifdef TARGET_AARCH64 6777 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 6778 bool isread) 6779 { 6780 int el = arm_current_el(env); 6781 6782 if (el < 2 && 6783 arm_feature(env, ARM_FEATURE_EL2) && 6784 !(arm_hcr_el2_eff(env) & HCR_APK)) { 6785 return CP_ACCESS_TRAP_EL2; 6786 } 6787 if (el < 3 && 6788 arm_feature(env, ARM_FEATURE_EL3) && 6789 !(env->cp15.scr_el3 & SCR_APK)) { 6790 return CP_ACCESS_TRAP_EL3; 6791 } 6792 return CP_ACCESS_OK; 6793 } 6794 6795 static const ARMCPRegInfo pauth_reginfo[] = { 6796 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6797 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 6798 .access = PL1_RW, .accessfn = access_pauth, 6799 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 6800 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6801 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 6802 .access = PL1_RW, .accessfn = access_pauth, 6803 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 6804 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6805 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 6806 .access = PL1_RW, .accessfn = access_pauth, 6807 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 6808 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6809 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 6810 .access = PL1_RW, .accessfn = access_pauth, 6811 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 6812 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6813 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 6814 .access = PL1_RW, .accessfn = access_pauth, 6815 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 6816 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6817 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 6818 .access = PL1_RW, .accessfn = access_pauth, 6819 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 6820 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6821 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 6822 .access = PL1_RW, .accessfn = access_pauth, 6823 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, 6824 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6825 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 6826 .access = PL1_RW, .accessfn = access_pauth, 6827 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 6828 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6829 
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 6830 .access = PL1_RW, .accessfn = access_pauth, 6831 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, 6832 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6833 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 6834 .access = PL1_RW, .accessfn = access_pauth, 6835 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, 6836 }; 6837 6838 static const ARMCPRegInfo tlbirange_reginfo[] = { 6839 { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, 6840 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, 6841 .access = PL1_W, .type = ARM_CP_NO_RAW, 6842 .writefn = tlbi_aa64_rvae1is_write }, 6843 { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, 6844 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, 6845 .access = PL1_W, .type = ARM_CP_NO_RAW, 6846 .writefn = tlbi_aa64_rvae1is_write }, 6847 { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, 6848 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, 6849 .access = PL1_W, .type = ARM_CP_NO_RAW, 6850 .writefn = tlbi_aa64_rvae1is_write }, 6851 { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, 6852 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, 6853 .access = PL1_W, .type = ARM_CP_NO_RAW, 6854 .writefn = tlbi_aa64_rvae1is_write }, 6855 { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, 6856 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 6857 .access = PL1_W, .type = ARM_CP_NO_RAW, 6858 .writefn = tlbi_aa64_rvae1is_write }, 6859 { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, 6860 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, 6861 .access = PL1_W, .type = ARM_CP_NO_RAW, 6862 .writefn = tlbi_aa64_rvae1is_write }, 6863 { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, 6864 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, 6865 .access = PL1_W, .type = ARM_CP_NO_RAW, 6866 .writefn = tlbi_aa64_rvae1is_write }, 6867 { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, 6868 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, 6869 .access = PL1_W, .type = ARM_CP_NO_RAW, 6870 .writefn = tlbi_aa64_rvae1is_write }, 6871 { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, 6872 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 6873 .access = PL1_W, .type = ARM_CP_NO_RAW, 6874 .writefn = tlbi_aa64_rvae1_write }, 6875 { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, 6876 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, 6877 .access = PL1_W, .type = ARM_CP_NO_RAW, 6878 .writefn = tlbi_aa64_rvae1_write }, 6879 { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, 6880 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, 6881 .access = PL1_W, .type = ARM_CP_NO_RAW, 6882 .writefn = tlbi_aa64_rvae1_write }, 6883 { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, 6884 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, 6885 .access = PL1_W, .type = ARM_CP_NO_RAW, 6886 .writefn = tlbi_aa64_rvae1_write }, 6887 { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, 6888 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, 6889 .access = PL2_W, .type = ARM_CP_NOP }, 6890 { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, 6891 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, 6892 .access = PL2_W, .type = ARM_CP_NOP }, 6893 { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, 6894 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, 6895 .access = PL2_W, .type = ARM_CP_NO_RAW, 6896 .writefn = tlbi_aa64_rvae2is_write }, 6897 { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, 6898 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, 
.opc2 = 5, 6899 .access = PL2_W, .type = ARM_CP_NO_RAW, 6900 .writefn = tlbi_aa64_rvae2is_write }, 6901 { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, 6902 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, 6903 .access = PL2_W, .type = ARM_CP_NOP }, 6904 { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, 6905 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, 6906 .access = PL2_W, .type = ARM_CP_NOP }, 6907 { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, 6908 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, 6909 .access = PL2_W, .type = ARM_CP_NO_RAW, 6910 .writefn = tlbi_aa64_rvae2is_write }, 6911 { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, 6912 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, 6913 .access = PL2_W, .type = ARM_CP_NO_RAW, 6914 .writefn = tlbi_aa64_rvae2is_write }, 6915 { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, 6916 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, 6917 .access = PL2_W, .type = ARM_CP_NO_RAW, 6918 .writefn = tlbi_aa64_rvae2_write }, 6919 { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, 6920 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, 6921 .access = PL2_W, .type = ARM_CP_NO_RAW, 6922 .writefn = tlbi_aa64_rvae2_write }, 6923 { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, 6924 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, 6925 .access = PL3_W, .type = ARM_CP_NO_RAW, 6926 .writefn = tlbi_aa64_rvae3is_write }, 6927 { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, 6928 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, 6929 .access = PL3_W, .type = ARM_CP_NO_RAW, 6930 .writefn = tlbi_aa64_rvae3is_write }, 6931 { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, 6932 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, 6933 .access = PL3_W, .type = ARM_CP_NO_RAW, 6934 .writefn = tlbi_aa64_rvae3is_write }, 6935 { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, 6936 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, 6937 .access = PL3_W, .type = ARM_CP_NO_RAW, 6938 .writefn = tlbi_aa64_rvae3is_write }, 6939 { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, 6940 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, 6941 .access = PL3_W, .type = ARM_CP_NO_RAW, 6942 .writefn = tlbi_aa64_rvae3_write }, 6943 { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, 6944 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, 6945 .access = PL3_W, .type = ARM_CP_NO_RAW, 6946 .writefn = tlbi_aa64_rvae3_write }, 6947 }; 6948 6949 static const ARMCPRegInfo tlbios_reginfo[] = { 6950 { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, 6951 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, 6952 .access = PL1_W, .type = ARM_CP_NO_RAW, 6953 .writefn = tlbi_aa64_vmalle1is_write }, 6954 { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, 6955 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, 6956 .access = PL1_W, .type = ARM_CP_NO_RAW, 6957 .writefn = tlbi_aa64_vae1is_write }, 6958 { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, 6959 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, 6960 .access = PL1_W, .type = ARM_CP_NO_RAW, 6961 .writefn = tlbi_aa64_vmalle1is_write }, 6962 { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, 6963 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, 6964 .access = PL1_W, .type = ARM_CP_NO_RAW, 6965 .writefn = tlbi_aa64_vae1is_write }, 6966 { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, 6967 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, 6968 .access = PL1_W, .type = ARM_CP_NO_RAW, 6969 .writefn = tlbi_aa64_vae1is_write 
},
6970 { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
6971 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
6972 .access = PL1_W, .type = ARM_CP_NO_RAW,
6973 .writefn = tlbi_aa64_vae1is_write },
6974 { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
6975 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
6976 .access = PL2_W, .type = ARM_CP_NO_RAW,
6977 .writefn = tlbi_aa64_alle2is_write },
6978 { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
6979 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
6980 .access = PL2_W, .type = ARM_CP_NO_RAW,
6981 .writefn = tlbi_aa64_vae2is_write },
6982 { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
6983 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
6984 .access = PL2_W, .type = ARM_CP_NO_RAW,
6985 .writefn = tlbi_aa64_alle1is_write },
6986 { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
6987 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
6988 .access = PL2_W, .type = ARM_CP_NO_RAW,
6989 .writefn = tlbi_aa64_vae2is_write },
6990 { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
6991 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
6992 .access = PL2_W, .type = ARM_CP_NO_RAW,
6993 .writefn = tlbi_aa64_alle1is_write },
6994 { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64,
6995 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0,
6996 .access = PL2_W, .type = ARM_CP_NOP },
6997 { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64,
6998 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3,
6999 .access = PL2_W, .type = ARM_CP_NOP },
7000 { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7001 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4,
7002 .access = PL2_W, .type = ARM_CP_NOP },
7003 { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64,
7004 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7,
7005 .access = PL2_W, .type = ARM_CP_NOP },
7006 { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64,
7007 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
7008 .access = PL3_W, .type = ARM_CP_NO_RAW,
7009 .writefn = tlbi_aa64_alle3is_write },
7010 { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
7011 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
7012 .access = PL3_W, .type = ARM_CP_NO_RAW,
7013 .writefn = tlbi_aa64_vae3is_write },
7014 { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
7015 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
7016 .access = PL3_W, .type = ARM_CP_NO_RAW,
7017 .writefn = tlbi_aa64_vae3is_write },
7018 };
7019
7020 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
7021 {
7022 Error *err = NULL;
7023 uint64_t ret;
7024
7025 /* Success sets NZCV = 0000. */
7026 env->NF = env->CF = env->VF = 0, env->ZF = 1;
7027
7028 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
7029 /*
7030 * ??? Failed, for unknown reasons in the crypto subsystem.
7031 * The best we can do is log the reason and return the
7032 * timed-out indication to the guest. There is no reason
7033 * we know to expect this failure to be transitory, so the
7034 * guest may well hang retrying the operation.
7035 */
7036 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
7037 ri->name, error_get_pretty(err));
7038 error_free(err);
7039
7040 env->ZF = 0; /* NZCV = 0100 */
7041 return 0;
7042 }
7043 return ret;
7044 }
7045
7046 /* We do not support re-seeding, so the two registers operate the same.
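 * (Architecturally RNDRRS is meant to reseed the underlying generator
 * before returning a value; since rndr_readfn() draws fresh bytes from
 * qemu_guest_getrandom() on every access anyway, sharing one readfn is
 * a reasonable approximation.)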
*/ 7047 static const ARMCPRegInfo rndr_reginfo[] = { 7048 { .name = "RNDR", .state = ARM_CP_STATE_AA64, 7049 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 7050 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, 7051 .access = PL0_R, .readfn = rndr_readfn }, 7052 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, 7053 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 7054 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, 7055 .access = PL0_R, .readfn = rndr_readfn }, 7056 }; 7057 7058 #ifndef CONFIG_USER_ONLY 7059 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, 7060 uint64_t value) 7061 { 7062 ARMCPU *cpu = env_archcpu(env); 7063 /* CTR_EL0 System register -> DminLine, bits [19:16] */ 7064 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); 7065 uint64_t vaddr_in = (uint64_t) value; 7066 uint64_t vaddr = vaddr_in & ~(dline_size - 1); 7067 void *haddr; 7068 int mem_idx = cpu_mmu_index(env, false); 7069 7070 /* This won't be crossing page boundaries */ 7071 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); 7072 if (haddr) { 7073 7074 ram_addr_t offset; 7075 MemoryRegion *mr; 7076 7077 /* RCU lock is already being held */ 7078 mr = memory_region_from_host(haddr, &offset); 7079 7080 if (mr) { 7081 memory_region_writeback(mr, offset, dline_size); 7082 } 7083 } 7084 } 7085 7086 static const ARMCPRegInfo dcpop_reg[] = { 7087 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, 7088 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, 7089 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 7090 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 7091 }; 7092 7093 static const ARMCPRegInfo dcpodp_reg[] = { 7094 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, 7095 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, 7096 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 7097 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 7098 }; 7099 #endif /*CONFIG_USER_ONLY*/ 7100 7101 static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri, 7102 bool isread) 7103 { 7104 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) { 7105 return CP_ACCESS_TRAP_EL2; 7106 } 7107 7108 return CP_ACCESS_OK; 7109 } 7110 7111 static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, 7112 bool isread) 7113 { 7114 int el = arm_current_el(env); 7115 7116 if (el < 2 && arm_is_el2_enabled(env)) { 7117 uint64_t hcr = arm_hcr_el2_eff(env); 7118 if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { 7119 return CP_ACCESS_TRAP_EL2; 7120 } 7121 } 7122 if (el < 3 && 7123 arm_feature(env, ARM_FEATURE_EL3) && 7124 !(env->cp15.scr_el3 & SCR_ATA)) { 7125 return CP_ACCESS_TRAP_EL3; 7126 } 7127 return CP_ACCESS_OK; 7128 } 7129 7130 static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri) 7131 { 7132 return env->pstate & PSTATE_TCO; 7133 } 7134 7135 static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 7136 { 7137 env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); 7138 } 7139 7140 static const ARMCPRegInfo mte_reginfo[] = { 7141 { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64, 7142 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1, 7143 .access = PL1_RW, .accessfn = access_mte, 7144 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) }, 7145 { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64, 7146 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0, 7147 .access = PL1_RW, .accessfn = access_mte, 7148 
.fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, 7149 { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, 7150 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0, 7151 .access = PL2_RW, .accessfn = access_mte, 7152 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) }, 7153 { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64, 7154 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0, 7155 .access = PL3_RW, 7156 .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) }, 7157 { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64, 7158 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5, 7159 .access = PL1_RW, .accessfn = access_mte, 7160 .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) }, 7161 { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64, 7162 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6, 7163 .access = PL1_RW, .accessfn = access_mte, 7164 .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) }, 7165 { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, 7166 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4, 7167 .access = PL1_R, .accessfn = access_aa64_tid5, 7168 .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS }, 7169 { .name = "TCO", .state = ARM_CP_STATE_AA64, 7170 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, 7171 .type = ARM_CP_NO_RAW, 7172 .access = PL0_RW, .readfn = tco_read, .writefn = tco_write }, 7173 { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64, 7174 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3, 7175 .type = ARM_CP_NOP, .access = PL1_W, 7176 .accessfn = aa64_cacheop_poc_access }, 7177 { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64, 7178 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4, 7179 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7180 { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64, 7181 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5, 7182 .type = ARM_CP_NOP, .access = PL1_W, 7183 .accessfn = aa64_cacheop_poc_access }, 7184 { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64, 7185 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6, 7186 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7187 { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64, 7188 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4, 7189 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7190 { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64, 7191 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6, 7192 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7193 { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64, 7194 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4, 7195 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7196 { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64, 7197 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6, 7198 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 7199 }; 7200 7201 static const ARMCPRegInfo mte_tco_ro_reginfo[] = { 7202 { .name = "TCO", .state = ARM_CP_STATE_AA64, 7203 .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, 7204 .type = ARM_CP_CONST, .access = PL0_RW, }, 7205 }; 7206 7207 static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = { 7208 { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64, 7209 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3, 7210 .type = ARM_CP_NOP, .access = PL0_W, 7211 .accessfn = aa64_cacheop_poc_access }, 7212 { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64, 7213 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5, 7214 .type = ARM_CP_NOP, .access = PL0_W, 7215 .accessfn = aa64_cacheop_poc_access }, 7216 { .name = "DC_CGVAP", 
.state = ARM_CP_STATE_AA64, 7217 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3, 7218 .type = ARM_CP_NOP, .access = PL0_W, 7219 .accessfn = aa64_cacheop_poc_access }, 7220 { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64, 7221 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5, 7222 .type = ARM_CP_NOP, .access = PL0_W, 7223 .accessfn = aa64_cacheop_poc_access }, 7224 { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64, 7225 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3, 7226 .type = ARM_CP_NOP, .access = PL0_W, 7227 .accessfn = aa64_cacheop_poc_access }, 7228 { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64, 7229 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5, 7230 .type = ARM_CP_NOP, .access = PL0_W, 7231 .accessfn = aa64_cacheop_poc_access }, 7232 { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64, 7233 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3, 7234 .type = ARM_CP_NOP, .access = PL0_W, 7235 .accessfn = aa64_cacheop_poc_access }, 7236 { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64, 7237 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5, 7238 .type = ARM_CP_NOP, .access = PL0_W, 7239 .accessfn = aa64_cacheop_poc_access }, 7240 { .name = "DC_GVA", .state = ARM_CP_STATE_AA64, 7241 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3, 7242 .access = PL0_W, .type = ARM_CP_DC_GVA, 7243 #ifndef CONFIG_USER_ONLY 7244 /* Avoid overhead of an access check that always passes in user-mode */ 7245 .accessfn = aa64_zva_access, 7246 #endif 7247 }, 7248 { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64, 7249 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4, 7250 .access = PL0_W, .type = ARM_CP_DC_GZVA, 7251 #ifndef CONFIG_USER_ONLY 7252 /* Avoid overhead of an access check that always passes in user-mode */ 7253 .accessfn = aa64_zva_access, 7254 #endif 7255 }, 7256 }; 7257 7258 #endif 7259 7260 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, 7261 bool isread) 7262 { 7263 int el = arm_current_el(env); 7264 7265 if (el == 0) { 7266 uint64_t sctlr = arm_sctlr(env, el); 7267 if (!(sctlr & SCTLR_EnRCTX)) { 7268 return CP_ACCESS_TRAP; 7269 } 7270 } else if (el == 1) { 7271 uint64_t hcr = arm_hcr_el2_eff(env); 7272 if (hcr & HCR_NV) { 7273 return CP_ACCESS_TRAP_EL2; 7274 } 7275 } 7276 return CP_ACCESS_OK; 7277 } 7278 7279 static const ARMCPRegInfo predinv_reginfo[] = { 7280 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 7281 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 7282 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7283 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 7284 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 7285 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7286 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 7287 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 7288 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7289 /* 7290 * Note the AArch32 opcodes have a different OPC1. 
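 * The AArch64 CFP_RCTX/DVP_RCTX/CPP_RCTX encodings above use op1 == 3,
 * while the AArch32 encodings below use opc1 == 0; the CRn, CRm and
 * opc2 values are the same in both views.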
7291 */ 7292 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 7293 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 7294 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7295 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 7296 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 7297 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7298 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 7299 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 7300 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 7301 }; 7302 7303 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) 7304 { 7305 /* Read the high 32 bits of the current CCSIDR */ 7306 return extract64(ccsidr_read(env, ri), 32, 32); 7307 } 7308 7309 static const ARMCPRegInfo ccsidr2_reginfo[] = { 7310 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, 7311 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, 7312 .access = PL1_R, 7313 .accessfn = access_aa64_tid2, 7314 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, 7315 }; 7316 7317 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 7318 bool isread) 7319 { 7320 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 7321 return CP_ACCESS_TRAP_EL2; 7322 } 7323 7324 return CP_ACCESS_OK; 7325 } 7326 7327 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 7328 bool isread) 7329 { 7330 if (arm_feature(env, ARM_FEATURE_V8)) { 7331 return access_aa64_tid3(env, ri, isread); 7332 } 7333 7334 return CP_ACCESS_OK; 7335 } 7336 7337 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 7338 bool isread) 7339 { 7340 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 7341 return CP_ACCESS_TRAP_EL2; 7342 } 7343 7344 return CP_ACCESS_OK; 7345 } 7346 7347 static CPAccessResult access_joscr_jmcr(CPUARMState *env, 7348 const ARMCPRegInfo *ri, bool isread) 7349 { 7350 /* 7351 * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only 7352 * in v7A, not in v8A. 
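 * Hence the !ARM_FEATURE_V8 check below: on a v8 CPU any value the guest
 * may have left in that HSTR_EL2 bit is ignored and no trap to EL2 is
 * generated for these accesses.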
7353 */ 7354 if (!arm_feature(env, ARM_FEATURE_V8) && 7355 arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && 7356 (env->cp15.hstr_el2 & HSTR_TJDBX)) { 7357 return CP_ACCESS_TRAP_EL2; 7358 } 7359 return CP_ACCESS_OK; 7360 } 7361 7362 static const ARMCPRegInfo jazelle_regs[] = { 7363 { .name = "JIDR", 7364 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 7365 .access = PL1_R, .accessfn = access_jazelle, 7366 .type = ARM_CP_CONST, .resetvalue = 0 }, 7367 { .name = "JOSCR", 7368 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 7369 .accessfn = access_joscr_jmcr, 7370 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 7371 { .name = "JMCR", 7372 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 7373 .accessfn = access_joscr_jmcr, 7374 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 7375 }; 7376 7377 static const ARMCPRegInfo vhe_reginfo[] = { 7378 { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, 7379 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 7380 .access = PL2_RW, 7381 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, 7382 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 7383 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 7384 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 7385 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 7386 #ifndef CONFIG_USER_ONLY 7387 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 7388 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, 7389 .fieldoffset = 7390 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 7391 .type = ARM_CP_IO, .access = PL2_RW, 7392 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 7393 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 7394 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 7395 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 7396 .resetfn = gt_hv_timer_reset, 7397 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 7398 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 7399 .type = ARM_CP_IO, 7400 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 7401 .access = PL2_RW, 7402 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 7403 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 7404 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 7405 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 7406 .type = ARM_CP_IO | ARM_CP_ALIAS, 7407 .access = PL2_RW, .accessfn = e2h_access, 7408 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 7409 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 7410 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 7411 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 7412 .type = ARM_CP_IO | ARM_CP_ALIAS, 7413 .access = PL2_RW, .accessfn = e2h_access, 7414 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 7415 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 7416 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7417 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 7418 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7419 .access = PL2_RW, .accessfn = e2h_access, 7420 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 7421 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 7422 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, 7423 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 7424 .access = PL2_RW, .accessfn = e2h_access, 7425 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 7426 { .name = "CNTP_CVAL_EL02", 
.state = ARM_CP_STATE_AA64, 7427 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 7428 .type = ARM_CP_IO | ARM_CP_ALIAS, 7429 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 7430 .access = PL2_RW, .accessfn = e2h_access, 7431 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 7432 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 7433 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 7434 .type = ARM_CP_IO | ARM_CP_ALIAS, 7435 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 7436 .access = PL2_RW, .accessfn = e2h_access, 7437 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 7438 #endif 7439 }; 7440 7441 #ifndef CONFIG_USER_ONLY 7442 static const ARMCPRegInfo ats1e1_reginfo[] = { 7443 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 7444 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7445 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7446 .writefn = ats_write64 }, 7447 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 7448 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7449 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7450 .writefn = ats_write64 }, 7451 }; 7452 7453 static const ARMCPRegInfo ats1cp_reginfo[] = { 7454 { .name = "ATS1CPRP", 7455 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7456 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7457 .writefn = ats_write }, 7458 { .name = "ATS1CPWP", 7459 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7460 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7461 .writefn = ats_write }, 7462 }; 7463 #endif 7464 7465 /* 7466 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and 7467 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field 7468 * is non-zero, which is never for ARMv7, optionally in ARMv8 7469 * and mandatorily for ARMv8.2 and up. 7470 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's 7471 * implementation is RAZ/WI we can ignore this detail, as we 7472 * do for ACTLR. 7473 */ 7474 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { 7475 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, 7476 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, 7477 .access = PL1_RW, .accessfn = access_tacr, 7478 .type = ARM_CP_CONST, .resetvalue = 0 }, 7479 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7480 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7481 .access = PL2_RW, .type = ARM_CP_CONST, 7482 .resetvalue = 0 }, 7483 }; 7484 7485 void register_cp_regs_for_features(ARMCPU *cpu) 7486 { 7487 /* Register all the coprocessor registers based on feature bits */ 7488 CPUARMState *env = &cpu->env; 7489 if (arm_feature(env, ARM_FEATURE_M)) { 7490 /* M profile has no coprocessor registers */ 7491 return; 7492 } 7493 7494 define_arm_cp_regs(cpu, cp_reginfo); 7495 if (!arm_feature(env, ARM_FEATURE_V8)) { 7496 /* Must go early as it is full of wildcards that may be 7497 * overridden by later definitions. 7498 */ 7499 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 7500 } 7501 7502 if (arm_feature(env, ARM_FEATURE_V6)) { 7503 /* The ID registers all have impdef reset values */ 7504 ARMCPRegInfo v6_idregs[] = { 7505 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 7506 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 7507 .access = PL1_R, .type = ARM_CP_CONST, 7508 .accessfn = access_aa32_tid3, 7509 .resetvalue = cpu->isar.id_pfr0 }, 7510 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 7511 * the value of the GIC field until after we define these regs. 
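 * Instead it is ARM_CP_NO_RAW with id_pfr1_read() as its readfn; that
 * helper sets the GIC field, bits [31:28], to 1 at read time if a GICv3
 * is attached.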
7512 */ 7513 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 7514 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 7515 .access = PL1_R, .type = ARM_CP_NO_RAW, 7516 .accessfn = access_aa32_tid3, 7517 .readfn = id_pfr1_read, 7518 .writefn = arm_cp_write_ignore }, 7519 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 7520 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 7521 .access = PL1_R, .type = ARM_CP_CONST, 7522 .accessfn = access_aa32_tid3, 7523 .resetvalue = cpu->isar.id_dfr0 }, 7524 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 7525 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 7526 .access = PL1_R, .type = ARM_CP_CONST, 7527 .accessfn = access_aa32_tid3, 7528 .resetvalue = cpu->id_afr0 }, 7529 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 7530 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 7531 .access = PL1_R, .type = ARM_CP_CONST, 7532 .accessfn = access_aa32_tid3, 7533 .resetvalue = cpu->isar.id_mmfr0 }, 7534 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 7535 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 7536 .access = PL1_R, .type = ARM_CP_CONST, 7537 .accessfn = access_aa32_tid3, 7538 .resetvalue = cpu->isar.id_mmfr1 }, 7539 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 7540 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 7541 .access = PL1_R, .type = ARM_CP_CONST, 7542 .accessfn = access_aa32_tid3, 7543 .resetvalue = cpu->isar.id_mmfr2 }, 7544 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 7545 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 7546 .access = PL1_R, .type = ARM_CP_CONST, 7547 .accessfn = access_aa32_tid3, 7548 .resetvalue = cpu->isar.id_mmfr3 }, 7549 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 7550 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 7551 .access = PL1_R, .type = ARM_CP_CONST, 7552 .accessfn = access_aa32_tid3, 7553 .resetvalue = cpu->isar.id_isar0 }, 7554 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 7555 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 7556 .access = PL1_R, .type = ARM_CP_CONST, 7557 .accessfn = access_aa32_tid3, 7558 .resetvalue = cpu->isar.id_isar1 }, 7559 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 7560 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 7561 .access = PL1_R, .type = ARM_CP_CONST, 7562 .accessfn = access_aa32_tid3, 7563 .resetvalue = cpu->isar.id_isar2 }, 7564 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 7565 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 7566 .access = PL1_R, .type = ARM_CP_CONST, 7567 .accessfn = access_aa32_tid3, 7568 .resetvalue = cpu->isar.id_isar3 }, 7569 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 7570 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 7571 .access = PL1_R, .type = ARM_CP_CONST, 7572 .accessfn = access_aa32_tid3, 7573 .resetvalue = cpu->isar.id_isar4 }, 7574 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 7575 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 7576 .access = PL1_R, .type = ARM_CP_CONST, 7577 .accessfn = access_aa32_tid3, 7578 .resetvalue = cpu->isar.id_isar5 }, 7579 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 7580 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 7581 .access = PL1_R, .type = ARM_CP_CONST, 7582 .accessfn = access_aa32_tid3, 7583 .resetvalue = cpu->isar.id_mmfr4 }, 7584 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 7585 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 7586 .access = PL1_R, .type = ARM_CP_CONST, 7587 .accessfn = access_aa32_tid3, 7588 .resetvalue = cpu->isar.id_isar6 }, 7589 }; 7590 
define_arm_cp_regs(cpu, v6_idregs); 7591 define_arm_cp_regs(cpu, v6_cp_reginfo); 7592 } else { 7593 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 7594 } 7595 if (arm_feature(env, ARM_FEATURE_V6K)) { 7596 define_arm_cp_regs(cpu, v6k_cp_reginfo); 7597 } 7598 if (arm_feature(env, ARM_FEATURE_V7MP) && 7599 !arm_feature(env, ARM_FEATURE_PMSA)) { 7600 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 7601 } 7602 if (arm_feature(env, ARM_FEATURE_V7VE)) { 7603 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 7604 } 7605 if (arm_feature(env, ARM_FEATURE_V7)) { 7606 ARMCPRegInfo clidr = { 7607 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 7608 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 7609 .access = PL1_R, .type = ARM_CP_CONST, 7610 .accessfn = access_aa64_tid2, 7611 .resetvalue = cpu->clidr 7612 }; 7613 define_one_arm_cp_reg(cpu, &clidr); 7614 define_arm_cp_regs(cpu, v7_cp_reginfo); 7615 define_debug_regs(cpu); 7616 define_pmu_regs(cpu); 7617 } else { 7618 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 7619 } 7620 if (arm_feature(env, ARM_FEATURE_V8)) { 7621 /* AArch64 ID registers, which all have impdef reset values. 7622 * Note that within the ID register ranges the unused slots 7623 * must all RAZ, not UNDEF; future architecture versions may 7624 * define new registers here. 7625 */ 7626 ARMCPRegInfo v8_idregs[] = { 7627 /* 7628 * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system 7629 * emulation because we don't know the right value for the 7630 * GIC field until after we define these regs. 7631 */ 7632 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 7633 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 7634 .access = PL1_R, 7635 #ifdef CONFIG_USER_ONLY 7636 .type = ARM_CP_CONST, 7637 .resetvalue = cpu->isar.id_aa64pfr0 7638 #else 7639 .type = ARM_CP_NO_RAW, 7640 .accessfn = access_aa64_tid3, 7641 .readfn = id_aa64pfr0_read, 7642 .writefn = arm_cp_write_ignore 7643 #endif 7644 }, 7645 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 7646 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 7647 .access = PL1_R, .type = ARM_CP_CONST, 7648 .accessfn = access_aa64_tid3, 7649 .resetvalue = cpu->isar.id_aa64pfr1}, 7650 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7651 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 7652 .access = PL1_R, .type = ARM_CP_CONST, 7653 .accessfn = access_aa64_tid3, 7654 .resetvalue = 0 }, 7655 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7656 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 7657 .access = PL1_R, .type = ARM_CP_CONST, 7658 .accessfn = access_aa64_tid3, 7659 .resetvalue = 0 }, 7660 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 7661 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 7662 .access = PL1_R, .type = ARM_CP_CONST, 7663 .accessfn = access_aa64_tid3, 7664 .resetvalue = cpu->isar.id_aa64zfr0 }, 7665 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7666 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 7667 .access = PL1_R, .type = ARM_CP_CONST, 7668 .accessfn = access_aa64_tid3, 7669 .resetvalue = 0 }, 7670 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7671 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 7672 .access = PL1_R, .type = ARM_CP_CONST, 7673 .accessfn = access_aa64_tid3, 7674 .resetvalue = 0 }, 7675 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7676 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 7677 .access = PL1_R, .type = ARM_CP_CONST, 7678 .accessfn = access_aa64_tid3, 7679 
.resetvalue = 0 }, 7680 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7681 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7682 .access = PL1_R, .type = ARM_CP_CONST, 7683 .accessfn = access_aa64_tid3, 7684 .resetvalue = cpu->isar.id_aa64dfr0 }, 7685 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7686 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7687 .access = PL1_R, .type = ARM_CP_CONST, 7688 .accessfn = access_aa64_tid3, 7689 .resetvalue = cpu->isar.id_aa64dfr1 }, 7690 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7691 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7692 .access = PL1_R, .type = ARM_CP_CONST, 7693 .accessfn = access_aa64_tid3, 7694 .resetvalue = 0 }, 7695 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7696 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7697 .access = PL1_R, .type = ARM_CP_CONST, 7698 .accessfn = access_aa64_tid3, 7699 .resetvalue = 0 }, 7700 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7701 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7702 .access = PL1_R, .type = ARM_CP_CONST, 7703 .accessfn = access_aa64_tid3, 7704 .resetvalue = cpu->id_aa64afr0 }, 7705 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7706 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7707 .access = PL1_R, .type = ARM_CP_CONST, 7708 .accessfn = access_aa64_tid3, 7709 .resetvalue = cpu->id_aa64afr1 }, 7710 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7711 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7712 .access = PL1_R, .type = ARM_CP_CONST, 7713 .accessfn = access_aa64_tid3, 7714 .resetvalue = 0 }, 7715 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7716 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7717 .access = PL1_R, .type = ARM_CP_CONST, 7718 .accessfn = access_aa64_tid3, 7719 .resetvalue = 0 }, 7720 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7721 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7722 .access = PL1_R, .type = ARM_CP_CONST, 7723 .accessfn = access_aa64_tid3, 7724 .resetvalue = cpu->isar.id_aa64isar0 }, 7725 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7726 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7727 .access = PL1_R, .type = ARM_CP_CONST, 7728 .accessfn = access_aa64_tid3, 7729 .resetvalue = cpu->isar.id_aa64isar1 }, 7730 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7731 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7732 .access = PL1_R, .type = ARM_CP_CONST, 7733 .accessfn = access_aa64_tid3, 7734 .resetvalue = 0 }, 7735 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7736 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 7737 .access = PL1_R, .type = ARM_CP_CONST, 7738 .accessfn = access_aa64_tid3, 7739 .resetvalue = 0 }, 7740 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7741 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 7742 .access = PL1_R, .type = ARM_CP_CONST, 7743 .accessfn = access_aa64_tid3, 7744 .resetvalue = 0 }, 7745 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7746 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 7747 .access = PL1_R, .type = ARM_CP_CONST, 7748 .accessfn = access_aa64_tid3, 7749 .resetvalue = 0 }, 7750 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7751 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 7752 .access = PL1_R, .type = ARM_CP_CONST, 7753 .accessfn = access_aa64_tid3, 7754 
.resetvalue = 0 }, 7755 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7756 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 7757 .access = PL1_R, .type = ARM_CP_CONST, 7758 .accessfn = access_aa64_tid3, 7759 .resetvalue = 0 }, 7760 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 7761 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 7762 .access = PL1_R, .type = ARM_CP_CONST, 7763 .accessfn = access_aa64_tid3, 7764 .resetvalue = cpu->isar.id_aa64mmfr0 }, 7765 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 7766 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 7767 .access = PL1_R, .type = ARM_CP_CONST, 7768 .accessfn = access_aa64_tid3, 7769 .resetvalue = cpu->isar.id_aa64mmfr1 }, 7770 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 7771 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 7772 .access = PL1_R, .type = ARM_CP_CONST, 7773 .accessfn = access_aa64_tid3, 7774 .resetvalue = cpu->isar.id_aa64mmfr2 }, 7775 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7776 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 7777 .access = PL1_R, .type = ARM_CP_CONST, 7778 .accessfn = access_aa64_tid3, 7779 .resetvalue = 0 }, 7780 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7781 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 7782 .access = PL1_R, .type = ARM_CP_CONST, 7783 .accessfn = access_aa64_tid3, 7784 .resetvalue = 0 }, 7785 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7786 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 7787 .access = PL1_R, .type = ARM_CP_CONST, 7788 .accessfn = access_aa64_tid3, 7789 .resetvalue = 0 }, 7790 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7791 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 7792 .access = PL1_R, .type = ARM_CP_CONST, 7793 .accessfn = access_aa64_tid3, 7794 .resetvalue = 0 }, 7795 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7796 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 7797 .access = PL1_R, .type = ARM_CP_CONST, 7798 .accessfn = access_aa64_tid3, 7799 .resetvalue = 0 }, 7800 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 7801 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7802 .access = PL1_R, .type = ARM_CP_CONST, 7803 .accessfn = access_aa64_tid3, 7804 .resetvalue = cpu->isar.mvfr0 }, 7805 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 7806 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7807 .access = PL1_R, .type = ARM_CP_CONST, 7808 .accessfn = access_aa64_tid3, 7809 .resetvalue = cpu->isar.mvfr1 }, 7810 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 7811 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7812 .access = PL1_R, .type = ARM_CP_CONST, 7813 .accessfn = access_aa64_tid3, 7814 .resetvalue = cpu->isar.mvfr2 }, 7815 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7816 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 7817 .access = PL1_R, .type = ARM_CP_CONST, 7818 .accessfn = access_aa64_tid3, 7819 .resetvalue = 0 }, 7820 { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH, 7821 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 7822 .access = PL1_R, .type = ARM_CP_CONST, 7823 .accessfn = access_aa64_tid3, 7824 .resetvalue = cpu->isar.id_pfr2 }, 7825 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7826 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 7827 .access = PL1_R, .type = ARM_CP_CONST, 7828 .accessfn = access_aa64_tid3, 7829 .resetvalue = 0 }, 7830 { .name = 
"MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7831 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 7832 .access = PL1_R, .type = ARM_CP_CONST, 7833 .accessfn = access_aa64_tid3, 7834 .resetvalue = 0 }, 7835 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7836 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 7837 .access = PL1_R, .type = ARM_CP_CONST, 7838 .accessfn = access_aa64_tid3, 7839 .resetvalue = 0 }, 7840 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 7841 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 7842 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7843 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 7844 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 7845 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 7846 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7847 .resetvalue = cpu->pmceid0 }, 7848 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 7849 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 7850 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7851 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 7852 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 7853 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 7854 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7855 .resetvalue = cpu->pmceid1 }, 7856 }; 7857 #ifdef CONFIG_USER_ONLY 7858 static const ARMCPRegUserSpaceInfo v8_user_idregs[] = { 7859 { .name = "ID_AA64PFR0_EL1", 7860 .exported_bits = 0x000f000f00ff0000, 7861 .fixed_bits = 0x0000000000000011 }, 7862 { .name = "ID_AA64PFR1_EL1", 7863 .exported_bits = 0x00000000000000f0 }, 7864 { .name = "ID_AA64PFR*_EL1_RESERVED", 7865 .is_glob = true }, 7866 { .name = "ID_AA64ZFR0_EL1" }, 7867 { .name = "ID_AA64MMFR0_EL1", 7868 .fixed_bits = 0x00000000ff000000 }, 7869 { .name = "ID_AA64MMFR1_EL1" }, 7870 { .name = "ID_AA64MMFR*_EL1_RESERVED", 7871 .is_glob = true }, 7872 { .name = "ID_AA64DFR0_EL1", 7873 .fixed_bits = 0x0000000000000006 }, 7874 { .name = "ID_AA64DFR1_EL1" }, 7875 { .name = "ID_AA64DFR*_EL1_RESERVED", 7876 .is_glob = true }, 7877 { .name = "ID_AA64AFR*", 7878 .is_glob = true }, 7879 { .name = "ID_AA64ISAR0_EL1", 7880 .exported_bits = 0x00fffffff0fffff0 }, 7881 { .name = "ID_AA64ISAR1_EL1", 7882 .exported_bits = 0x000000f0ffffffff }, 7883 { .name = "ID_AA64ISAR*_EL1_RESERVED", 7884 .is_glob = true }, 7885 }; 7886 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 7887 #endif 7888 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 7889 if (!arm_feature(env, ARM_FEATURE_EL3) && 7890 !arm_feature(env, ARM_FEATURE_EL2)) { 7891 ARMCPRegInfo rvbar = { 7892 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 7893 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 7894 .access = PL1_R, 7895 .fieldoffset = offsetof(CPUARMState, cp15.rvbar), 7896 }; 7897 define_one_arm_cp_reg(cpu, &rvbar); 7898 } 7899 define_arm_cp_regs(cpu, v8_idregs); 7900 define_arm_cp_regs(cpu, v8_cp_reginfo); 7901 } 7902 if (arm_feature(env, ARM_FEATURE_EL2)) { 7903 uint64_t vmpidr_def = mpidr_read_val(env); 7904 ARMCPRegInfo vpidr_regs[] = { 7905 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 7906 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7907 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7908 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 7909 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 7910 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 7911 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7912 .access = PL2_RW, .resetvalue = cpu->midr, 
7913 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7914 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 7915 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7916 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7917 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 7918 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 7919 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 7920 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7921 .access = PL2_RW, 7922 .resetvalue = vmpidr_def, 7923 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 7924 }; 7925 define_arm_cp_regs(cpu, vpidr_regs); 7926 define_arm_cp_regs(cpu, el2_cp_reginfo); 7927 if (arm_feature(env, ARM_FEATURE_V8)) { 7928 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 7929 } 7930 if (cpu_isar_feature(aa64_sel2, cpu)) { 7931 define_arm_cp_regs(cpu, el2_sec_cp_reginfo); 7932 } 7933 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 7934 if (!arm_feature(env, ARM_FEATURE_EL3)) { 7935 ARMCPRegInfo rvbar = { 7936 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 7937 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 7938 .access = PL2_R, 7939 .fieldoffset = offsetof(CPUARMState, cp15.rvbar), 7940 }; 7941 define_one_arm_cp_reg(cpu, &rvbar); 7942 } 7943 } else { 7944 /* If EL2 is missing but higher ELs are enabled, we need to 7945 * register the no_el2 reginfos. 7946 */ 7947 if (arm_feature(env, ARM_FEATURE_EL3)) { 7948 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 7949 * of MIDR_EL1 and MPIDR_EL1. 7950 */ 7951 ARMCPRegInfo vpidr_regs[] = { 7952 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7953 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7954 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7955 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 7956 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7957 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7958 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7959 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7960 .type = ARM_CP_NO_RAW, 7961 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 7962 }; 7963 define_arm_cp_regs(cpu, vpidr_regs); 7964 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 7965 if (arm_feature(env, ARM_FEATURE_V8)) { 7966 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 7967 } 7968 } 7969 } 7970 if (arm_feature(env, ARM_FEATURE_EL3)) { 7971 define_arm_cp_regs(cpu, el3_cp_reginfo); 7972 ARMCPRegInfo el3_regs[] = { 7973 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 7974 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 7975 .access = PL3_R, 7976 .fieldoffset = offsetof(CPUARMState, cp15.rvbar), 7977 }, 7978 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 7979 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 7980 .access = PL3_RW, 7981 .raw_writefn = raw_write, .writefn = sctlr_write, 7982 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 7983 .resetvalue = cpu->reset_sctlr }, 7984 }; 7985 7986 define_arm_cp_regs(cpu, el3_regs); 7987 } 7988 /* The behaviour of NSACR is sufficiently various that we don't 7989 * try to describe it in a single reginfo: 7990 * if EL3 is 64 bit, then trap to EL3 from S EL1, 7991 * reads as constant 0xc00 from NS EL1 and NS EL2 7992 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 7993 * if v7 without EL3, register doesn't exist 7994 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 7995 */ 7996 if (arm_feature(env, ARM_FEATURE_EL3)) { 7997 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7998 static const 
ARMCPRegInfo nsacr = { 7999 .name = "NSACR", .type = ARM_CP_CONST, 8000 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8001 .access = PL1_RW, .accessfn = nsacr_access, 8002 .resetvalue = 0xc00 8003 }; 8004 define_one_arm_cp_reg(cpu, &nsacr); 8005 } else { 8006 static const ARMCPRegInfo nsacr = { 8007 .name = "NSACR", 8008 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8009 .access = PL3_RW | PL1_R, 8010 .resetvalue = 0, 8011 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 8012 }; 8013 define_one_arm_cp_reg(cpu, &nsacr); 8014 } 8015 } else { 8016 if (arm_feature(env, ARM_FEATURE_V8)) { 8017 static const ARMCPRegInfo nsacr = { 8018 .name = "NSACR", .type = ARM_CP_CONST, 8019 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 8020 .access = PL1_R, 8021 .resetvalue = 0xc00 8022 }; 8023 define_one_arm_cp_reg(cpu, &nsacr); 8024 } 8025 } 8026 8027 if (arm_feature(env, ARM_FEATURE_PMSA)) { 8028 if (arm_feature(env, ARM_FEATURE_V6)) { 8029 /* PMSAv6 not implemented */ 8030 assert(arm_feature(env, ARM_FEATURE_V7)); 8031 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 8032 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 8033 } else { 8034 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 8035 } 8036 } else { 8037 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 8038 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 8039 /* TTCBR2 is introduced with ARMv8.2-AA32HPD. */ 8040 if (cpu_isar_feature(aa32_hpd, cpu)) { 8041 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 8042 } 8043 } 8044 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 8045 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 8046 } 8047 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 8048 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 8049 } 8050 if (arm_feature(env, ARM_FEATURE_VAPA)) { 8051 define_arm_cp_regs(cpu, vapa_cp_reginfo); 8052 } 8053 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 8054 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 8055 } 8056 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 8057 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 8058 } 8059 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 8060 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 8061 } 8062 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 8063 define_arm_cp_regs(cpu, omap_cp_reginfo); 8064 } 8065 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 8066 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 8067 } 8068 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 8069 define_arm_cp_regs(cpu, xscale_cp_reginfo); 8070 } 8071 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 8072 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 8073 } 8074 if (arm_feature(env, ARM_FEATURE_LPAE)) { 8075 define_arm_cp_regs(cpu, lpae_cp_reginfo); 8076 } 8077 if (cpu_isar_feature(aa32_jazelle, cpu)) { 8078 define_arm_cp_regs(cpu, jazelle_regs); 8079 } 8080 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 8081 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 8082 * be read-only (ie write causes UNDEF exception). 8083 */ 8084 { 8085 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 8086 /* Pre-v8 MIDR space. 8087 * Note that the MIDR isn't a simple constant register because 8088 * of the TI925 behaviour where writes to another register can 8089 * cause the MIDR value to change. 8090 * 8091 * Unimplemented registers in the c15 0 0 0 space default to 8092 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 8093 * and friends override accordingly. 
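 *
 * As an illustration, derived from the definitions that follow: the
 * MIDR entry below uses .opc2 = CP_ANY together with ARM_CP_OVERRIDE,
 * so it initially claims every c15 0 c0 c0 <opc2> encoding.  When CTR
 * (opc2 = 1), TCMTR (opc2 = 2) and friends are registered later with
 * specific opc2 values, add_cpreg_to_hashtable() lets them replace
 * the wildcard entry, because the entry being replaced carries
 * ARM_CP_OVERRIDE.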
8094 */ 8095 { .name = "MIDR", 8096 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 8097 .access = PL1_R, .resetvalue = cpu->midr, 8098 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 8099 .readfn = midr_read, 8100 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 8101 .type = ARM_CP_OVERRIDE }, 8102 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ 8103 { .name = "DUMMY", 8104 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 8105 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8106 { .name = "DUMMY", 8107 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 8108 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8109 { .name = "DUMMY", 8110 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 8111 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8112 { .name = "DUMMY", 8113 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 8114 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8115 { .name = "DUMMY", 8116 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 8117 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 8118 }; 8119 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 8120 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 8121 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 8122 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 8123 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 8124 .readfn = midr_read }, 8125 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 8126 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 8127 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 8128 .access = PL1_R, .resetvalue = cpu->midr }, 8129 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 8130 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 8131 .access = PL1_R, .resetvalue = cpu->midr }, 8132 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 8133 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 8134 .access = PL1_R, 8135 .accessfn = access_aa64_tid1, 8136 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 8137 }; 8138 ARMCPRegInfo id_cp_reginfo[] = { 8139 /* These are common to v8 and pre-v8 */ 8140 { .name = "CTR", 8141 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 8142 .access = PL1_R, .accessfn = ctr_el0_access, 8143 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 8144 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 8145 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 8146 .access = PL0_R, .accessfn = ctr_el0_access, 8147 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 8148 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 8149 { .name = "TCMTR", 8150 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 8151 .access = PL1_R, 8152 .accessfn = access_aa32_tid1, 8153 .type = ARM_CP_CONST, .resetvalue = 0 }, 8154 }; 8155 /* TLBTR is specific to VMSA */ 8156 ARMCPRegInfo id_tlbtr_reginfo = { 8157 .name = "TLBTR", 8158 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 8159 .access = PL1_R, 8160 .accessfn = access_aa32_tid1, 8161 .type = ARM_CP_CONST, .resetvalue = 0, 8162 }; 8163 /* MPUIR is specific to PMSA V6+ */ 8164 ARMCPRegInfo id_mpuir_reginfo = { 8165 .name = "MPUIR", 8166 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 8167 .access = PL1_R, .type = ARM_CP_CONST, 8168 .resetvalue = cpu->pmsav7_dregion << 8 8169 }; 8170 static const ARMCPRegInfo crn0_wi_reginfo = { 8171 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 8172 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 8173 .type = ARM_CP_NOP | 
ARM_CP_OVERRIDE 8174 }; 8175 #ifdef CONFIG_USER_ONLY 8176 static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 8177 { .name = "MIDR_EL1", 8178 .exported_bits = 0x00000000ffffffff }, 8179 { .name = "REVIDR_EL1" }, 8180 }; 8181 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 8182 #endif 8183 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 8184 arm_feature(env, ARM_FEATURE_STRONGARM)) { 8185 size_t i; 8186 /* Register the blanket "writes ignored" value first to cover the 8187 * whole space. Then update the specific ID registers to allow write 8188 * access, so that they ignore writes rather than causing them to 8189 * UNDEF. 8190 */ 8191 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 8192 for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) { 8193 id_pre_v8_midr_cp_reginfo[i].access = PL1_RW; 8194 } 8195 for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) { 8196 id_cp_reginfo[i].access = PL1_RW; 8197 } 8198 id_mpuir_reginfo.access = PL1_RW; 8199 id_tlbtr_reginfo.access = PL1_RW; 8200 } 8201 if (arm_feature(env, ARM_FEATURE_V8)) { 8202 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 8203 } else { 8204 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 8205 } 8206 define_arm_cp_regs(cpu, id_cp_reginfo); 8207 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 8208 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 8209 } else if (arm_feature(env, ARM_FEATURE_V7)) { 8210 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 8211 } 8212 } 8213 8214 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 8215 ARMCPRegInfo mpidr_cp_reginfo[] = { 8216 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 8217 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 8218 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 8219 }; 8220 #ifdef CONFIG_USER_ONLY 8221 static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 8222 { .name = "MPIDR_EL1", 8223 .fixed_bits = 0x0000000080000000 }, 8224 }; 8225 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 8226 #endif 8227 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 8228 } 8229 8230 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 8231 ARMCPRegInfo auxcr_reginfo[] = { 8232 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 8233 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 8234 .access = PL1_RW, .accessfn = access_tacr, 8235 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, 8236 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 8237 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 8238 .access = PL2_RW, .type = ARM_CP_CONST, 8239 .resetvalue = 0 }, 8240 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 8241 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 8242 .access = PL3_RW, .type = ARM_CP_CONST, 8243 .resetvalue = 0 }, 8244 }; 8245 define_arm_cp_regs(cpu, auxcr_reginfo); 8246 if (cpu_isar_feature(aa32_ac2, cpu)) { 8247 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); 8248 } 8249 } 8250 8251 if (arm_feature(env, ARM_FEATURE_CBAR)) { 8252 /* 8253 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 8254 * There are two flavours: 8255 * (1) older 32-bit only cores have a simple 32-bit CBAR 8256 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 8257 * 32-bit register visible to AArch32 at a different encoding 8258 * to the "flavour 1" register and with the bits rearranged to 8259 * be able to squash a 64-bit address into the 32-bit view. 
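 *      For example, with the packing implemented just below, a 64-bit
 *      CBAR of 0x8_4000_0000 is presented to the AArch32 view as
 *      0x4000_0008: bits [31:18] keep their position and bits [43:32]
 *      are folded down into bits [11:0].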
8260 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 8261 * in future if we support AArch32-only configs of some of the 8262 * AArch64 cores we might need to add a specific feature flag 8263 * to indicate cores with "flavour 2" CBAR. 8264 */ 8265 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 8266 /* 32 bit view is [31:18] 0...0 [43:32]. */ 8267 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 8268 | extract64(cpu->reset_cbar, 32, 12); 8269 ARMCPRegInfo cbar_reginfo[] = { 8270 { .name = "CBAR", 8271 .type = ARM_CP_CONST, 8272 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 8273 .access = PL1_R, .resetvalue = cbar32 }, 8274 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 8275 .type = ARM_CP_CONST, 8276 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 8277 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 8278 }; 8279 /* We don't implement a r/w 64 bit CBAR currently */ 8280 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 8281 define_arm_cp_regs(cpu, cbar_reginfo); 8282 } else { 8283 ARMCPRegInfo cbar = { 8284 .name = "CBAR", 8285 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 8286 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 8287 .fieldoffset = offsetof(CPUARMState, 8288 cp15.c15_config_base_address) 8289 }; 8290 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 8291 cbar.access = PL1_R; 8292 cbar.fieldoffset = 0; 8293 cbar.type = ARM_CP_CONST; 8294 } 8295 define_one_arm_cp_reg(cpu, &cbar); 8296 } 8297 } 8298 8299 if (arm_feature(env, ARM_FEATURE_VBAR)) { 8300 static const ARMCPRegInfo vbar_cp_reginfo[] = { 8301 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 8302 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 8303 .access = PL1_RW, .writefn = vbar_write, 8304 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 8305 offsetof(CPUARMState, cp15.vbar_ns) }, 8306 .resetvalue = 0 }, 8307 }; 8308 define_arm_cp_regs(cpu, vbar_cp_reginfo); 8309 } 8310 8311 /* Generic registers whose values depend on the implementation */ 8312 { 8313 ARMCPRegInfo sctlr = { 8314 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 8315 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 8316 .access = PL1_RW, .accessfn = access_tvm_trvm, 8317 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 8318 offsetof(CPUARMState, cp15.sctlr_ns) }, 8319 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 8320 .raw_writefn = raw_write, 8321 }; 8322 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 8323 /* Normally we would always end the TB on an SCTLR write, but Linux 8324 * arch/arm/mach-pxa/sleep.S expects two instructions following 8325 * an MMU enable to execute from cache. Imitate this behaviour. 
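 * (ARM_CP_SUPPRESS_TB_END below stops us ending the translation block
 * on the SCTLR write, so the following instructions keep executing
 * from the already-translated block, which is what imitates execution
 * from cache.)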
8326 */ 8327 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 8328 } 8329 define_one_arm_cp_reg(cpu, &sctlr); 8330 } 8331 8332 if (cpu_isar_feature(aa64_lor, cpu)) { 8333 define_arm_cp_regs(cpu, lor_reginfo); 8334 } 8335 if (cpu_isar_feature(aa64_pan, cpu)) { 8336 define_one_arm_cp_reg(cpu, &pan_reginfo); 8337 } 8338 #ifndef CONFIG_USER_ONLY 8339 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 8340 define_arm_cp_regs(cpu, ats1e1_reginfo); 8341 } 8342 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 8343 define_arm_cp_regs(cpu, ats1cp_reginfo); 8344 } 8345 #endif 8346 if (cpu_isar_feature(aa64_uao, cpu)) { 8347 define_one_arm_cp_reg(cpu, &uao_reginfo); 8348 } 8349 8350 if (cpu_isar_feature(aa64_dit, cpu)) { 8351 define_one_arm_cp_reg(cpu, &dit_reginfo); 8352 } 8353 if (cpu_isar_feature(aa64_ssbs, cpu)) { 8354 define_one_arm_cp_reg(cpu, &ssbs_reginfo); 8355 } 8356 8357 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 8358 define_arm_cp_regs(cpu, vhe_reginfo); 8359 } 8360 8361 if (cpu_isar_feature(aa64_sve, cpu)) { 8362 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 8363 if (arm_feature(env, ARM_FEATURE_EL2)) { 8364 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 8365 } else { 8366 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 8367 } 8368 if (arm_feature(env, ARM_FEATURE_EL3)) { 8369 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 8370 } 8371 } 8372 8373 #ifdef TARGET_AARCH64 8374 if (cpu_isar_feature(aa64_pauth, cpu)) { 8375 define_arm_cp_regs(cpu, pauth_reginfo); 8376 } 8377 if (cpu_isar_feature(aa64_rndr, cpu)) { 8378 define_arm_cp_regs(cpu, rndr_reginfo); 8379 } 8380 if (cpu_isar_feature(aa64_tlbirange, cpu)) { 8381 define_arm_cp_regs(cpu, tlbirange_reginfo); 8382 } 8383 if (cpu_isar_feature(aa64_tlbios, cpu)) { 8384 define_arm_cp_regs(cpu, tlbios_reginfo); 8385 } 8386 #ifndef CONFIG_USER_ONLY 8387 /* Data Cache clean instructions up to PoP */ 8388 if (cpu_isar_feature(aa64_dcpop, cpu)) { 8389 define_one_arm_cp_reg(cpu, dcpop_reg); 8390 8391 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 8392 define_one_arm_cp_reg(cpu, dcpodp_reg); 8393 } 8394 } 8395 #endif /*CONFIG_USER_ONLY*/ 8396 8397 /* 8398 * If full MTE is enabled, add all of the system registers. 8399 * If only "instructions available at EL0" are enabled, 8400 * then define only a RAZ/WI version of PSTATE.TCO. 8401 */ 8402 if (cpu_isar_feature(aa64_mte, cpu)) { 8403 define_arm_cp_regs(cpu, mte_reginfo); 8404 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); 8405 } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) { 8406 define_arm_cp_regs(cpu, mte_tco_ro_reginfo); 8407 define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); 8408 } 8409 #endif 8410 8411 if (cpu_isar_feature(any_predinv, cpu)) { 8412 define_arm_cp_regs(cpu, predinv_reginfo); 8413 } 8414 8415 if (cpu_isar_feature(any_ccidx, cpu)) { 8416 define_arm_cp_regs(cpu, ccsidr2_reginfo); 8417 } 8418 8419 #ifndef CONFIG_USER_ONLY 8420 /* 8421 * Register redirections and aliases must be done last, 8422 * after the registers from the other extensions have been defined. 8423 */ 8424 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 8425 define_arm_vh_e2h_redirects_aliases(cpu); 8426 } 8427 #endif 8428 } 8429 8430 /* Sort alphabetically by type name, except for "any". 
*/ 8431 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 8432 { 8433 ObjectClass *class_a = (ObjectClass *)a; 8434 ObjectClass *class_b = (ObjectClass *)b; 8435 const char *name_a, *name_b; 8436 8437 name_a = object_class_get_name(class_a); 8438 name_b = object_class_get_name(class_b); 8439 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 8440 return 1; 8441 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 8442 return -1; 8443 } else { 8444 return strcmp(name_a, name_b); 8445 } 8446 } 8447 8448 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 8449 { 8450 ObjectClass *oc = data; 8451 const char *typename; 8452 char *name; 8453 8454 typename = object_class_get_name(oc); 8455 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8456 qemu_printf(" %s\n", name); 8457 g_free(name); 8458 } 8459 8460 void arm_cpu_list(void) 8461 { 8462 GSList *list; 8463 8464 list = object_class_get_list(TYPE_ARM_CPU, false); 8465 list = g_slist_sort(list, arm_cpu_list_compare); 8466 qemu_printf("Available CPUs:\n"); 8467 g_slist_foreach(list, arm_cpu_list_entry, NULL); 8468 g_slist_free(list); 8469 } 8470 8471 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 8472 { 8473 ObjectClass *oc = data; 8474 CpuDefinitionInfoList **cpu_list = user_data; 8475 CpuDefinitionInfo *info; 8476 const char *typename; 8477 8478 typename = object_class_get_name(oc); 8479 info = g_malloc0(sizeof(*info)); 8480 info->name = g_strndup(typename, 8481 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8482 info->q_typename = g_strdup(typename); 8483 8484 QAPI_LIST_PREPEND(*cpu_list, info); 8485 } 8486 8487 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 8488 { 8489 CpuDefinitionInfoList *cpu_list = NULL; 8490 GSList *list; 8491 8492 list = object_class_get_list(TYPE_ARM_CPU, false); 8493 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 8494 g_slist_free(list); 8495 8496 return cpu_list; 8497 } 8498 8499 /* 8500 * Private utility function for define_one_arm_cp_reg_with_opaque(): 8501 * add a single reginfo struct to the hash table. 8502 */ 8503 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 8504 void *opaque, CPState state, 8505 CPSecureState secstate, 8506 int crm, int opc1, int opc2, 8507 const char *name) 8508 { 8509 uint32_t key; 8510 ARMCPRegInfo *r2; 8511 bool is64 = r->type & ARM_CP_64BIT; 8512 bool ns = secstate & ARM_CP_SECSTATE_NS; 8513 int cp = r->cp; 8514 bool isbanked; 8515 size_t name_len; 8516 8517 switch (state) { 8518 case ARM_CP_STATE_AA32: 8519 /* We assume it is a cp15 register if the .cp field is left unset. */ 8520 if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { 8521 cp = 15; 8522 } 8523 key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); 8524 break; 8525 case ARM_CP_STATE_AA64: 8526 /* 8527 * To allow abbreviation of ARMCPRegInfo definitions, we treat 8528 * cp == 0 as equivalent to the value for "standard guest-visible 8529 * sysreg". STATE_BOTH definitions are also always "standard sysreg" 8530 * in their AArch64 view (the .cp value may be non-zero for the 8531 * benefit of the AArch32 view). 8532 */ 8533 if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { 8534 cp = CP_REG_ARM64_SYSREG_CP; 8535 } 8536 key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); 8537 break; 8538 default: 8539 g_assert_not_reached(); 8540 } 8541 8542 /* Overriding of an existing definition must be explicitly requested. 
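 * That is: if the new reginfo does not set ARM_CP_OVERRIDE, then any
 * existing entry for the same encoding must itself have been defined
 * with ARM_CP_OVERRIDE, otherwise the assertion below fires.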
*/ 8543 if (!(r->type & ARM_CP_OVERRIDE)) { 8544 const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); 8545 if (oldreg) { 8546 assert(oldreg->type & ARM_CP_OVERRIDE); 8547 } 8548 } 8549 8550 /* Combine cpreg and name into one allocation. */ 8551 name_len = strlen(name) + 1; 8552 r2 = g_malloc(sizeof(*r2) + name_len); 8553 *r2 = *r; 8554 r2->name = memcpy(r2 + 1, name, name_len); 8555 8556 /* 8557 * Update fields to match the instantiation, overwiting wildcards 8558 * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH. 8559 */ 8560 r2->cp = cp; 8561 r2->crm = crm; 8562 r2->opc1 = opc1; 8563 r2->opc2 = opc2; 8564 r2->state = state; 8565 r2->secure = secstate; 8566 if (opaque) { 8567 r2->opaque = opaque; 8568 } 8569 8570 isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; 8571 if (isbanked) { 8572 /* 8573 * Register is banked (using both entries in array). 8574 * Overwriting fieldoffset as the array is only used to define 8575 * banked registers but later only fieldoffset is used. 8576 */ 8577 r2->fieldoffset = r->bank_fieldoffsets[ns]; 8578 } 8579 8580 if (state == ARM_CP_STATE_AA32) { 8581 if (isbanked) { 8582 /* 8583 * If the register is banked then we don't need to migrate or 8584 * reset the 32-bit instance in certain cases: 8585 * 8586 * 1) If the register has both 32-bit and 64-bit instances then we 8587 * can count on the 64-bit instance taking care of the 8588 * non-secure bank. 8589 * 2) If ARMv8 is enabled then we can count on a 64-bit version 8590 * taking care of the secure bank. This requires that separate 8591 * 32 and 64-bit definitions are provided. 8592 */ 8593 if ((r->state == ARM_CP_STATE_BOTH && ns) || 8594 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 8595 r2->type |= ARM_CP_ALIAS; 8596 } 8597 } else if ((secstate != r->secure) && !ns) { 8598 /* 8599 * The register is not banked so we only want to allow migration 8600 * of the non-secure instance. 8601 */ 8602 r2->type |= ARM_CP_ALIAS; 8603 } 8604 8605 if (HOST_BIG_ENDIAN && 8606 r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { 8607 r2->fieldoffset += sizeof(uint32_t); 8608 } 8609 } 8610 8611 /* 8612 * By convention, for wildcarded registers only the first 8613 * entry is used for migration; the others are marked as 8614 * ALIAS so we don't try to transfer the register 8615 * multiple times. Special registers (ie NOP/WFI) are 8616 * never migratable and not even raw-accessible. 8617 */ 8618 if (r->type & ARM_CP_SPECIAL_MASK) { 8619 r2->type |= ARM_CP_NO_RAW; 8620 } 8621 if (((r->crm == CP_ANY) && crm != 0) || 8622 ((r->opc1 == CP_ANY) && opc1 != 0) || 8623 ((r->opc2 == CP_ANY) && opc2 != 0)) { 8624 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 8625 } 8626 8627 /* 8628 * Check that raw accesses are either forbidden or handled. Note that 8629 * we can't assert this earlier because the setup of fieldoffset for 8630 * banked registers has to be done first. 8631 */ 8632 if (!(r2->type & ARM_CP_NO_RAW)) { 8633 assert(!raw_accessors_invalid(r2)); 8634 } 8635 8636 g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); 8637 } 8638 8639 8640 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 8641 const ARMCPRegInfo *r, void *opaque) 8642 { 8643 /* Define implementations of coprocessor registers. 8644 * We store these in a hashtable because typically 8645 * there are less than 150 registers in a space which 8646 * is 16*16*16*8*8 = 262144 in size. 8647 * Wildcarding is supported for the crm, opc1 and opc2 fields. 
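 * For example, expanding on the loops below, a reginfo with
 * .crm = CP_ANY and fixed opc1/opc2 values is entered into the
 * hashtable once for each of the 16 crm values 0..15; only the
 * crm == 0 instance stays migratable and GDB-visible, the others are
 * marked ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable().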
8648 * If a register is defined twice then the second definition is 8649 * used, so this can be used to define some generic registers and 8650 * then override them with implementation specific variations. 8651 * At least one of the original and the second definition should 8652 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 8653 * against accidental use. 8654 * 8655 * The state field defines whether the register is to be 8656 * visible in the AArch32 or AArch64 execution state. If the 8657 * state is set to ARM_CP_STATE_BOTH then we synthesise a 8658 * reginfo structure for the AArch32 view, which sees the lower 8659 * 32 bits of the 64 bit register. 8660 * 8661 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 8662 * be wildcarded. AArch64 registers are always considered to be 64 8663 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 8664 * the register, if any. 8665 */ 8666 int crm, opc1, opc2; 8667 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 8668 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 8669 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 8670 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 8671 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 8672 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 8673 CPState state; 8674 8675 /* 64 bit registers have only CRm and Opc1 fields */ 8676 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 8677 /* op0 only exists in the AArch64 encodings */ 8678 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 8679 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 8680 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 8681 /* 8682 * This API is only for Arm's system coprocessors (14 and 15) or 8683 * (M-profile or v7A-and-earlier only) for implementation defined 8684 * coprocessors in the range 0..7. Our decode assumes this, since 8685 * 8..13 can be used for other insns including VFP and Neon. See 8686 * valid_cp() in translate.c. Assert here that we haven't tried 8687 * to use an invalid coprocessor number. 8688 */ 8689 switch (r->state) { 8690 case ARM_CP_STATE_BOTH: 8691 /* 0 has a special meaning, but otherwise the same rules as AA32. */ 8692 if (r->cp == 0) { 8693 break; 8694 } 8695 /* fall through */ 8696 case ARM_CP_STATE_AA32: 8697 if (arm_feature(&cpu->env, ARM_FEATURE_V8) && 8698 !arm_feature(&cpu->env, ARM_FEATURE_M)) { 8699 assert(r->cp >= 14 && r->cp <= 15); 8700 } else { 8701 assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); 8702 } 8703 break; 8704 case ARM_CP_STATE_AA64: 8705 assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); 8706 break; 8707 default: 8708 g_assert_not_reached(); 8709 } 8710 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 8711 * encodes a minimum access level for the register. We roll this 8712 * runtime check into our general permission check code, so check 8713 * here that the reginfo's specified permissions are strict enough 8714 * to encompass the generic architectural permission check. 
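 * For example, a register with opc1 == 3 is architecturally reachable
 * from EL0, so its .access field must not grant more than PL0_RW; the
 * switch statement below asserts exactly that for each opc1 value.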
8715 */ 8716 if (r->state != ARM_CP_STATE_AA32) { 8717 CPAccessRights mask; 8718 switch (r->opc1) { 8719 case 0: 8720 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 8721 mask = PL0U_R | PL1_RW; 8722 break; 8723 case 1: case 2: 8724 /* min_EL EL1 */ 8725 mask = PL1_RW; 8726 break; 8727 case 3: 8728 /* min_EL EL0 */ 8729 mask = PL0_RW; 8730 break; 8731 case 4: 8732 case 5: 8733 /* min_EL EL2 */ 8734 mask = PL2_RW; 8735 break; 8736 case 6: 8737 /* min_EL EL3 */ 8738 mask = PL3_RW; 8739 break; 8740 case 7: 8741 /* min_EL EL1, secure mode only (we don't check the latter) */ 8742 mask = PL1_RW; 8743 break; 8744 default: 8745 /* broken reginfo with out-of-range opc1 */ 8746 g_assert_not_reached(); 8747 } 8748 /* assert our permissions are not too lax (stricter is fine) */ 8749 assert((r->access & ~mask) == 0); 8750 } 8751 8752 /* Check that the register definition has enough info to handle 8753 * reads and writes if they are permitted. 8754 */ 8755 if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { 8756 if (r->access & PL3_R) { 8757 assert((r->fieldoffset || 8758 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8759 r->readfn); 8760 } 8761 if (r->access & PL3_W) { 8762 assert((r->fieldoffset || 8763 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8764 r->writefn); 8765 } 8766 } 8767 8768 for (crm = crmmin; crm <= crmmax; crm++) { 8769 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 8770 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 8771 for (state = ARM_CP_STATE_AA32; 8772 state <= ARM_CP_STATE_AA64; state++) { 8773 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 8774 continue; 8775 } 8776 if (state == ARM_CP_STATE_AA32) { 8777 /* Under AArch32 CP registers can be common 8778 * (same for secure and non-secure world) or banked. 8779 */ 8780 char *name; 8781 8782 switch (r->secure) { 8783 case ARM_CP_SECSTATE_S: 8784 case ARM_CP_SECSTATE_NS: 8785 add_cpreg_to_hashtable(cpu, r, opaque, state, 8786 r->secure, crm, opc1, opc2, 8787 r->name); 8788 break; 8789 case ARM_CP_SECSTATE_BOTH: 8790 name = g_strdup_printf("%s_S", r->name); 8791 add_cpreg_to_hashtable(cpu, r, opaque, state, 8792 ARM_CP_SECSTATE_S, 8793 crm, opc1, opc2, name); 8794 g_free(name); 8795 add_cpreg_to_hashtable(cpu, r, opaque, state, 8796 ARM_CP_SECSTATE_NS, 8797 crm, opc1, opc2, r->name); 8798 break; 8799 default: 8800 g_assert_not_reached(); 8801 } 8802 } else { 8803 /* AArch64 registers get mapped to non-secure instance 8804 * of AArch32 */ 8805 add_cpreg_to_hashtable(cpu, r, opaque, state, 8806 ARM_CP_SECSTATE_NS, 8807 crm, opc1, opc2, r->name); 8808 } 8809 } 8810 } 8811 } 8812 } 8813 } 8814 8815 /* Define a whole list of registers */ 8816 void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, 8817 void *opaque, size_t len) 8818 { 8819 size_t i; 8820 for (i = 0; i < len; ++i) { 8821 define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque); 8822 } 8823 } 8824 8825 /* 8826 * Modify ARMCPRegInfo for access from userspace. 8827 * 8828 * This is a data driven modification directed by 8829 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 8830 * user-space cannot alter any values and dynamic values pertaining to 8831 * execution state are hidden from user space view anyway. 
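 * Glob entries (is_glob) match by pattern and have their reset value
 * forced to zero; exact-name entries keep only .exported_bits of the
 * original reset value and then OR in .fixed_bits.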
8832 */ 8833 void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len, 8834 const ARMCPRegUserSpaceInfo *mods, 8835 size_t mods_len) 8836 { 8837 for (size_t mi = 0; mi < mods_len; ++mi) { 8838 const ARMCPRegUserSpaceInfo *m = mods + mi; 8839 GPatternSpec *pat = NULL; 8840 8841 if (m->is_glob) { 8842 pat = g_pattern_spec_new(m->name); 8843 } 8844 for (size_t ri = 0; ri < regs_len; ++ri) { 8845 ARMCPRegInfo *r = regs + ri; 8846 8847 if (pat && g_pattern_match_string(pat, r->name)) { 8848 r->type = ARM_CP_CONST; 8849 r->access = PL0U_R; 8850 r->resetvalue = 0; 8851 /* continue */ 8852 } else if (strcmp(r->name, m->name) == 0) { 8853 r->type = ARM_CP_CONST; 8854 r->access = PL0U_R; 8855 r->resetvalue &= m->exported_bits; 8856 r->resetvalue |= m->fixed_bits; 8857 break; 8858 } 8859 } 8860 if (pat) { 8861 g_pattern_spec_free(pat); 8862 } 8863 } 8864 } 8865 8866 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 8867 { 8868 return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp); 8869 } 8870 8871 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 8872 uint64_t value) 8873 { 8874 /* Helper coprocessor write function for write-ignore registers */ 8875 } 8876 8877 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 8878 { 8879 /* Helper coprocessor write function for read-as-zero registers */ 8880 return 0; 8881 } 8882 8883 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 8884 { 8885 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 8886 } 8887 8888 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 8889 { 8890 /* Return true if it is not valid for us to switch to 8891 * this CPU mode (ie all the UNPREDICTABLE cases in 8892 * the ARM ARM CPSRWriteByInstr pseudocode). 8893 */ 8894 8895 /* Changes to or from Hyp via MSR and CPS are illegal. */ 8896 if (write_type == CPSRWriteByInstr && 8897 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 8898 mode == ARM_CPU_MODE_HYP)) { 8899 return 1; 8900 } 8901 8902 switch (mode) { 8903 case ARM_CPU_MODE_USR: 8904 return 0; 8905 case ARM_CPU_MODE_SYS: 8906 case ARM_CPU_MODE_SVC: 8907 case ARM_CPU_MODE_ABT: 8908 case ARM_CPU_MODE_UND: 8909 case ARM_CPU_MODE_IRQ: 8910 case ARM_CPU_MODE_FIQ: 8911 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 8912 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 8913 */ 8914 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 8915 * and CPS are treated as illegal mode changes. 
8916 */ 8917 if (write_type == CPSRWriteByInstr && 8918 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 8919 (arm_hcr_el2_eff(env) & HCR_TGE)) { 8920 return 1; 8921 } 8922 return 0; 8923 case ARM_CPU_MODE_HYP: 8924 return !arm_is_el2_enabled(env) || arm_current_el(env) < 2; 8925 case ARM_CPU_MODE_MON: 8926 return arm_current_el(env) < 3; 8927 default: 8928 return 1; 8929 } 8930 } 8931 8932 uint32_t cpsr_read(CPUARMState *env) 8933 { 8934 int ZF; 8935 ZF = (env->ZF == 0); 8936 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 8937 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 8938 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 8939 | ((env->condexec_bits & 0xfc) << 8) 8940 | (env->GE << 16) | (env->daif & CPSR_AIF); 8941 } 8942 8943 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 8944 CPSRWriteType write_type) 8945 { 8946 uint32_t changed_daif; 8947 bool rebuild_hflags = (write_type != CPSRWriteRaw) && 8948 (mask & (CPSR_M | CPSR_E | CPSR_IL)); 8949 8950 if (mask & CPSR_NZCV) { 8951 env->ZF = (~val) & CPSR_Z; 8952 env->NF = val; 8953 env->CF = (val >> 29) & 1; 8954 env->VF = (val << 3) & 0x80000000; 8955 } 8956 if (mask & CPSR_Q) 8957 env->QF = ((val & CPSR_Q) != 0); 8958 if (mask & CPSR_T) 8959 env->thumb = ((val & CPSR_T) != 0); 8960 if (mask & CPSR_IT_0_1) { 8961 env->condexec_bits &= ~3; 8962 env->condexec_bits |= (val >> 25) & 3; 8963 } 8964 if (mask & CPSR_IT_2_7) { 8965 env->condexec_bits &= 3; 8966 env->condexec_bits |= (val >> 8) & 0xfc; 8967 } 8968 if (mask & CPSR_GE) { 8969 env->GE = (val >> 16) & 0xf; 8970 } 8971 8972 /* In a V7 implementation that includes the security extensions but does 8973 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 8974 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 8975 * bits respectively. 8976 * 8977 * In a V8 implementation, it is permitted for privileged software to 8978 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 8979 */ 8980 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 8981 arm_feature(env, ARM_FEATURE_EL3) && 8982 !arm_feature(env, ARM_FEATURE_EL2) && 8983 !arm_is_secure(env)) { 8984 8985 changed_daif = (env->daif ^ val) & mask; 8986 8987 if (changed_daif & CPSR_A) { 8988 /* Check to see if we are allowed to change the masking of async 8989 * abort exceptions from a non-secure state. 8990 */ 8991 if (!(env->cp15.scr_el3 & SCR_AW)) { 8992 qemu_log_mask(LOG_GUEST_ERROR, 8993 "Ignoring attempt to switch CPSR_A flag from " 8994 "non-secure world with SCR.AW bit clear\n"); 8995 mask &= ~CPSR_A; 8996 } 8997 } 8998 8999 if (changed_daif & CPSR_F) { 9000 /* Check to see if we are allowed to change the masking of FIQ 9001 * exceptions from a non-secure state. 9002 */ 9003 if (!(env->cp15.scr_el3 & SCR_FW)) { 9004 qemu_log_mask(LOG_GUEST_ERROR, 9005 "Ignoring attempt to switch CPSR_F flag from " 9006 "non-secure world with SCR.FW bit clear\n"); 9007 mask &= ~CPSR_F; 9008 } 9009 9010 /* Check whether non-maskable FIQ (NMFI) support is enabled. 9011 * If this bit is set software is not allowed to mask 9012 * FIQs, but is allowed to set CPSR_F to 0. 
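 * (Only attempts to set CPSR_F are dropped below; clearing CPSR_F is
 * still allowed, because the check only triggers when the written
 * value has the F bit set.)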
9013 */ 9014 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 9015 (val & CPSR_F)) { 9016 qemu_log_mask(LOG_GUEST_ERROR, 9017 "Ignoring attempt to enable CPSR_F flag " 9018 "(non-maskable FIQ [NMFI] support enabled)\n"); 9019 mask &= ~CPSR_F; 9020 } 9021 } 9022 } 9023 9024 env->daif &= ~(CPSR_AIF & mask); 9025 env->daif |= val & CPSR_AIF & mask; 9026 9027 if (write_type != CPSRWriteRaw && 9028 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 9029 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 9030 /* Note that we can only get here in USR mode if this is a 9031 * gdb stub write; for this case we follow the architectural 9032 * behaviour for guest writes in USR mode of ignoring an attempt 9033 * to switch mode. (Those are caught by translate.c for writes 9034 * triggered by guest instructions.) 9035 */ 9036 mask &= ~CPSR_M; 9037 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 9038 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 9039 * v7, and has defined behaviour in v8: 9040 * + leave CPSR.M untouched 9041 * + allow changes to the other CPSR fields 9042 * + set PSTATE.IL 9043 * For user changes via the GDB stub, we don't set PSTATE.IL, 9044 * as this would be unnecessarily harsh for a user error. 9045 */ 9046 mask &= ~CPSR_M; 9047 if (write_type != CPSRWriteByGDBStub && 9048 arm_feature(env, ARM_FEATURE_V8)) { 9049 mask |= CPSR_IL; 9050 val |= CPSR_IL; 9051 } 9052 qemu_log_mask(LOG_GUEST_ERROR, 9053 "Illegal AArch32 mode switch attempt from %s to %s\n", 9054 aarch32_mode_name(env->uncached_cpsr), 9055 aarch32_mode_name(val)); 9056 } else { 9057 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 9058 write_type == CPSRWriteExceptionReturn ? 9059 "Exception return from AArch32" : 9060 "AArch32 mode switch from", 9061 aarch32_mode_name(env->uncached_cpsr), 9062 aarch32_mode_name(val), env->regs[15]); 9063 switch_mode(env, val & CPSR_M); 9064 } 9065 } 9066 mask &= ~CACHED_CPSR_BITS; 9067 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 9068 if (rebuild_hflags) { 9069 arm_rebuild_hflags(env); 9070 } 9071 } 9072 9073 /* Sign/zero extend */ 9074 uint32_t HELPER(sxtb16)(uint32_t x) 9075 { 9076 uint32_t res; 9077 res = (uint16_t)(int8_t)x; 9078 res |= (uint32_t)(int8_t)(x >> 16) << 16; 9079 return res; 9080 } 9081 9082 static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) 9083 { 9084 /* 9085 * Take a division-by-zero exception if necessary; otherwise return 9086 * to get the usual non-trapping division behaviour (result of 0) 9087 */ 9088 if (arm_feature(env, ARM_FEATURE_M) 9089 && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { 9090 raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); 9091 } 9092 } 9093 9094 uint32_t HELPER(uxtb16)(uint32_t x) 9095 { 9096 uint32_t res; 9097 res = (uint16_t)(uint8_t)x; 9098 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 9099 return res; 9100 } 9101 9102 int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) 9103 { 9104 if (den == 0) { 9105 handle_possible_div0_trap(env, GETPC()); 9106 return 0; 9107 } 9108 if (num == INT_MIN && den == -1) { 9109 return INT_MIN; 9110 } 9111 return num / den; 9112 } 9113 9114 uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) 9115 { 9116 if (den == 0) { 9117 handle_possible_div0_trap(env, GETPC()); 9118 return 0; 9119 } 9120 return num / den; 9121 } 9122 9123 uint32_t HELPER(rbit)(uint32_t x) 9124 { 9125 return revbit32(x); 9126 } 9127 9128 #ifdef CONFIG_USER_ONLY 9129 9130 static void 
switch_mode(CPUARMState *env, int mode) 9131 { 9132 ARMCPU *cpu = env_archcpu(env); 9133 9134 if (mode != ARM_CPU_MODE_USR) { 9135 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 9136 } 9137 } 9138 9139 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 9140 uint32_t cur_el, bool secure) 9141 { 9142 return 1; 9143 } 9144 9145 void aarch64_sync_64_to_32(CPUARMState *env) 9146 { 9147 g_assert_not_reached(); 9148 } 9149 9150 #else 9151 9152 static void switch_mode(CPUARMState *env, int mode) 9153 { 9154 int old_mode; 9155 int i; 9156 9157 old_mode = env->uncached_cpsr & CPSR_M; 9158 if (mode == old_mode) 9159 return; 9160 9161 if (old_mode == ARM_CPU_MODE_FIQ) { 9162 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 9163 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 9164 } else if (mode == ARM_CPU_MODE_FIQ) { 9165 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 9166 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 9167 } 9168 9169 i = bank_number(old_mode); 9170 env->banked_r13[i] = env->regs[13]; 9171 env->banked_spsr[i] = env->spsr; 9172 9173 i = bank_number(mode); 9174 env->regs[13] = env->banked_r13[i]; 9175 env->spsr = env->banked_spsr[i]; 9176 9177 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 9178 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 9179 } 9180 9181 /* Physical Interrupt Target EL Lookup Table 9182 * 9183 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 9184 * 9185 * The below multi-dimensional table is used for looking up the target 9186 * exception level given numerous condition criteria. Specifically, the 9187 * target EL is based on SCR and HCR routing controls as well as the 9188 * currently executing EL and secure state. 9189 * 9190 * Dimensions: 9191 * target_el_table[2][2][2][2][2][4] 9192 * | | | | | +--- Current EL 9193 * | | | | +------ Non-secure(0)/Secure(1) 9194 * | | | +--------- HCR mask override 9195 * | | +------------ SCR exec state control 9196 * | +--------------- SCR mask override 9197 * +------------------ 32-bit(0)/64-bit(1) EL3 9198 * 9199 * The table values are as such: 9200 * 0-3 = EL0-EL3 9201 * -1 = Cannot occur 9202 * 9203 * The ARM ARM target EL table includes entries indicating that an "exception 9204 * is not taken". The two cases where this is applicable are: 9205 * 1) An exception is taken from EL3 but the SCR does not have the exception 9206 * routed to EL3. 9207 * 2) An exception is taken from EL2 but the HCR does not have the exception 9208 * routed to EL2. 9209 * In these two cases, the below table contain a target of EL1. This value is 9210 * returned as it is expected that the consumer of the table data will check 9211 * for "target EL >= current EL" to ensure the exception is not taken. 
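 *
 * Worked example, reading the table below: a physical IRQ taken from
 * Non-secure EL0 with a 64-bit EL3, SCR.IRQ = 0, SCR.RW = 1 and
 * HCR.IMO = 0 uses target_el_table[1][0][1][0][0][0] == 1, so the IRQ
 * is routed to EL1; with HCR.IMO = 1 the same lookup gives 2 and the
 * IRQ is routed to EL2 instead.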
9212 * 9213 * SCR HCR 9214 * 64 EA AMO From 9215 * BIT IRQ IMO Non-secure Secure 9216 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 9217 */ 9218 static const int8_t target_el_table[2][2][2][2][2][4] = { 9219 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 9220 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 9221 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 9222 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 9223 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 9224 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 9225 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 9226 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 9227 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 9228 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},}, 9229 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },}, 9230 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},}, 9231 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 9232 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 9233 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },}, 9234 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},}, 9235 }; 9236 9237 /* 9238 * Determine the target EL for physical exceptions 9239 */ 9240 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 9241 uint32_t cur_el, bool secure) 9242 { 9243 CPUARMState *env = cs->env_ptr; 9244 bool rw; 9245 bool scr; 9246 bool hcr; 9247 int target_el; 9248 /* Is the highest EL AArch64? */ 9249 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 9250 uint64_t hcr_el2; 9251 9252 if (arm_feature(env, ARM_FEATURE_EL3)) { 9253 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 9254 } else { 9255 /* Either EL2 is the highest EL (and so the EL2 register width 9256 * is given by is64); or there is no EL2 or EL3, in which case 9257 * the value of 'rw' does not affect the table lookup anyway. 9258 */ 9259 rw = is64; 9260 } 9261 9262 hcr_el2 = arm_hcr_el2_eff(env); 9263 switch (excp_idx) { 9264 case EXCP_IRQ: 9265 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 9266 hcr = hcr_el2 & HCR_IMO; 9267 break; 9268 case EXCP_FIQ: 9269 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 9270 hcr = hcr_el2 & HCR_FMO; 9271 break; 9272 default: 9273 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 9274 hcr = hcr_el2 & HCR_AMO; 9275 break; 9276 }; 9277 9278 /* 9279 * For these purposes, TGE and AMO/IMO/FMO both force the 9280 * interrupt to EL2. Fold TGE into the bit extracted above. 
9281 */ 9282 hcr |= (hcr_el2 & HCR_TGE) != 0; 9283 9284 /* Perform a table-lookup for the target EL given the current state */ 9285 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 9286 9287 assert(target_el > 0); 9288 9289 return target_el; 9290 } 9291 9292 void arm_log_exception(CPUState *cs) 9293 { 9294 int idx = cs->exception_index; 9295 9296 if (qemu_loglevel_mask(CPU_LOG_INT)) { 9297 const char *exc = NULL; 9298 static const char * const excnames[] = { 9299 [EXCP_UDEF] = "Undefined Instruction", 9300 [EXCP_SWI] = "SVC", 9301 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 9302 [EXCP_DATA_ABORT] = "Data Abort", 9303 [EXCP_IRQ] = "IRQ", 9304 [EXCP_FIQ] = "FIQ", 9305 [EXCP_BKPT] = "Breakpoint", 9306 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 9307 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 9308 [EXCP_HVC] = "Hypervisor Call", 9309 [EXCP_HYP_TRAP] = "Hypervisor Trap", 9310 [EXCP_SMC] = "Secure Monitor Call", 9311 [EXCP_VIRQ] = "Virtual IRQ", 9312 [EXCP_VFIQ] = "Virtual FIQ", 9313 [EXCP_SEMIHOST] = "Semihosting call", 9314 [EXCP_NOCP] = "v7M NOCP UsageFault", 9315 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 9316 [EXCP_STKOF] = "v8M STKOF UsageFault", 9317 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 9318 [EXCP_LSERR] = "v8M LSERR UsageFault", 9319 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 9320 [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault", 9321 }; 9322 9323 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 9324 exc = excnames[idx]; 9325 } 9326 if (!exc) { 9327 exc = "unknown"; 9328 } 9329 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n", 9330 idx, exc, cs->cpu_index); 9331 } 9332 } 9333 9334 /* 9335 * Function used to synchronize QEMU's AArch64 register set with AArch32 9336 * register set. This is necessary when switching between AArch32 and AArch64 9337 * execution state. 9338 */ 9339 void aarch64_sync_32_to_64(CPUARMState *env) 9340 { 9341 int i; 9342 uint32_t mode = env->uncached_cpsr & CPSR_M; 9343 9344 /* We can blanket copy R[0:7] to X[0:7] */ 9345 for (i = 0; i < 8; i++) { 9346 env->xregs[i] = env->regs[i]; 9347 } 9348 9349 /* 9350 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 9351 * Otherwise, they come from the banked user regs. 9352 */ 9353 if (mode == ARM_CPU_MODE_FIQ) { 9354 for (i = 8; i < 13; i++) { 9355 env->xregs[i] = env->usr_regs[i - 8]; 9356 } 9357 } else { 9358 for (i = 8; i < 13; i++) { 9359 env->xregs[i] = env->regs[i]; 9360 } 9361 } 9362 9363 /* 9364 * Registers x13-x23 are the various mode SP and FP registers. Registers 9365 * r13 and r14 are only copied if we are in that mode, otherwise we copy 9366 * from the mode banked register. 
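 * The resulting layout, as implemented below, is:
 *   x13 = SP_usr,  x14 = LR_usr,  x15 = SP_hyp,
 *   x16 = LR_irq,  x17 = SP_irq,  x18 = LR_svc,  x19 = SP_svc,
 *   x20 = LR_abt,  x21 = SP_abt,  x22 = LR_und,  x23 = SP_und,
 * and x24-x30 hold the FIQ-banked r8-r14.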
9367 */ 9368 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9369 env->xregs[13] = env->regs[13]; 9370 env->xregs[14] = env->regs[14]; 9371 } else { 9372 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 9373 /* HYP is an exception in that it is copied from r14 */ 9374 if (mode == ARM_CPU_MODE_HYP) { 9375 env->xregs[14] = env->regs[14]; 9376 } else { 9377 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 9378 } 9379 } 9380 9381 if (mode == ARM_CPU_MODE_HYP) { 9382 env->xregs[15] = env->regs[13]; 9383 } else { 9384 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 9385 } 9386 9387 if (mode == ARM_CPU_MODE_IRQ) { 9388 env->xregs[16] = env->regs[14]; 9389 env->xregs[17] = env->regs[13]; 9390 } else { 9391 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 9392 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 9393 } 9394 9395 if (mode == ARM_CPU_MODE_SVC) { 9396 env->xregs[18] = env->regs[14]; 9397 env->xregs[19] = env->regs[13]; 9398 } else { 9399 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 9400 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 9401 } 9402 9403 if (mode == ARM_CPU_MODE_ABT) { 9404 env->xregs[20] = env->regs[14]; 9405 env->xregs[21] = env->regs[13]; 9406 } else { 9407 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 9408 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 9409 } 9410 9411 if (mode == ARM_CPU_MODE_UND) { 9412 env->xregs[22] = env->regs[14]; 9413 env->xregs[23] = env->regs[13]; 9414 } else { 9415 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 9416 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 9417 } 9418 9419 /* 9420 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9421 * mode, then we can copy from r8-r14. Otherwise, we copy from the 9422 * FIQ bank for r8-r14. 9423 */ 9424 if (mode == ARM_CPU_MODE_FIQ) { 9425 for (i = 24; i < 31; i++) { 9426 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 9427 } 9428 } else { 9429 for (i = 24; i < 29; i++) { 9430 env->xregs[i] = env->fiq_regs[i - 24]; 9431 } 9432 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 9433 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 9434 } 9435 9436 env->pc = env->regs[15]; 9437 } 9438 9439 /* 9440 * Function used to synchronize QEMU's AArch32 register set with AArch64 9441 * register set. This is necessary when switching between AArch32 and AArch64 9442 * execution state. 9443 */ 9444 void aarch64_sync_64_to_32(CPUARMState *env) 9445 { 9446 int i; 9447 uint32_t mode = env->uncached_cpsr & CPSR_M; 9448 9449 /* We can blanket copy X[0:7] to R[0:7] */ 9450 for (i = 0; i < 8; i++) { 9451 env->regs[i] = env->xregs[i]; 9452 } 9453 9454 /* 9455 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 9456 * Otherwise, we copy x8-x12 into the banked user regs. 9457 */ 9458 if (mode == ARM_CPU_MODE_FIQ) { 9459 for (i = 8; i < 13; i++) { 9460 env->usr_regs[i - 8] = env->xregs[i]; 9461 } 9462 } else { 9463 for (i = 8; i < 13; i++) { 9464 env->regs[i] = env->xregs[i]; 9465 } 9466 } 9467 9468 /* 9469 * Registers r13 & r14 depend on the current mode. 9470 * If we are in a given mode, we copy the corresponding x registers to r13 9471 * and r14. Otherwise, we copy the x register to the banked r13 and r14 9472 * for the mode. 
9473 */ 9474 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9475 env->regs[13] = env->xregs[13]; 9476 env->regs[14] = env->xregs[14]; 9477 } else { 9478 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 9479 9480 /* 9481 * HYP is an exception in that it does not have its own banked r14 but 9482 * shares the USR r14 9483 */ 9484 if (mode == ARM_CPU_MODE_HYP) { 9485 env->regs[14] = env->xregs[14]; 9486 } else { 9487 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 9488 } 9489 } 9490 9491 if (mode == ARM_CPU_MODE_HYP) { 9492 env->regs[13] = env->xregs[15]; 9493 } else { 9494 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 9495 } 9496 9497 if (mode == ARM_CPU_MODE_IRQ) { 9498 env->regs[14] = env->xregs[16]; 9499 env->regs[13] = env->xregs[17]; 9500 } else { 9501 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 9502 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 9503 } 9504 9505 if (mode == ARM_CPU_MODE_SVC) { 9506 env->regs[14] = env->xregs[18]; 9507 env->regs[13] = env->xregs[19]; 9508 } else { 9509 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 9510 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 9511 } 9512 9513 if (mode == ARM_CPU_MODE_ABT) { 9514 env->regs[14] = env->xregs[20]; 9515 env->regs[13] = env->xregs[21]; 9516 } else { 9517 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 9518 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 9519 } 9520 9521 if (mode == ARM_CPU_MODE_UND) { 9522 env->regs[14] = env->xregs[22]; 9523 env->regs[13] = env->xregs[23]; 9524 } else { 9525 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 9526 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 9527 } 9528 9529 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9530 * mode, then we can copy to r8-r14. Otherwise, we copy to the 9531 * FIQ bank for r8-r14. 9532 */ 9533 if (mode == ARM_CPU_MODE_FIQ) { 9534 for (i = 24; i < 31; i++) { 9535 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 9536 } 9537 } else { 9538 for (i = 24; i < 29; i++) { 9539 env->fiq_regs[i - 24] = env->xregs[i]; 9540 } 9541 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 9542 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 9543 } 9544 9545 env->regs[15] = env->pc; 9546 } 9547 9548 static void take_aarch32_exception(CPUARMState *env, int new_mode, 9549 uint32_t mask, uint32_t offset, 9550 uint32_t newpc) 9551 { 9552 int new_el; 9553 9554 /* Change the CPU state so as to actually take the exception. */ 9555 switch_mode(env, new_mode); 9556 9557 /* 9558 * For exceptions taken to AArch32 we must clear the SS bit in both 9559 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 9560 */ 9561 env->pstate &= ~PSTATE_SS; 9562 env->spsr = cpsr_read(env); 9563 /* Clear IT bits. */ 9564 env->condexec_bits = 0; 9565 /* Switch to the new mode, and to the correct instruction set. */ 9566 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 9567 9568 /* This must be after mode switching. 
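 * arm_current_el() derives the EL from the CPSR mode bits written just above,
 * so reading it any earlier would return the EL of the mode we came from.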
*/ 9569 new_el = arm_current_el(env); 9570 9571 /* Set new mode endianness */ 9572 env->uncached_cpsr &= ~CPSR_E; 9573 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { 9574 env->uncached_cpsr |= CPSR_E; 9575 } 9576 /* J and IL must always be cleared for exception entry */ 9577 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 9578 env->daif |= mask; 9579 9580 if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) { 9581 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) { 9582 env->uncached_cpsr |= CPSR_SSBS; 9583 } else { 9584 env->uncached_cpsr &= ~CPSR_SSBS; 9585 } 9586 } 9587 9588 if (new_mode == ARM_CPU_MODE_HYP) { 9589 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 9590 env->elr_el[2] = env->regs[15]; 9591 } else { 9592 /* CPSR.PAN is normally preserved unless... */ 9593 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { 9594 switch (new_el) { 9595 case 3: 9596 if (!arm_is_secure_below_el3(env)) { 9597 /* ... the target is EL3, from non-secure state. */ 9598 env->uncached_cpsr &= ~CPSR_PAN; 9599 break; 9600 } 9601 /* ... the target is EL3, from secure state ... */ 9602 /* fall through */ 9603 case 1: 9604 /* ... the target is EL1 and SCTLR.SPAN is 0. */ 9605 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { 9606 env->uncached_cpsr |= CPSR_PAN; 9607 } 9608 break; 9609 } 9610 } 9611 /* 9612 * This is a lie, as there was no c1_sys on V4T/V5, but who cares 9613 * and we should just guard the thumb mode on V4 9614 */ 9615 if (arm_feature(env, ARM_FEATURE_V4T)) { 9616 env->thumb = 9617 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 9618 } 9619 env->regs[14] = env->regs[15] + offset; 9620 } 9621 env->regs[15] = newpc; 9622 arm_rebuild_hflags(env); 9623 } 9624 9625 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 9626 { 9627 /* 9628 * Handle exception entry to Hyp mode; this is sufficiently 9629 * different to entry to other AArch32 modes that we handle it 9630 * separately here. 9631 * 9632 * The vector table entry used is always the 0x14 Hyp mode entry point, 9633 * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp. 9634 * The offset applied to the preferred return address is always zero 9635 * (see DDI0487C.a section G1.12.3). 9636 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 9637 */ 9638 uint32_t addr, mask; 9639 ARMCPU *cpu = ARM_CPU(cs); 9640 CPUARMState *env = &cpu->env; 9641 9642 switch (cs->exception_index) { 9643 case EXCP_UDEF: 9644 addr = 0x04; 9645 break; 9646 case EXCP_SWI: 9647 addr = 0x08; 9648 break; 9649 case EXCP_BKPT: 9650 /* Fall through to prefetch abort. */ 9651 case EXCP_PREFETCH_ABORT: 9652 env->cp15.ifar_s = env->exception.vaddress; 9653 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 9654 (uint32_t)env->exception.vaddress); 9655 addr = 0x0c; 9656 break; 9657 case EXCP_DATA_ABORT: 9658 env->cp15.dfar_s = env->exception.vaddress; 9659 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 9660 (uint32_t)env->exception.vaddress); 9661 addr = 0x10; 9662 break; 9663 case EXCP_IRQ: 9664 addr = 0x18; 9665 break; 9666 case EXCP_FIQ: 9667 addr = 0x1c; 9668 break; 9669 case EXCP_HVC: 9670 addr = 0x08; 9671 break; 9672 case EXCP_HYP_TRAP: 9673 addr = 0x14; 9674 break; 9675 default: 9676 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9677 } 9678 9679 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 9680 if (!arm_feature(env, ARM_FEATURE_V8)) { 9681 /* 9682 * QEMU syndrome values are v8-style. v7 has the IL bit 9683 * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9684 * If this is a v7 CPU, squash the IL bit in those cases. 9685 */ 9686 if (cs->exception_index == EXCP_PREFETCH_ABORT || 9687 (cs->exception_index == EXCP_DATA_ABORT && 9688 !(env->exception.syndrome & ARM_EL_ISV)) || 9689 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 9690 env->exception.syndrome &= ~ARM_EL_IL; 9691 } 9692 } 9693 env->cp15.esr_el[2] = env->exception.syndrome; 9694 } 9695 9696 if (arm_current_el(env) != 2 && addr < 0x14) { 9697 addr = 0x14; 9698 } 9699 9700 mask = 0; 9701 if (!(env->cp15.scr_el3 & SCR_EA)) { 9702 mask |= CPSR_A; 9703 } 9704 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 9705 mask |= CPSR_I; 9706 } 9707 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 9708 mask |= CPSR_F; 9709 } 9710 9711 addr += env->cp15.hvbar; 9712 9713 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 9714 } 9715 9716 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 9717 { 9718 ARMCPU *cpu = ARM_CPU(cs); 9719 CPUARMState *env = &cpu->env; 9720 uint32_t addr; 9721 uint32_t mask; 9722 int new_mode; 9723 uint32_t offset; 9724 uint32_t moe; 9725 9726 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 9727 switch (syn_get_ec(env->exception.syndrome)) { 9728 case EC_BREAKPOINT: 9729 case EC_BREAKPOINT_SAME_EL: 9730 moe = 1; 9731 break; 9732 case EC_WATCHPOINT: 9733 case EC_WATCHPOINT_SAME_EL: 9734 moe = 10; 9735 break; 9736 case EC_AA32_BKPT: 9737 moe = 3; 9738 break; 9739 case EC_VECTORCATCH: 9740 moe = 5; 9741 break; 9742 default: 9743 moe = 0; 9744 break; 9745 } 9746 9747 if (moe) { 9748 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 9749 } 9750 9751 if (env->exception.target_el == 2) { 9752 arm_cpu_do_interrupt_aarch32_hyp(cs); 9753 return; 9754 } 9755 9756 switch (cs->exception_index) { 9757 case EXCP_UDEF: 9758 new_mode = ARM_CPU_MODE_UND; 9759 addr = 0x04; 9760 mask = CPSR_I; 9761 if (env->thumb) 9762 offset = 2; 9763 else 9764 offset = 4; 9765 break; 9766 case EXCP_SWI: 9767 new_mode = ARM_CPU_MODE_SVC; 9768 addr = 0x08; 9769 mask = CPSR_I; 9770 /* The PC already points to the next instruction. */ 9771 offset = 0; 9772 break; 9773 case EXCP_BKPT: 9774 /* Fall through to prefetch abort. */ 9775 case EXCP_PREFETCH_ABORT: 9776 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 9777 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 9778 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 9779 env->exception.fsr, (uint32_t)env->exception.vaddress); 9780 new_mode = ARM_CPU_MODE_ABT; 9781 addr = 0x0c; 9782 mask = CPSR_A | CPSR_I; 9783 offset = 4; 9784 break; 9785 case EXCP_DATA_ABORT: 9786 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 9787 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 9788 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 9789 env->exception.fsr, 9790 (uint32_t)env->exception.vaddress); 9791 new_mode = ARM_CPU_MODE_ABT; 9792 addr = 0x10; 9793 mask = CPSR_A | CPSR_I; 9794 offset = 8; 9795 break; 9796 case EXCP_IRQ: 9797 new_mode = ARM_CPU_MODE_IRQ; 9798 addr = 0x18; 9799 /* Disable IRQ and imprecise data aborts. */ 9800 mask = CPSR_A | CPSR_I; 9801 offset = 4; 9802 if (env->cp15.scr_el3 & SCR_IRQ) { 9803 /* IRQ routed to monitor mode */ 9804 new_mode = ARM_CPU_MODE_MON; 9805 mask |= CPSR_F; 9806 } 9807 break; 9808 case EXCP_FIQ: 9809 new_mode = ARM_CPU_MODE_FIQ; 9810 addr = 0x1c; 9811 /* Disable FIQ, IRQ and imprecise data aborts. 
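 * Note that, unlike IRQ entry above, FIQ entry also sets CPSR_F so that
 * further FIQs are masked while in the handler.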
*/ 9812 mask = CPSR_A | CPSR_I | CPSR_F; 9813 if (env->cp15.scr_el3 & SCR_FIQ) { 9814 /* FIQ routed to monitor mode */ 9815 new_mode = ARM_CPU_MODE_MON; 9816 } 9817 offset = 4; 9818 break; 9819 case EXCP_VIRQ: 9820 new_mode = ARM_CPU_MODE_IRQ; 9821 addr = 0x18; 9822 /* Disable IRQ and imprecise data aborts. */ 9823 mask = CPSR_A | CPSR_I; 9824 offset = 4; 9825 break; 9826 case EXCP_VFIQ: 9827 new_mode = ARM_CPU_MODE_FIQ; 9828 addr = 0x1c; 9829 /* Disable FIQ, IRQ and imprecise data aborts. */ 9830 mask = CPSR_A | CPSR_I | CPSR_F; 9831 offset = 4; 9832 break; 9833 case EXCP_SMC: 9834 new_mode = ARM_CPU_MODE_MON; 9835 addr = 0x08; 9836 mask = CPSR_A | CPSR_I | CPSR_F; 9837 offset = 0; 9838 break; 9839 default: 9840 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9841 return; /* Never happens. Keep compiler happy. */ 9842 } 9843 9844 if (new_mode == ARM_CPU_MODE_MON) { 9845 addr += env->cp15.mvbar; 9846 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 9847 /* High vectors. When enabled, base address cannot be remapped. */ 9848 addr += 0xffff0000; 9849 } else { 9850 /* ARM v7 architectures provide a vector base address register to remap 9851 * the interrupt vector table. 9852 * This register is only followed in non-monitor mode, and is banked. 9853 * Note: only bits 31:5 are valid. 9854 */ 9855 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 9856 } 9857 9858 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 9859 env->cp15.scr_el3 &= ~SCR_NS; 9860 } 9861 9862 take_aarch32_exception(env, new_mode, mask, offset, addr); 9863 } 9864 9865 static int aarch64_regnum(CPUARMState *env, int aarch32_reg) 9866 { 9867 /* 9868 * Return the register number of the AArch64 view of the AArch32 9869 * register @aarch32_reg. The CPUARMState CPSR is assumed to still 9870 * be that of the AArch32 mode the exception came from. 9871 */ 9872 int mode = env->uncached_cpsr & CPSR_M; 9873 9874 switch (aarch32_reg) { 9875 case 0 ... 7: 9876 return aarch32_reg; 9877 case 8 ... 12: 9878 return mode == ARM_CPU_MODE_FIQ ? 
aarch32_reg + 16 : aarch32_reg; 9879 case 13: 9880 switch (mode) { 9881 case ARM_CPU_MODE_USR: 9882 case ARM_CPU_MODE_SYS: 9883 return 13; 9884 case ARM_CPU_MODE_HYP: 9885 return 15; 9886 case ARM_CPU_MODE_IRQ: 9887 return 17; 9888 case ARM_CPU_MODE_SVC: 9889 return 19; 9890 case ARM_CPU_MODE_ABT: 9891 return 21; 9892 case ARM_CPU_MODE_UND: 9893 return 23; 9894 case ARM_CPU_MODE_FIQ: 9895 return 29; 9896 default: 9897 g_assert_not_reached(); 9898 } 9899 case 14: 9900 switch (mode) { 9901 case ARM_CPU_MODE_USR: 9902 case ARM_CPU_MODE_SYS: 9903 case ARM_CPU_MODE_HYP: 9904 return 14; 9905 case ARM_CPU_MODE_IRQ: 9906 return 16; 9907 case ARM_CPU_MODE_SVC: 9908 return 18; 9909 case ARM_CPU_MODE_ABT: 9910 return 20; 9911 case ARM_CPU_MODE_UND: 9912 return 22; 9913 case ARM_CPU_MODE_FIQ: 9914 return 30; 9915 default: 9916 g_assert_not_reached(); 9917 } 9918 case 15: 9919 return 31; 9920 default: 9921 g_assert_not_reached(); 9922 } 9923 } 9924 9925 static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) 9926 { 9927 uint32_t ret = cpsr_read(env); 9928 9929 /* Move DIT to the correct location for SPSR_ELx */ 9930 if (ret & CPSR_DIT) { 9931 ret &= ~CPSR_DIT; 9932 ret |= PSTATE_DIT; 9933 } 9934 /* Merge PSTATE.SS into SPSR_ELx */ 9935 ret |= env->pstate & PSTATE_SS; 9936 9937 return ret; 9938 } 9939 9940 /* Handle exception entry to a target EL which is using AArch64 */ 9941 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 9942 { 9943 ARMCPU *cpu = ARM_CPU(cs); 9944 CPUARMState *env = &cpu->env; 9945 unsigned int new_el = env->exception.target_el; 9946 target_ulong addr = env->cp15.vbar_el[new_el]; 9947 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 9948 unsigned int old_mode; 9949 unsigned int cur_el = arm_current_el(env); 9950 int rt; 9951 9952 /* 9953 * Note that new_el can never be 0. If cur_el is 0, then 9954 * el0_a64 is is_a64(), else el0_a64 is ignored. 9955 */ 9956 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 9957 9958 if (cur_el < new_el) { 9959 /* Entry vector offset depends on whether the implemented EL 9960 * immediately lower than the target level is using AArch32 or AArch64 9961 */ 9962 bool is_aa64; 9963 uint64_t hcr; 9964 9965 switch (new_el) { 9966 case 3: 9967 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 9968 break; 9969 case 2: 9970 hcr = arm_hcr_el2_eff(env); 9971 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 9972 is_aa64 = (hcr & HCR_RW) != 0; 9973 break; 9974 } 9975 /* fall through */ 9976 case 1: 9977 is_aa64 = is_a64(env); 9978 break; 9979 default: 9980 g_assert_not_reached(); 9981 } 9982 9983 if (is_aa64) { 9984 addr += 0x400; 9985 } else { 9986 addr += 0x600; 9987 } 9988 } else if (pstate_read(env) & PSTATE_SP) { 9989 addr += 0x200; 9990 } 9991 9992 switch (cs->exception_index) { 9993 case EXCP_PREFETCH_ABORT: 9994 case EXCP_DATA_ABORT: 9995 env->cp15.far_el[new_el] = env->exception.vaddress; 9996 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 9997 env->cp15.far_el[new_el]); 9998 /* fall through */ 9999 case EXCP_BKPT: 10000 case EXCP_UDEF: 10001 case EXCP_SWI: 10002 case EXCP_HVC: 10003 case EXCP_HYP_TRAP: 10004 case EXCP_SMC: 10005 switch (syn_get_ec(env->exception.syndrome)) { 10006 case EC_ADVSIMDFPACCESSTRAP: 10007 /* 10008 * QEMU internal FP/SIMD syndromes from AArch32 include the 10009 * TA and coproc fields which are only exposed if the exception 10010 * is taken to AArch32 Hyp mode. Mask them out to get a valid 10011 * AArch64 format syndrome. 
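 * (The mask below clears ISS bits [19:0], which is where those AArch32-only
 * fields live, leaving only the CV and COND fields.)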
10012 */ 10013 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 10014 break; 10015 case EC_CP14RTTRAP: 10016 case EC_CP15RTTRAP: 10017 case EC_CP14DTTRAP: 10018 /* 10019 * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently 10020 * the raw register field from the insn; when taking this to 10021 * AArch64 we must convert it to the AArch64 view of the register 10022 * number. Notice that we read a 4-bit AArch32 register number and 10023 * write back a 5-bit AArch64 one. 10024 */ 10025 rt = extract32(env->exception.syndrome, 5, 4); 10026 rt = aarch64_regnum(env, rt); 10027 env->exception.syndrome = deposit32(env->exception.syndrome, 10028 5, 5, rt); 10029 break; 10030 case EC_CP15RRTTRAP: 10031 case EC_CP14RRTTRAP: 10032 /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */ 10033 rt = extract32(env->exception.syndrome, 5, 4); 10034 rt = aarch64_regnum(env, rt); 10035 env->exception.syndrome = deposit32(env->exception.syndrome, 10036 5, 5, rt); 10037 rt = extract32(env->exception.syndrome, 10, 4); 10038 rt = aarch64_regnum(env, rt); 10039 env->exception.syndrome = deposit32(env->exception.syndrome, 10040 10, 5, rt); 10041 break; 10042 } 10043 env->cp15.esr_el[new_el] = env->exception.syndrome; 10044 break; 10045 case EXCP_IRQ: 10046 case EXCP_VIRQ: 10047 addr += 0x80; 10048 break; 10049 case EXCP_FIQ: 10050 case EXCP_VFIQ: 10051 addr += 0x100; 10052 break; 10053 default: 10054 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 10055 } 10056 10057 if (is_a64(env)) { 10058 old_mode = pstate_read(env); 10059 aarch64_save_sp(env, arm_current_el(env)); 10060 env->elr_el[new_el] = env->pc; 10061 } else { 10062 old_mode = cpsr_read_for_spsr_elx(env); 10063 env->elr_el[new_el] = env->regs[15]; 10064 10065 aarch64_sync_32_to_64(env); 10066 10067 env->condexec_bits = 0; 10068 } 10069 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 10070 10071 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 10072 env->elr_el[new_el]); 10073 10074 if (cpu_isar_feature(aa64_pan, cpu)) { 10075 /* The value of PSTATE.PAN is normally preserved, except when ... */ 10076 new_mode |= old_mode & PSTATE_PAN; 10077 switch (new_el) { 10078 case 2: 10079 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 10080 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 10081 != (HCR_E2H | HCR_TGE)) { 10082 break; 10083 } 10084 /* fall through */ 10085 case 1: 10086 /* ... the target is EL1 ... */ 10087 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ 10088 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 10089 new_mode |= PSTATE_PAN; 10090 } 10091 break; 10092 } 10093 } 10094 if (cpu_isar_feature(aa64_mte, cpu)) { 10095 new_mode |= PSTATE_TCO; 10096 } 10097 10098 if (cpu_isar_feature(aa64_ssbs, cpu)) { 10099 if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) { 10100 new_mode |= PSTATE_SSBS; 10101 } else { 10102 new_mode &= ~PSTATE_SSBS; 10103 } 10104 } 10105 10106 pstate_write(env, PSTATE_DAIF | new_mode); 10107 env->aarch64 = true; 10108 aarch64_restore_sp(env, new_el); 10109 helper_rebuild_hflags_a64(env, new_el); 10110 10111 env->pc = addr; 10112 10113 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 10114 new_el, env->pc, pstate_read(env)); 10115 } 10116 10117 /* 10118 * Do semihosting call and set the appropriate return value. All the 10119 * permission and validity checks have been done at translate time. 10120 * 10121 * We only see semihosting exceptions in TCG, as they are not 10122 * trapped to the hypervisor in KVM.
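 * The PC adjustment below (+4, or +2 for Thumb) steps past the instruction
 * that raised EXCP_SEMIHOST, since the call is handled here rather than
 * delivered as an architectural exception.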
10123 */ 10124 #ifdef CONFIG_TCG 10125 static void handle_semihosting(CPUState *cs) 10126 { 10127 ARMCPU *cpu = ARM_CPU(cs); 10128 CPUARMState *env = &cpu->env; 10129 10130 if (is_a64(env)) { 10131 qemu_log_mask(CPU_LOG_INT, 10132 "...handling as semihosting call 0x%" PRIx64 "\n", 10133 env->xregs[0]); 10134 env->xregs[0] = do_common_semihosting(cs); 10135 env->pc += 4; 10136 } else { 10137 qemu_log_mask(CPU_LOG_INT, 10138 "...handling as semihosting call 0x%x\n", 10139 env->regs[0]); 10140 env->regs[0] = do_common_semihosting(cs); 10141 env->regs[15] += env->thumb ? 2 : 4; 10142 } 10143 } 10144 #endif 10145 10146 /* Handle a CPU exception for A and R profile CPUs. 10147 * Do any appropriate logging, handle PSCI calls, and then hand off 10148 * to the AArch64-entry or AArch32-entry function depending on the 10149 * target exception level's register width. 10150 * 10151 * Note: this is used for both TCG (as the do_interrupt tcg op), 10152 * and KVM to re-inject guest debug exceptions, and to 10153 * inject a Synchronous-External-Abort. 10154 */ 10155 void arm_cpu_do_interrupt(CPUState *cs) 10156 { 10157 ARMCPU *cpu = ARM_CPU(cs); 10158 CPUARMState *env = &cpu->env; 10159 unsigned int new_el = env->exception.target_el; 10160 10161 assert(!arm_feature(env, ARM_FEATURE_M)); 10162 10163 arm_log_exception(cs); 10164 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 10165 new_el); 10166 if (qemu_loglevel_mask(CPU_LOG_INT) 10167 && !excp_is_internal(cs->exception_index)) { 10168 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 10169 syn_get_ec(env->exception.syndrome), 10170 env->exception.syndrome); 10171 } 10172 10173 if (arm_is_psci_call(cpu, cs->exception_index)) { 10174 arm_handle_psci_call(cpu); 10175 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 10176 return; 10177 } 10178 10179 /* 10180 * Semihosting semantics depend on the register width of the code 10181 * that caused the exception, not the target exception level, so 10182 * must be handled here. 10183 */ 10184 #ifdef CONFIG_TCG 10185 if (cs->exception_index == EXCP_SEMIHOST) { 10186 handle_semihosting(cs); 10187 return; 10188 } 10189 #endif 10190 10191 /* Hooks may change global state so BQL should be held, also the 10192 * BQL needs to be held for any modification of 10193 * cs->interrupt_request. 10194 */ 10195 g_assert(qemu_mutex_iothread_locked()); 10196 10197 arm_call_pre_el_change_hook(cpu); 10198 10199 assert(!excp_is_internal(cs->exception_index)); 10200 if (arm_el_is_aa64(env, new_el)) { 10201 arm_cpu_do_interrupt_aarch64(cs); 10202 } else { 10203 arm_cpu_do_interrupt_aarch32(cs); 10204 } 10205 10206 arm_call_el_change_hook(cpu); 10207 10208 if (!kvm_enabled()) { 10209 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 10210 } 10211 } 10212 #endif /* !CONFIG_USER_ONLY */ 10213 10214 uint64_t arm_sctlr(CPUARMState *env, int el) 10215 { 10216 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ 10217 if (el == 0) { 10218 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); 10219 el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0) 10220 ? 
2 : 1; 10221 } 10222 return env->cp15.sctlr_el[el]; 10223 } 10224 10225 /* Return the SCTLR value which controls this address translation regime */ 10226 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 10227 { 10228 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 10229 } 10230 10231 #ifndef CONFIG_USER_ONLY 10232 10233 /* Return true if the specified stage of address translation is disabled */ 10234 static inline bool regime_translation_disabled(CPUARMState *env, 10235 ARMMMUIdx mmu_idx) 10236 { 10237 uint64_t hcr_el2; 10238 10239 if (arm_feature(env, ARM_FEATURE_M)) { 10240 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 10241 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 10242 case R_V7M_MPU_CTRL_ENABLE_MASK: 10243 /* Enabled, but not for HardFault and NMI */ 10244 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 10245 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 10246 /* Enabled for all cases */ 10247 return false; 10248 case 0: 10249 default: 10250 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 10251 * we warned about that in armv7m_nvic.c when the guest set it. 10252 */ 10253 return true; 10254 } 10255 } 10256 10257 hcr_el2 = arm_hcr_el2_eff(env); 10258 10259 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 10260 /* HCR.DC means HCR.VM behaves as 1 */ 10261 return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; 10262 } 10263 10264 if (hcr_el2 & HCR_TGE) { 10265 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 10266 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 10267 return true; 10268 } 10269 } 10270 10271 if ((hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 10272 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 10273 return true; 10274 } 10275 10276 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 10277 } 10278 10279 static inline bool regime_translation_big_endian(CPUARMState *env, 10280 ARMMMUIdx mmu_idx) 10281 { 10282 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 10283 } 10284 10285 /* Return the TTBR associated with this translation regime */ 10286 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 10287 int ttbrn) 10288 { 10289 if (mmu_idx == ARMMMUIdx_Stage2) { 10290 return env->cp15.vttbr_el2; 10291 } 10292 if (mmu_idx == ARMMMUIdx_Stage2_S) { 10293 return env->cp15.vsttbr_el2; 10294 } 10295 if (ttbrn == 0) { 10296 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 10297 } else { 10298 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 10299 } 10300 } 10301 10302 #endif /* !CONFIG_USER_ONLY */ 10303 10304 /* Convert a possible stage1+2 MMU index into the appropriate 10305 * stage 1 MMU index 10306 */ 10307 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 10308 { 10309 switch (mmu_idx) { 10310 case ARMMMUIdx_SE10_0: 10311 return ARMMMUIdx_Stage1_SE0; 10312 case ARMMMUIdx_SE10_1: 10313 return ARMMMUIdx_Stage1_SE1; 10314 case ARMMMUIdx_SE10_1_PAN: 10315 return ARMMMUIdx_Stage1_SE1_PAN; 10316 case ARMMMUIdx_E10_0: 10317 return ARMMMUIdx_Stage1_E0; 10318 case ARMMMUIdx_E10_1: 10319 return ARMMMUIdx_Stage1_E1; 10320 case ARMMMUIdx_E10_1_PAN: 10321 return ARMMMUIdx_Stage1_E1_PAN; 10322 default: 10323 return mmu_idx; 10324 } 10325 } 10326 10327 /* Return true if the translation regime is using LPAE format page tables */ 10328 static inline bool regime_using_lpae_format(CPUARMState *env, 10329 ARMMMUIdx mmu_idx) 10330 { 10331 int el = regime_el(env, mmu_idx); 10332 if (el == 2 || arm_el_is_aa64(env, el)) { 10333 return 
true; 10334 } 10335 if (arm_feature(env, ARM_FEATURE_LPAE) 10336 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 10337 return true; 10338 } 10339 return false; 10340 } 10341 10342 /* Returns true if the stage 1 translation regime is using LPAE format page 10343 * tables. Used when raising alignment exceptions, whose FSR changes depending 10344 * on whether the long or short descriptor format is in use. */ 10345 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 10346 { 10347 mmu_idx = stage_1_mmu_idx(mmu_idx); 10348 10349 return regime_using_lpae_format(env, mmu_idx); 10350 } 10351 10352 #ifndef CONFIG_USER_ONLY 10353 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 10354 { 10355 switch (mmu_idx) { 10356 case ARMMMUIdx_SE10_0: 10357 case ARMMMUIdx_E20_0: 10358 case ARMMMUIdx_SE20_0: 10359 case ARMMMUIdx_Stage1_E0: 10360 case ARMMMUIdx_Stage1_SE0: 10361 case ARMMMUIdx_MUser: 10362 case ARMMMUIdx_MSUser: 10363 case ARMMMUIdx_MUserNegPri: 10364 case ARMMMUIdx_MSUserNegPri: 10365 return true; 10366 default: 10367 return false; 10368 case ARMMMUIdx_E10_0: 10369 case ARMMMUIdx_E10_1: 10370 case ARMMMUIdx_E10_1_PAN: 10371 g_assert_not_reached(); 10372 } 10373 } 10374 10375 /* Translate section/page access permissions to page 10376 * R/W protection flags 10377 * 10378 * @env: CPUARMState 10379 * @mmu_idx: MMU index indicating required translation regime 10380 * @ap: The 3-bit access permissions (AP[2:0]) 10381 * @domain_prot: The 2-bit domain access permissions 10382 */ 10383 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 10384 int ap, int domain_prot) 10385 { 10386 bool is_user = regime_is_user(env, mmu_idx); 10387 10388 if (domain_prot == 3) { 10389 return PAGE_READ | PAGE_WRITE; 10390 } 10391 10392 switch (ap) { 10393 case 0: 10394 if (arm_feature(env, ARM_FEATURE_V7)) { 10395 return 0; 10396 } 10397 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 10398 case SCTLR_S: 10399 return is_user ? 0 : PAGE_READ; 10400 case SCTLR_R: 10401 return PAGE_READ; 10402 default: 10403 return 0; 10404 } 10405 case 1: 10406 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 10407 case 2: 10408 if (is_user) { 10409 return PAGE_READ; 10410 } else { 10411 return PAGE_READ | PAGE_WRITE; 10412 } 10413 case 3: 10414 return PAGE_READ | PAGE_WRITE; 10415 case 4: /* Reserved. */ 10416 return 0; 10417 case 5: 10418 return is_user ? 0 : PAGE_READ; 10419 case 6: 10420 return PAGE_READ; 10421 case 7: 10422 if (!arm_feature(env, ARM_FEATURE_V6K)) { 10423 return 0; 10424 } 10425 return PAGE_READ; 10426 default: 10427 g_assert_not_reached(); 10428 } 10429 } 10430 10431 /* Translate section/page access permissions to page 10432 * R/W protection flags. 10433 * 10434 * @ap: The 2-bit simple AP (AP[2:1]) 10435 * @is_user: TRUE if accessing from PL0 10436 */ 10437 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 10438 { 10439 switch (ap) { 10440 case 0: 10441 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 10442 case 1: 10443 return PAGE_READ | PAGE_WRITE; 10444 case 2: 10445 return is_user ? 
0 : PAGE_READ; 10446 case 3: 10447 return PAGE_READ; 10448 default: 10449 g_assert_not_reached(); 10450 } 10451 } 10452 10453 static inline int 10454 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 10455 { 10456 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 10457 } 10458 10459 /* Translate S2 section/page access permissions to protection flags 10460 * 10461 * @env: CPUARMState 10462 * @s2ap: The 2-bit stage2 access permissions (S2AP) 10463 * @xn: XN (execute-never) bits 10464 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 10465 */ 10466 static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) 10467 { 10468 int prot = 0; 10469 10470 if (s2ap & 1) { 10471 prot |= PAGE_READ; 10472 } 10473 if (s2ap & 2) { 10474 prot |= PAGE_WRITE; 10475 } 10476 10477 if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { 10478 switch (xn) { 10479 case 0: 10480 prot |= PAGE_EXEC; 10481 break; 10482 case 1: 10483 if (s1_is_el0) { 10484 prot |= PAGE_EXEC; 10485 } 10486 break; 10487 case 2: 10488 break; 10489 case 3: 10490 if (!s1_is_el0) { 10491 prot |= PAGE_EXEC; 10492 } 10493 break; 10494 default: 10495 g_assert_not_reached(); 10496 } 10497 } else { 10498 if (!extract32(xn, 1, 1)) { 10499 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 10500 prot |= PAGE_EXEC; 10501 } 10502 } 10503 } 10504 return prot; 10505 } 10506 10507 /* Translate section/page access permissions to protection flags 10508 * 10509 * @env: CPUARMState 10510 * @mmu_idx: MMU index indicating required translation regime 10511 * @is_aa64: TRUE if AArch64 10512 * @ap: The 2-bit simple AP (AP[2:1]) 10513 * @ns: NS (non-secure) bit 10514 * @xn: XN (execute-never) bit 10515 * @pxn: PXN (privileged execute-never) bit 10516 */ 10517 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 10518 int ap, int ns, int xn, int pxn) 10519 { 10520 bool is_user = regime_is_user(env, mmu_idx); 10521 int prot_rw, user_rw; 10522 bool have_wxn; 10523 int wxn = 0; 10524 10525 assert(mmu_idx != ARMMMUIdx_Stage2); 10526 assert(mmu_idx != ARMMMUIdx_Stage2_S); 10527 10528 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 10529 if (is_user) { 10530 prot_rw = user_rw; 10531 } else { 10532 if (user_rw && regime_is_pan(env, mmu_idx)) { 10533 /* PAN forbids data accesses but doesn't affect insn fetch */ 10534 prot_rw = 0; 10535 } else { 10536 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 10537 } 10538 } 10539 10540 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 10541 return prot_rw; 10542 } 10543 10544 /* TODO have_wxn should be replaced with 10545 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 10546 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 10547 * compatible processors have EL2, which is required for [U]WXN. 
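 * (SCTLR.WXN makes writable regions execute-never; SCTLR.UWXN does the same
 * at PL1 for regions that are writable from PL0. Both are applied below.)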
10548 */ 10549 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 10550 10551 if (have_wxn) { 10552 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 10553 } 10554 10555 if (is_aa64) { 10556 if (regime_has_2_ranges(mmu_idx) && !is_user) { 10557 xn = pxn || (user_rw & PAGE_WRITE); 10558 } 10559 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10560 switch (regime_el(env, mmu_idx)) { 10561 case 1: 10562 case 3: 10563 if (is_user) { 10564 xn = xn || !(user_rw & PAGE_READ); 10565 } else { 10566 int uwxn = 0; 10567 if (have_wxn) { 10568 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 10569 } 10570 xn = xn || !(prot_rw & PAGE_READ) || pxn || 10571 (uwxn && (user_rw & PAGE_WRITE)); 10572 } 10573 break; 10574 case 2: 10575 break; 10576 } 10577 } else { 10578 xn = wxn = 0; 10579 } 10580 10581 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 10582 return prot_rw; 10583 } 10584 return prot_rw | PAGE_EXEC; 10585 } 10586 10587 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 10588 uint32_t *table, uint32_t address) 10589 { 10590 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 10591 TCR *tcr = regime_tcr(env, mmu_idx); 10592 10593 if (address & tcr->mask) { 10594 if (tcr->raw_tcr & TTBCR_PD1) { 10595 /* Translation table walk disabled for TTBR1 */ 10596 return false; 10597 } 10598 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 10599 } else { 10600 if (tcr->raw_tcr & TTBCR_PD0) { 10601 /* Translation table walk disabled for TTBR0 */ 10602 return false; 10603 } 10604 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 10605 } 10606 *table |= (address >> 18) & 0x3ffc; 10607 return true; 10608 } 10609 10610 /* Translate a S1 pagetable walk through S2 if needed. */ 10611 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 10612 hwaddr addr, bool *is_secure, 10613 ARMMMUFaultInfo *fi) 10614 { 10615 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && 10616 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 10617 target_ulong s2size; 10618 hwaddr s2pa; 10619 int s2prot; 10620 int ret; 10621 ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S 10622 : ARMMMUIdx_Stage2; 10623 ARMCacheAttrs cacheattrs = {}; 10624 MemTxAttrs txattrs = {}; 10625 10626 ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false, 10627 &s2pa, &txattrs, &s2prot, &s2size, fi, 10628 &cacheattrs); 10629 if (ret) { 10630 assert(fi->type != ARMFault_None); 10631 fi->s2addr = addr; 10632 fi->stage2 = true; 10633 fi->s1ptw = true; 10634 fi->s1ns = !*is_secure; 10635 return ~0; 10636 } 10637 if ((arm_hcr_el2_eff(env) & HCR_PTW) && 10638 (cacheattrs.attrs & 0xf0) == 0) { 10639 /* 10640 * PTW set and S1 walk touched S2 Device memory: 10641 * generate Permission fault. 10642 */ 10643 fi->type = ARMFault_Permission; 10644 fi->s2addr = addr; 10645 fi->stage2 = true; 10646 fi->s1ptw = true; 10647 fi->s1ns = !*is_secure; 10648 return ~0; 10649 } 10650 10651 if (arm_is_secure_below_el3(env)) { 10652 /* Check if page table walk is to secure or non-secure PA space. */ 10653 if (*is_secure) { 10654 *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW); 10655 } else { 10656 *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW); 10657 } 10658 } else { 10659 assert(!*is_secure); 10660 } 10661 10662 addr = s2pa; 10663 } 10664 return addr; 10665 } 10666 10667 /* All loads done in the course of a page table walk go through here. 
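 * They translate the walk address through stage 2 where required (via
 * S1_ptw_translate() above) and honour the regime's descriptor endianness
 * (SCTLR.EE) when reading.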
*/ 10668 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10669 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10670 { 10671 ARMCPU *cpu = ARM_CPU(cs); 10672 CPUARMState *env = &cpu->env; 10673 MemTxAttrs attrs = {}; 10674 MemTxResult result = MEMTX_OK; 10675 AddressSpace *as; 10676 uint32_t data; 10677 10678 addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi); 10679 attrs.secure = is_secure; 10680 as = arm_addressspace(cs, attrs); 10681 if (fi->s1ptw) { 10682 return 0; 10683 } 10684 if (regime_translation_big_endian(env, mmu_idx)) { 10685 data = address_space_ldl_be(as, addr, attrs, &result); 10686 } else { 10687 data = address_space_ldl_le(as, addr, attrs, &result); 10688 } 10689 if (result == MEMTX_OK) { 10690 return data; 10691 } 10692 fi->type = ARMFault_SyncExternalOnWalk; 10693 fi->ea = arm_extabort_type(result); 10694 return 0; 10695 } 10696 10697 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10698 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10699 { 10700 ARMCPU *cpu = ARM_CPU(cs); 10701 CPUARMState *env = &cpu->env; 10702 MemTxAttrs attrs = {}; 10703 MemTxResult result = MEMTX_OK; 10704 AddressSpace *as; 10705 uint64_t data; 10706 10707 addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi); 10708 attrs.secure = is_secure; 10709 as = arm_addressspace(cs, attrs); 10710 if (fi->s1ptw) { 10711 return 0; 10712 } 10713 if (regime_translation_big_endian(env, mmu_idx)) { 10714 data = address_space_ldq_be(as, addr, attrs, &result); 10715 } else { 10716 data = address_space_ldq_le(as, addr, attrs, &result); 10717 } 10718 if (result == MEMTX_OK) { 10719 return data; 10720 } 10721 fi->type = ARMFault_SyncExternalOnWalk; 10722 fi->ea = arm_extabort_type(result); 10723 return 0; 10724 } 10725 10726 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 10727 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10728 hwaddr *phys_ptr, int *prot, 10729 target_ulong *page_size, 10730 ARMMMUFaultInfo *fi) 10731 { 10732 CPUState *cs = env_cpu(env); 10733 int level = 1; 10734 uint32_t table; 10735 uint32_t desc; 10736 int type; 10737 int ap; 10738 int domain = 0; 10739 int domain_prot; 10740 hwaddr phys_addr; 10741 uint32_t dacr; 10742 10743 /* Pagetable walk. */ 10744 /* Lookup l1 descriptor. */ 10745 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10746 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10747 fi->type = ARMFault_Translation; 10748 goto do_fault; 10749 } 10750 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10751 mmu_idx, fi); 10752 if (fi->type != ARMFault_None) { 10753 goto do_fault; 10754 } 10755 type = (desc & 3); 10756 domain = (desc >> 5) & 0x0f; 10757 if (regime_el(env, mmu_idx) == 1) { 10758 dacr = env->cp15.dacr_ns; 10759 } else { 10760 dacr = env->cp15.dacr_s; 10761 } 10762 domain_prot = (dacr >> (domain * 2)) & 3; 10763 if (type == 0) { 10764 /* Section translation fault. */ 10765 fi->type = ARMFault_Translation; 10766 goto do_fault; 10767 } 10768 if (type != 2) { 10769 level = 2; 10770 } 10771 if (domain_prot == 0 || domain_prot == 2) { 10772 fi->type = ARMFault_Domain; 10773 goto do_fault; 10774 } 10775 if (type == 2) { 10776 /* 1Mb section. */ 10777 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10778 ap = (desc >> 10) & 3; 10779 *page_size = 1024 * 1024; 10780 } else { 10781 /* Lookup l2 entry. */ 10782 if (type == 1) { 10783 /* Coarse pagetable. */ 10784 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10785 } else { 10786 /* Fine pagetable. 
*/ 10787 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 10788 } 10789 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10790 mmu_idx, fi); 10791 if (fi->type != ARMFault_None) { 10792 goto do_fault; 10793 } 10794 switch (desc & 3) { 10795 case 0: /* Page translation fault. */ 10796 fi->type = ARMFault_Translation; 10797 goto do_fault; 10798 case 1: /* 64k page. */ 10799 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10800 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 10801 *page_size = 0x10000; 10802 break; 10803 case 2: /* 4k page. */ 10804 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10805 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 10806 *page_size = 0x1000; 10807 break; 10808 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 10809 if (type == 1) { 10810 /* ARMv6/XScale extended small page format */ 10811 if (arm_feature(env, ARM_FEATURE_XSCALE) 10812 || arm_feature(env, ARM_FEATURE_V6)) { 10813 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10814 *page_size = 0x1000; 10815 } else { 10816 /* UNPREDICTABLE in ARMv5; we choose to take a 10817 * page translation fault. 10818 */ 10819 fi->type = ARMFault_Translation; 10820 goto do_fault; 10821 } 10822 } else { 10823 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 10824 *page_size = 0x400; 10825 } 10826 ap = (desc >> 4) & 3; 10827 break; 10828 default: 10829 /* Never happens, but compiler isn't smart enough to tell. */ 10830 g_assert_not_reached(); 10831 } 10832 } 10833 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10834 *prot |= *prot ? PAGE_EXEC : 0; 10835 if (!(*prot & (1 << access_type))) { 10836 /* Access permission fault. */ 10837 fi->type = ARMFault_Permission; 10838 goto do_fault; 10839 } 10840 *phys_ptr = phys_addr; 10841 return false; 10842 do_fault: 10843 fi->domain = domain; 10844 fi->level = level; 10845 return true; 10846 } 10847 10848 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 10849 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10850 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10851 target_ulong *page_size, ARMMMUFaultInfo *fi) 10852 { 10853 CPUState *cs = env_cpu(env); 10854 ARMCPU *cpu = env_archcpu(env); 10855 int level = 1; 10856 uint32_t table; 10857 uint32_t desc; 10858 uint32_t xn; 10859 uint32_t pxn = 0; 10860 int type; 10861 int ap; 10862 int domain = 0; 10863 int domain_prot; 10864 hwaddr phys_addr; 10865 uint32_t dacr; 10866 bool ns; 10867 10868 /* Pagetable walk. */ 10869 /* Lookup l1 descriptor. */ 10870 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10871 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10872 fi->type = ARMFault_Translation; 10873 goto do_fault; 10874 } 10875 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10876 mmu_idx, fi); 10877 if (fi->type != ARMFault_None) { 10878 goto do_fault; 10879 } 10880 type = (desc & 3); 10881 if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) { 10882 /* Section translation fault, or attempt to use the encoding 10883 * which is Reserved on implementations without PXN. 10884 */ 10885 fi->type = ARMFault_Translation; 10886 goto do_fault; 10887 } 10888 if ((type == 1) || !(desc & (1 << 18))) { 10889 /* Page or Section. 
*/ 10890 domain = (desc >> 5) & 0x0f; 10891 } 10892 if (regime_el(env, mmu_idx) == 1) { 10893 dacr = env->cp15.dacr_ns; 10894 } else { 10895 dacr = env->cp15.dacr_s; 10896 } 10897 if (type == 1) { 10898 level = 2; 10899 } 10900 domain_prot = (dacr >> (domain * 2)) & 3; 10901 if (domain_prot == 0 || domain_prot == 2) { 10902 /* Section or Page domain fault */ 10903 fi->type = ARMFault_Domain; 10904 goto do_fault; 10905 } 10906 if (type != 1) { 10907 if (desc & (1 << 18)) { 10908 /* Supersection. */ 10909 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 10910 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 10911 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 10912 *page_size = 0x1000000; 10913 } else { 10914 /* Section. */ 10915 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10916 *page_size = 0x100000; 10917 } 10918 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 10919 xn = desc & (1 << 4); 10920 pxn = desc & 1; 10921 ns = extract32(desc, 19, 1); 10922 } else { 10923 if (cpu_isar_feature(aa32_pxn, cpu)) { 10924 pxn = (desc >> 2) & 1; 10925 } 10926 ns = extract32(desc, 3, 1); 10927 /* Lookup l2 entry. */ 10928 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10929 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10930 mmu_idx, fi); 10931 if (fi->type != ARMFault_None) { 10932 goto do_fault; 10933 } 10934 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 10935 switch (desc & 3) { 10936 case 0: /* Page translation fault. */ 10937 fi->type = ARMFault_Translation; 10938 goto do_fault; 10939 case 1: /* 64k page. */ 10940 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10941 xn = desc & (1 << 15); 10942 *page_size = 0x10000; 10943 break; 10944 case 2: case 3: /* 4k page. */ 10945 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10946 xn = desc & 1; 10947 *page_size = 0x1000; 10948 break; 10949 default: 10950 /* Never happens, but compiler isn't smart enough to tell. */ 10951 g_assert_not_reached(); 10952 } 10953 } 10954 if (domain_prot == 3) { 10955 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10956 } else { 10957 if (pxn && !regime_is_user(env, mmu_idx)) { 10958 xn = 1; 10959 } 10960 if (xn && access_type == MMU_INST_FETCH) { 10961 fi->type = ARMFault_Permission; 10962 goto do_fault; 10963 } 10964 10965 if (arm_feature(env, ARM_FEATURE_V6K) && 10966 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 10967 /* The simplified model uses AP[0] as an access control bit. */ 10968 if ((ap & 1) == 0) { 10969 /* Access flag fault. */ 10970 fi->type = ARMFault_AccessFlag; 10971 goto do_fault; 10972 } 10973 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 10974 } else { 10975 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10976 } 10977 if (*prot && !xn) { 10978 *prot |= PAGE_EXEC; 10979 } 10980 if (!(*prot & (1 << access_type))) { 10981 /* Access permission fault. */ 10982 fi->type = ARMFault_Permission; 10983 goto do_fault; 10984 } 10985 } 10986 if (ns) { 10987 /* The NS bit will (as required by the architecture) have no effect if 10988 * the CPU doesn't support TZ or this is a non-secure translation 10989 * regime, because the attribute will already be non-secure. 
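 * Clearing attrs->secure below is what steers the access to the non-secure
 * physical address space.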
10990 */ 10991 attrs->secure = false; 10992 } 10993 *phys_ptr = phys_addr; 10994 return false; 10995 do_fault: 10996 fi->domain = domain; 10997 fi->level = level; 10998 return true; 10999 } 11000 11001 /* 11002 * check_s2_mmu_setup 11003 * @cpu: ARMCPU 11004 * @is_aa64: True if the translation regime is in AArch64 state 11005 * @startlevel: Suggested starting level 11006 * @inputsize: Bitsize of IPAs 11007 * @stride: Page-table stride (See the ARM ARM) 11008 * 11009 * Returns true if the suggested S2 translation parameters are OK and 11010 * false otherwise. 11011 */ 11012 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 11013 int inputsize, int stride, int outputsize) 11014 { 11015 const int grainsize = stride + 3; 11016 int startsizecheck; 11017 11018 /* 11019 * Negative levels are usually not allowed... 11020 * Except for FEAT_LPA2, 4k page table, 52-bit address space, which 11021 * begins with level -1. Note that previous feature tests will have 11022 * eliminated this combination if it is not enabled. 11023 */ 11024 if (level < (inputsize == 52 && stride == 9 ? -1 : 0)) { 11025 return false; 11026 } 11027 11028 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 11029 if (startsizecheck < 1 || startsizecheck > stride + 4) { 11030 return false; 11031 } 11032 11033 if (is_aa64) { 11034 switch (stride) { 11035 case 13: /* 64KB Pages. */ 11036 if (level == 0 || (level == 1 && outputsize <= 42)) { 11037 return false; 11038 } 11039 break; 11040 case 11: /* 16KB Pages. */ 11041 if (level == 0 || (level == 1 && outputsize <= 40)) { 11042 return false; 11043 } 11044 break; 11045 case 9: /* 4KB Pages. */ 11046 if (level == 0 && outputsize <= 42) { 11047 return false; 11048 } 11049 break; 11050 default: 11051 g_assert_not_reached(); 11052 } 11053 11054 /* Inputsize checks. */ 11055 if (inputsize > outputsize && 11056 (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) { 11057 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 11058 return false; 11059 } 11060 } else { 11061 /* AArch32 only supports 4KB pages. Assert on that. */ 11062 assert(stride == 9); 11063 11064 if (level == 0) { 11065 return false; 11066 } 11067 } 11068 return true; 11069 } 11070 11071 /* Translate from the 4-bit stage 2 representation of 11072 * memory attributes (without cache-allocation hints) to 11073 * the 8-bit representation of the stage 1 MAIR registers 11074 * (which includes allocation hints). 11075 * 11076 * ref: shared/translation/attrs/S2AttrDecode() 11077 * .../S2ConvertAttrsHints() 11078 */ 11079 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 11080 { 11081 uint8_t hiattr = extract32(s2attrs, 2, 2); 11082 uint8_t loattr = extract32(s2attrs, 0, 2); 11083 uint8_t hihint = 0, lohint = 0; 11084 11085 if (hiattr != 0) { /* normal memory */ 11086 if (arm_hcr_el2_eff(env) & HCR_CD) { /* cache disabled */ 11087 hiattr = loattr = 1; /* non-cacheable */ 11088 } else { 11089 if (hiattr != 1) { /* Write-through or write-back */ 11090 hihint = 3; /* RW allocate */ 11091 } 11092 if (loattr != 1) { /* Write-through or write-back */ 11093 lohint = 3; /* RW allocate */ 11094 } 11095 } 11096 } 11097 11098 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 11099 } 11100 #endif /* !CONFIG_USER_ONLY */ 11101 11102 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. 
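 * For example, a PARANGE/PS value of 5 corresponds to 48-bit physical
 * addresses.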
*/ 11103 static const uint8_t pamax_map[] = { 11104 [0] = 32, 11105 [1] = 36, 11106 [2] = 40, 11107 [3] = 42, 11108 [4] = 44, 11109 [5] = 48, 11110 [6] = 52, 11111 }; 11112 11113 /* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */ 11114 unsigned int arm_pamax(ARMCPU *cpu) 11115 { 11116 unsigned int parange = 11117 FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); 11118 11119 /* 11120 * id_aa64mmfr0 is a read-only register so values outside of the 11121 * supported mappings can be considered an implementation error. 11122 */ 11123 assert(parange < ARRAY_SIZE(pamax_map)); 11124 return pamax_map[parange]; 11125 } 11126 11127 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 11128 { 11129 if (regime_has_2_ranges(mmu_idx)) { 11130 return extract64(tcr, 37, 2); 11131 } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11132 return 0; /* VTCR_EL2 */ 11133 } else { 11134 /* Replicate the single TBI bit so we always have 2 bits. */ 11135 return extract32(tcr, 20, 1) * 3; 11136 } 11137 } 11138 11139 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 11140 { 11141 if (regime_has_2_ranges(mmu_idx)) { 11142 return extract64(tcr, 51, 2); 11143 } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11144 return 0; /* VTCR_EL2 */ 11145 } else { 11146 /* Replicate the single TBID bit so we always have 2 bits. */ 11147 return extract32(tcr, 29, 1) * 3; 11148 } 11149 } 11150 11151 static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) 11152 { 11153 if (regime_has_2_ranges(mmu_idx)) { 11154 return extract64(tcr, 57, 2); 11155 } else { 11156 /* Replicate the single TCMA bit so we always have 2 bits. */ 11157 return extract32(tcr, 30, 1) * 3; 11158 } 11159 } 11160 11161 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 11162 ARMMMUIdx mmu_idx, bool data) 11163 { 11164 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 11165 bool epd, hpd, using16k, using64k, tsz_oob, ds; 11166 int select, tsz, tbi, max_tsz, min_tsz, ps, sh; 11167 ARMCPU *cpu = env_archcpu(env); 11168 11169 if (!regime_has_2_ranges(mmu_idx)) { 11170 select = 0; 11171 tsz = extract32(tcr, 0, 6); 11172 using64k = extract32(tcr, 14, 1); 11173 using16k = extract32(tcr, 15, 1); 11174 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11175 /* VTCR_EL2 */ 11176 hpd = false; 11177 } else { 11178 hpd = extract32(tcr, 24, 1); 11179 } 11180 epd = false; 11181 sh = extract32(tcr, 12, 2); 11182 ps = extract32(tcr, 16, 3); 11183 ds = extract64(tcr, 32, 1); 11184 } else { 11185 /* 11186 * Bit 55 is always between the two regions, and is canonical for 11187 * determining if address tagging is enabled. 
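 * A zero bit 55 selects the TTBR0_ELx parameters below and a one bit the
 * TTBR1_ELx parameters, regardless of whether TBI is actually enabled.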
11188 */ 11189 select = extract64(va, 55, 1); 11190 if (!select) { 11191 tsz = extract32(tcr, 0, 6); 11192 epd = extract32(tcr, 7, 1); 11193 sh = extract32(tcr, 12, 2); 11194 using64k = extract32(tcr, 14, 1); 11195 using16k = extract32(tcr, 15, 1); 11196 hpd = extract64(tcr, 41, 1); 11197 } else { 11198 int tg = extract32(tcr, 30, 2); 11199 using16k = tg == 1; 11200 using64k = tg == 3; 11201 tsz = extract32(tcr, 16, 6); 11202 epd = extract32(tcr, 23, 1); 11203 sh = extract32(tcr, 28, 2); 11204 hpd = extract64(tcr, 42, 1); 11205 } 11206 ps = extract64(tcr, 32, 3); 11207 ds = extract64(tcr, 59, 1); 11208 } 11209 11210 if (cpu_isar_feature(aa64_st, cpu)) { 11211 max_tsz = 48 - using64k; 11212 } else { 11213 max_tsz = 39; 11214 } 11215 11216 /* 11217 * DS is RES0 unless FEAT_LPA2 is supported for the given page size; 11218 * adjust the effective value of DS, as documented. 11219 */ 11220 min_tsz = 16; 11221 if (using64k) { 11222 if (cpu_isar_feature(aa64_lva, cpu)) { 11223 min_tsz = 12; 11224 } 11225 ds = false; 11226 } else if (ds) { 11227 switch (mmu_idx) { 11228 case ARMMMUIdx_Stage2: 11229 case ARMMMUIdx_Stage2_S: 11230 if (using16k) { 11231 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu); 11232 } else { 11233 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu); 11234 } 11235 break; 11236 default: 11237 if (using16k) { 11238 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu); 11239 } else { 11240 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu); 11241 } 11242 break; 11243 } 11244 if (ds) { 11245 min_tsz = 12; 11246 } 11247 } 11248 11249 if (tsz > max_tsz) { 11250 tsz = max_tsz; 11251 tsz_oob = true; 11252 } else if (tsz < min_tsz) { 11253 tsz = min_tsz; 11254 tsz_oob = true; 11255 } else { 11256 tsz_oob = false; 11257 } 11258 11259 /* Present TBI as a composite with TBID. */ 11260 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 11261 if (!data) { 11262 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 11263 } 11264 tbi = (tbi >> select) & 1; 11265 11266 return (ARMVAParameters) { 11267 .tsz = tsz, 11268 .ps = ps, 11269 .sh = sh, 11270 .select = select, 11271 .tbi = tbi, 11272 .epd = epd, 11273 .hpd = hpd, 11274 .using16k = using16k, 11275 .using64k = using64k, 11276 .tsz_oob = tsz_oob, 11277 .ds = ds, 11278 }; 11279 } 11280 11281 #ifndef CONFIG_USER_ONLY 11282 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 11283 ARMMMUIdx mmu_idx) 11284 { 11285 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 11286 uint32_t el = regime_el(env, mmu_idx); 11287 int select, tsz; 11288 bool epd, hpd; 11289 11290 assert(mmu_idx != ARMMMUIdx_Stage2_S); 11291 11292 if (mmu_idx == ARMMMUIdx_Stage2) { 11293 /* VTCR */ 11294 bool sext = extract32(tcr, 4, 1); 11295 bool sign = extract32(tcr, 3, 1); 11296 11297 /* 11298 * If the sign-extend bit is not the same as t0sz[3], the result 11299 * is unpredictable. Flag this as a guest error. 11300 */ 11301 if (sign != sext) { 11302 qemu_log_mask(LOG_GUEST_ERROR, 11303 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 11304 } 11305 tsz = sextract32(tcr, 0, 4) + 8; 11306 select = 0; 11307 hpd = false; 11308 epd = false; 11309 } else if (el == 2) { 11310 /* HTCR */ 11311 tsz = extract32(tcr, 0, 3); 11312 select = 0; 11313 hpd = extract64(tcr, 24, 1); 11314 epd = false; 11315 } else { 11316 int t0sz = extract32(tcr, 0, 3); 11317 int t1sz = extract32(tcr, 16, 3); 11318 11319 if (t1sz == 0) { 11320 select = va > (0xffffffffu >> t0sz); 11321 } else { 11322 /* Note that we will detect errors later. 
*/ 11323 select = va >= ~(0xffffffffu >> t1sz); 11324 } 11325 if (!select) { 11326 tsz = t0sz; 11327 epd = extract32(tcr, 7, 1); 11328 hpd = extract64(tcr, 41, 1); 11329 } else { 11330 tsz = t1sz; 11331 epd = extract32(tcr, 23, 1); 11332 hpd = extract64(tcr, 42, 1); 11333 } 11334 /* For aarch32, hpd0 is not enabled without t2e as well. */ 11335 hpd &= extract32(tcr, 6, 1); 11336 } 11337 11338 return (ARMVAParameters) { 11339 .tsz = tsz, 11340 .select = select, 11341 .epd = epd, 11342 .hpd = hpd, 11343 }; 11344 } 11345 11346 /** 11347 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format 11348 * 11349 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 11350 * prot and page_size may not be filled in, and the populated fsr value provides 11351 * information on why the translation aborted, in the format of a long-format 11352 * DFSR/IFSR fault register, with the following caveats: 11353 * * the WnR bit is never set (the caller must do this). 11354 * 11355 * @env: CPUARMState 11356 * @address: virtual address to get physical address for 11357 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH 11358 * @mmu_idx: MMU index indicating required translation regime 11359 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table 11360 * walk), must be true if this is stage 2 of a stage 1+2 walk for an 11361 * EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored. 11362 * @phys_ptr: set to the physical address corresponding to the virtual address 11363 * @attrs: set to the memory transaction attributes to use 11364 * @prot: set to the permissions for the page containing phys_ptr 11365 * @page_size_ptr: set to the size of the page containing phys_ptr 11366 * @fi: set to fault info if the translation fails 11367 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11368 */ 11369 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, 11370 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11371 bool s1_is_el0, 11372 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 11373 target_ulong *page_size_ptr, 11374 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11375 { 11376 ARMCPU *cpu = env_archcpu(env); 11377 CPUState *cs = CPU(cpu); 11378 /* Read an LPAE long-descriptor translation table. */ 11379 ARMFaultType fault_type = ARMFault_Translation; 11380 uint32_t level; 11381 ARMVAParameters param; 11382 uint64_t ttbr; 11383 hwaddr descaddr, indexmask, indexmask_grainsize; 11384 uint32_t tableattrs; 11385 target_ulong page_size; 11386 uint32_t attrs; 11387 int32_t stride; 11388 int addrsize, inputsize, outputsize; 11389 TCR *tcr = regime_tcr(env, mmu_idx); 11390 int ap, ns, xn, pxn; 11391 uint32_t el = regime_el(env, mmu_idx); 11392 uint64_t descaddrmask; 11393 bool aarch64 = arm_el_is_aa64(env, el); 11394 bool guarded = false; 11395 11396 /* TODO: This code does not support shareability levels. */ 11397 if (aarch64) { 11398 int ps; 11399 11400 param = aa64_va_parameters(env, address, mmu_idx, 11401 access_type != MMU_INST_FETCH); 11402 level = 0; 11403 11404 /* 11405 * If TxSZ is programmed to a value larger than the maximum, 11406 * or smaller than the effective minimum, it is IMPLEMENTATION 11407 * DEFINED whether we behave as if the field were programmed 11408 * within bounds, or if a level 0 Translation fault is generated. 11409 * 11410 * With FEAT_LVA, fault on less than minimum becomes required, 11411 * so our choice is to always raise the fault. 
11412 */ 11413 if (param.tsz_oob) { 11414 fault_type = ARMFault_Translation; 11415 goto do_fault; 11416 } 11417 11418 addrsize = 64 - 8 * param.tbi; 11419 inputsize = 64 - param.tsz; 11420 11421 /* 11422 * Bound PS by PARANGE to find the effective output address size. 11423 * ID_AA64MMFR0 is a read-only register so values outside of the 11424 * supported mappings can be considered an implementation error. 11425 */ 11426 ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); 11427 ps = MIN(ps, param.ps); 11428 assert(ps < ARRAY_SIZE(pamax_map)); 11429 outputsize = pamax_map[ps]; 11430 } else { 11431 param = aa32_va_parameters(env, address, mmu_idx); 11432 level = 1; 11433 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 11434 inputsize = addrsize - param.tsz; 11435 outputsize = 40; 11436 } 11437 11438 /* 11439 * We determined the region when collecting the parameters, but we 11440 * have not yet validated that the address is valid for the region. 11441 * Extract the top bits and verify that they all match select. 11442 * 11443 * For aa32, if inputsize == addrsize, then we have selected the 11444 * region by exclusion in aa32_va_parameters and there is no more 11445 * validation to do here. 11446 */ 11447 if (inputsize < addrsize) { 11448 target_ulong top_bits = sextract64(address, inputsize, 11449 addrsize - inputsize); 11450 if (-top_bits != param.select) { 11451 /* The gap between the two regions is a Translation fault */ 11452 fault_type = ARMFault_Translation; 11453 goto do_fault; 11454 } 11455 } 11456 11457 if (param.using64k) { 11458 stride = 13; 11459 } else if (param.using16k) { 11460 stride = 11; 11461 } else { 11462 stride = 9; 11463 } 11464 11465 /* Note that QEMU ignores shareability and cacheability attributes, 11466 * so we don't need to do anything with the SH, ORGN, IRGN fields 11467 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 11468 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 11469 * implement any ASID-like capability so we can ignore it (instead 11470 * we will always flush the TLB any time the ASID is changed). 11471 */ 11472 ttbr = regime_ttbr(env, mmu_idx, param.select); 11473 11474 /* Here we should have set up all the parameters for the translation: 11475 * inputsize, ttbr, epd, stride, tbi 11476 */ 11477 11478 if (param.epd) { 11479 /* Translation table walk disabled => Translation fault on TLB miss 11480 * Note: This is always 0 on 64-bit EL2 and EL3. 11481 */ 11482 goto do_fault; 11483 } 11484 11485 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { 11486 /* The starting level depends on the virtual address size (which can 11487 * be up to 48 bits) and the translation granule size. It indicates 11488 * the number of strides (stride bits at a time) needed to 11489 * consume the bits of the input address. In the pseudocode this is: 11490 * level = 4 - RoundUp((inputsize - grainsize) / stride) 11491 * where their 'inputsize' is our 'inputsize', 'grainsize' is 11492 * our 'stride + 3' and 'stride' is our 'stride'. 
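 * For example (illustrative only): inputsize = 48 with 4KB granules
 * gives grainsize = 12 and stride = 9, so
 * level = 4 - RoundUp((48 - 12) / 9) = 4 - 4 = 0.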
11493 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 11494 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 11495 * = 4 - (inputsize - 4) / stride; 11496 */ 11497 level = 4 - (inputsize - 4) / stride; 11498 } else { 11499 /* For stage 2 translations the starting level is specified by the 11500 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 11501 */ 11502 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 11503 uint32_t sl2 = extract64(tcr->raw_tcr, 33, 1); 11504 uint32_t startlevel; 11505 bool ok; 11506 11507 /* SL2 is RES0 unless DS=1 & 4kb granule. */ 11508 if (param.ds && stride == 9 && sl2) { 11509 if (sl0 != 0) { 11510 level = 0; 11511 fault_type = ARMFault_Translation; 11512 goto do_fault; 11513 } 11514 startlevel = -1; 11515 } else if (!aarch64 || stride == 9) { 11516 /* AArch32 or 4KB pages */ 11517 startlevel = 2 - sl0; 11518 11519 if (cpu_isar_feature(aa64_st, cpu)) { 11520 startlevel &= 3; 11521 } 11522 } else { 11523 /* 16KB or 64KB pages */ 11524 startlevel = 3 - sl0; 11525 } 11526 11527 /* Check that the starting level is valid. */ 11528 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 11529 inputsize, stride, outputsize); 11530 if (!ok) { 11531 fault_type = ARMFault_Translation; 11532 goto do_fault; 11533 } 11534 level = startlevel; 11535 } 11536 11537 indexmask_grainsize = MAKE_64BIT_MASK(0, stride + 3); 11538 indexmask = MAKE_64BIT_MASK(0, inputsize - (stride * (4 - level))); 11539 11540 /* Now we can extract the actual base address from the TTBR */ 11541 descaddr = extract64(ttbr, 0, 48); 11542 11543 /* 11544 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [5:2] of TTBR. 11545 * 11546 * Otherwise, if the base address is out of range, raise AddressSizeFault. 11547 * In the pseudocode, this is !IsZero(baseregister<47:outputsize>), 11548 * but we've just cleared the bits above 47, so simplify the test. 11549 */ 11550 if (outputsize > 48) { 11551 descaddr |= extract64(ttbr, 2, 4) << 48; 11552 } else if (descaddr >> outputsize) { 11553 level = 0; 11554 fault_type = ARMFault_AddressSize; 11555 goto do_fault; 11556 } 11557 11558 /* 11559 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 11560 * and also to mask out CnP (bit 0) which could validly be non-zero. 11561 */ 11562 descaddr &= ~indexmask; 11563 11564 /* 11565 * For AArch32, the address field in the descriptor goes up to bit 39 11566 * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0 11567 * or an AddressSize fault is raised. So for v8 we extract those SBZ 11568 * bits as part of the address, which will be checked via outputsize. 11569 * For AArch64, the address field goes up to bit 47, or 49 with FEAT_LPA2; 11570 * the highest bits of a 52-bit output are placed elsewhere. 11571 */ 11572 if (param.ds) { 11573 descaddrmask = MAKE_64BIT_MASK(0, 50); 11574 } else if (arm_feature(env, ARM_FEATURE_V8)) { 11575 descaddrmask = MAKE_64BIT_MASK(0, 48); 11576 } else { 11577 descaddrmask = MAKE_64BIT_MASK(0, 40); 11578 } 11579 descaddrmask &= ~indexmask_grainsize; 11580 11581 /* Secure accesses start with the page table in secure memory and 11582 * can be downgraded to non-secure at any step. Non-secure accesses 11583 * remain non-secure. We implement this by just ORing in the NSTable/NS 11584 * bits at each step. 11585 */ 11586 tableattrs = regime_is_secure(env, mmu_idx) ? 
0 : (1 << 4); 11587 for (;;) { 11588 uint64_t descriptor; 11589 bool nstable; 11590 11591 descaddr |= (address >> (stride * (4 - level))) & indexmask; 11592 descaddr &= ~7ULL; 11593 nstable = extract32(tableattrs, 4, 1); 11594 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 11595 if (fi->type != ARMFault_None) { 11596 goto do_fault; 11597 } 11598 11599 if (!(descriptor & 1) || 11600 (!(descriptor & 2) && (level == 3))) { 11601 /* Invalid, or the Reserved level 3 encoding */ 11602 goto do_fault; 11603 } 11604 11605 descaddr = descriptor & descaddrmask; 11606 11607 /* 11608 * For FEAT_LPA and PS=6, bits [51:48] of descaddr are in [15:12] 11609 * of descriptor. For FEAT_LPA2 and effective DS, bits [51:50] of 11610 * descaddr are in [9:8]. Otherwise, if descaddr is out of range, 11611 * raise AddressSizeFault. 11612 */ 11613 if (outputsize > 48) { 11614 if (param.ds) { 11615 descaddr |= extract64(descriptor, 8, 2) << 50; 11616 } else { 11617 descaddr |= extract64(descriptor, 12, 4) << 48; 11618 } 11619 } else if (descaddr >> outputsize) { 11620 fault_type = ARMFault_AddressSize; 11621 goto do_fault; 11622 } 11623 11624 if ((descriptor & 2) && (level < 3)) { 11625 /* Table entry. The top five bits are attributes which may 11626 * propagate down through lower levels of the table (and 11627 * which are all arranged so that 0 means "no effect", so 11628 * we can gather them up by ORing in the bits at each level). 11629 */ 11630 tableattrs |= extract64(descriptor, 59, 5); 11631 level++; 11632 indexmask = indexmask_grainsize; 11633 continue; 11634 } 11635 /* 11636 * Block entry at level 1 or 2, or page entry at level 3. 11637 * These are basically the same thing, although the number 11638 * of bits we pull in from the vaddr varies. Note that although 11639 * descaddrmask masks enough of the low bits of the descriptor 11640 * to give a correct page or table address, the address field 11641 * in a block descriptor is smaller; so we need to explicitly 11642 * clear the lower bits here before ORing in the low vaddr bits. 11643 */ 11644 page_size = (1ULL << ((stride * (4 - level)) + 3)); 11645 descaddr &= ~(page_size - 1); 11646 descaddr |= (address & (page_size - 1)); 11647 /* Extract attributes from the descriptor */ 11648 attrs = extract64(descriptor, 2, 10) 11649 | (extract64(descriptor, 52, 12) << 10); 11650 11651 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11652 /* Stage 2 table descriptors do not include any attribute fields */ 11653 break; 11654 } 11655 /* Merge in attributes from table descriptors */ 11656 attrs |= nstable << 3; /* NS */ 11657 guarded = extract64(descriptor, 50, 1); /* GP */ 11658 if (param.hpd) { 11659 /* HPD disables all the table attributes except NSTable. */ 11660 break; 11661 } 11662 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 11663 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 11664 * means "force PL1 access only", which means forcing AP[1] to 0. 11665 */ 11666 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 11667 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 11668 break; 11669 } 11670 /* Here descaddr is the final physical address, and attributes 11671 * are all in attrs. 
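 * As packed above, attrs[9:0] are descriptor bits [11:2] and
 * attrs[21:10] are descriptor bits [63:52]; for example the AF
 * check below tests attrs bit 8 (descriptor bit 10), and AP[2:1]
 * are attrs bits [5:4] (descriptor bits [7:6]).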
11672 */ 11673 fault_type = ARMFault_AccessFlag; 11674 if ((attrs & (1 << 8)) == 0) { 11675 /* Access flag */ 11676 goto do_fault; 11677 } 11678 11679 ap = extract32(attrs, 4, 2); 11680 11681 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11682 ns = mmu_idx == ARMMMUIdx_Stage2; 11683 xn = extract32(attrs, 11, 2); 11684 *prot = get_S2prot(env, ap, xn, s1_is_el0); 11685 } else { 11686 ns = extract32(attrs, 3, 1); 11687 xn = extract32(attrs, 12, 1); 11688 pxn = extract32(attrs, 11, 1); 11689 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 11690 } 11691 11692 fault_type = ARMFault_Permission; 11693 if (!(*prot & (1 << access_type))) { 11694 goto do_fault; 11695 } 11696 11697 if (ns) { 11698 /* The NS bit will (as required by the architecture) have no effect if 11699 * the CPU doesn't support TZ or this is a non-secure translation 11700 * regime, because the attribute will already be non-secure. 11701 */ 11702 txattrs->secure = false; 11703 } 11704 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ 11705 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 11706 arm_tlb_bti_gp(txattrs) = true; 11707 } 11708 11709 if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) { 11710 cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4)); 11711 } else { 11712 /* Index into MAIR registers for cache attributes */ 11713 uint8_t attrindx = extract32(attrs, 0, 3); 11714 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 11715 assert(attrindx <= 7); 11716 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 11717 } 11718 11719 /* 11720 * For FEAT_LPA2 and effective DS, the SH field in the attributes 11721 * was re-purposed for output address bits. The SH attribute in 11722 * that case comes from TCR_ELx, which we extracted earlier. 11723 */ 11724 if (param.ds) { 11725 cacheattrs->shareability = param.sh; 11726 } else { 11727 cacheattrs->shareability = extract32(attrs, 6, 2); 11728 } 11729 11730 *phys_ptr = descaddr; 11731 *page_size_ptr = page_size; 11732 return false; 11733 11734 do_fault: 11735 fi->type = fault_type; 11736 fi->level = level; 11737 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 11738 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 || 11739 mmu_idx == ARMMMUIdx_Stage2_S); 11740 fi->s1ns = mmu_idx == ARMMMUIdx_Stage2; 11741 return true; 11742 } 11743 11744 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 11745 ARMMMUIdx mmu_idx, 11746 int32_t address, int *prot) 11747 { 11748 if (!arm_feature(env, ARM_FEATURE_M)) { 11749 *prot = PAGE_READ | PAGE_WRITE; 11750 switch (address) { 11751 case 0xF0000000 ... 0xFFFFFFFF: 11752 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 11753 /* hivecs execing is ok */ 11754 *prot |= PAGE_EXEC; 11755 } 11756 break; 11757 case 0x00000000 ... 0x7FFFFFFF: 11758 *prot |= PAGE_EXEC; 11759 break; 11760 } 11761 } else { 11762 /* Default system address map for M profile cores. 11763 * The architecture specifies which regions are execute-never; 11764 * at the MPU level no other checks are defined. 11765 */ 11766 switch (address) { 11767 case 0x00000000 ... 0x1fffffff: /* ROM */ 11768 case 0x20000000 ... 0x3fffffff: /* SRAM */ 11769 case 0x60000000 ... 0x7fffffff: /* RAM */ 11770 case 0x80000000 ... 0x9fffffff: /* RAM */ 11771 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11772 break; 11773 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 11774 case 0xa0000000 ... 0xbfffffff: /* Device */ 11775 case 0xc0000000 ... 
0xdfffffff: /* Device */ 11776 case 0xe0000000 ... 0xffffffff: /* System */ 11777 *prot = PAGE_READ | PAGE_WRITE; 11778 break; 11779 default: 11780 g_assert_not_reached(); 11781 } 11782 } 11783 } 11784 11785 static bool pmsav7_use_background_region(ARMCPU *cpu, 11786 ARMMMUIdx mmu_idx, bool is_user) 11787 { 11788 /* Return true if we should use the default memory map as a 11789 * "background" region if there are no hits against any MPU regions. 11790 */ 11791 CPUARMState *env = &cpu->env; 11792 11793 if (is_user) { 11794 return false; 11795 } 11796 11797 if (arm_feature(env, ARM_FEATURE_M)) { 11798 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 11799 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 11800 } else { 11801 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 11802 } 11803 } 11804 11805 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 11806 { 11807 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 11808 return arm_feature(env, ARM_FEATURE_M) && 11809 extract32(address, 20, 12) == 0xe00; 11810 } 11811 11812 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 11813 { 11814 /* True if address is in the M profile system region 11815 * 0xe0000000 - 0xffffffff 11816 */ 11817 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 11818 } 11819 11820 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 11821 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11822 hwaddr *phys_ptr, int *prot, 11823 target_ulong *page_size, 11824 ARMMMUFaultInfo *fi) 11825 { 11826 ARMCPU *cpu = env_archcpu(env); 11827 int n; 11828 bool is_user = regime_is_user(env, mmu_idx); 11829 11830 *phys_ptr = address; 11831 *page_size = TARGET_PAGE_SIZE; 11832 *prot = 0; 11833 11834 if (regime_translation_disabled(env, mmu_idx) || 11835 m_is_ppb_region(env, address)) { 11836 /* MPU disabled or M profile PPB access: use default memory map. 11837 * The other case which uses the default memory map in the 11838 * v7M ARM ARM pseudocode is exception vector reads from the vector 11839 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 11840 * which always does a direct read using address_space_ldl(), rather 11841 * than going via this function, so we don't need to check that here. 11842 */ 11843 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11844 } else { /* MPU enabled */ 11845 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11846 /* region search */ 11847 uint32_t base = env->pmsav7.drbar[n]; 11848 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 11849 uint32_t rmask; 11850 bool srdis = false; 11851 11852 if (!(env->pmsav7.drsr[n] & 0x1)) { 11853 continue; 11854 } 11855 11856 if (!rsize) { 11857 qemu_log_mask(LOG_GUEST_ERROR, 11858 "DRSR[%d]: Rsize field cannot be 0\n", n); 11859 continue; 11860 } 11861 rsize++; 11862 rmask = (1ull << rsize) - 1; 11863 11864 if (base & rmask) { 11865 qemu_log_mask(LOG_GUEST_ERROR, 11866 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 11867 "to DRSR region size, mask = 0x%" PRIx32 "\n", 11868 n, base, rmask); 11869 continue; 11870 } 11871 11872 if (address < base || address > base + rmask) { 11873 /* 11874 * Address not in this region. We must check whether the 11875 * region covers addresses in the same page as our address. 
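 * (Illustrative example: a 64-byte MPU region at 0x20000100 which
 * this access misses still overlaps the 4KB page containing the
 * address.)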
11876 * In that case we must not report a size that covers the 11877 * whole page for a subsequent hit against a different MPU 11878 * region or the background region, because it would result in 11879 * incorrect TLB hits for subsequent accesses to addresses that 11880 * are in this MPU region. 11881 */ 11882 if (ranges_overlap(base, rmask, 11883 address & TARGET_PAGE_MASK, 11884 TARGET_PAGE_SIZE)) { 11885 *page_size = 1; 11886 } 11887 continue; 11888 } 11889 11890 /* Region matched */ 11891 11892 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 11893 int i, snd; 11894 uint32_t srdis_mask; 11895 11896 rsize -= 3; /* sub region size (power of 2) */ 11897 snd = ((address - base) >> rsize) & 0x7; 11898 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 11899 11900 srdis_mask = srdis ? 0x3 : 0x0; 11901 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 11902 /* This will check in groups of 2, 4 and then 8, whether 11903 * the subregion bits are consistent. rsize is incremented 11904 * back up to give the region size, considering consistent 11905 * adjacent subregions as one region. Stop testing if rsize 11906 * is already big enough for an entire QEMU page. 11907 */ 11908 int snd_rounded = snd & ~(i - 1); 11909 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 11910 snd_rounded + 8, i); 11911 if (srdis_mask ^ srdis_multi) { 11912 break; 11913 } 11914 srdis_mask = (srdis_mask << i) | srdis_mask; 11915 rsize++; 11916 } 11917 } 11918 if (srdis) { 11919 continue; 11920 } 11921 if (rsize < TARGET_PAGE_BITS) { 11922 *page_size = 1 << rsize; 11923 } 11924 break; 11925 } 11926 11927 if (n == -1) { /* no hits */ 11928 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11929 /* background fault */ 11930 fi->type = ARMFault_Background; 11931 return true; 11932 } 11933 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11934 } else { /* a MPU hit! */ 11935 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 11936 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 11937 11938 if (m_is_system_region(env, address)) { 11939 /* System space is always execute never */ 11940 xn = 1; 11941 } 11942 11943 if (is_user) { /* User mode AP bit decoding */ 11944 switch (ap) { 11945 case 0: 11946 case 1: 11947 case 5: 11948 break; /* no access */ 11949 case 3: 11950 *prot |= PAGE_WRITE; 11951 /* fall through */ 11952 case 2: 11953 case 6: 11954 *prot |= PAGE_READ | PAGE_EXEC; 11955 break; 11956 case 7: 11957 /* for v7M, same as 6; for R profile a reserved value */ 11958 if (arm_feature(env, ARM_FEATURE_M)) { 11959 *prot |= PAGE_READ | PAGE_EXEC; 11960 break; 11961 } 11962 /* fall through */ 11963 default: 11964 qemu_log_mask(LOG_GUEST_ERROR, 11965 "DRACR[%d]: Bad value for AP bits: 0x%" 11966 PRIx32 "\n", n, ap); 11967 } 11968 } else { /* Priv. 
mode AP bits decoding */ 11969 switch (ap) { 11970 case 0: 11971 break; /* no access */ 11972 case 1: 11973 case 2: 11974 case 3: 11975 *prot |= PAGE_WRITE; 11976 /* fall through */ 11977 case 5: 11978 case 6: 11979 *prot |= PAGE_READ | PAGE_EXEC; 11980 break; 11981 case 7: 11982 /* for v7M, same as 6; for R profile a reserved value */ 11983 if (arm_feature(env, ARM_FEATURE_M)) { 11984 *prot |= PAGE_READ | PAGE_EXEC; 11985 break; 11986 } 11987 /* fall through */ 11988 default: 11989 qemu_log_mask(LOG_GUEST_ERROR, 11990 "DRACR[%d]: Bad value for AP bits: 0x%" 11991 PRIx32 "\n", n, ap); 11992 } 11993 } 11994 11995 /* execute never */ 11996 if (xn) { 11997 *prot &= ~PAGE_EXEC; 11998 } 11999 } 12000 } 12001 12002 fi->type = ARMFault_Permission; 12003 fi->level = 1; 12004 return !(*prot & (1 << access_type)); 12005 } 12006 12007 static bool v8m_is_sau_exempt(CPUARMState *env, 12008 uint32_t address, MMUAccessType access_type) 12009 { 12010 /* The architecture specifies that certain address ranges are 12011 * exempt from v8M SAU/IDAU checks. 12012 */ 12013 return 12014 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 12015 (address >= 0xe0000000 && address <= 0xe0002fff) || 12016 (address >= 0xe000e000 && address <= 0xe000efff) || 12017 (address >= 0xe002e000 && address <= 0xe002efff) || 12018 (address >= 0xe0040000 && address <= 0xe0041fff) || 12019 (address >= 0xe00ff000 && address <= 0xe00fffff); 12020 } 12021 12022 void v8m_security_lookup(CPUARMState *env, uint32_t address, 12023 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12024 V8M_SAttributes *sattrs) 12025 { 12026 /* Look up the security attributes for this address. Compare the 12027 * pseudocode SecurityCheck() function. 12028 * We assume the caller has zero-initialized *sattrs. 12029 */ 12030 ARMCPU *cpu = env_archcpu(env); 12031 int r; 12032 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 12033 int idau_region = IREGION_NOTVALID; 12034 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 12035 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 12036 12037 if (cpu->idau) { 12038 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 12039 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 12040 12041 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 12042 &idau_nsc); 12043 } 12044 12045 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 12046 /* 0xf0000000..0xffffffff is always S for insn fetches */ 12047 return; 12048 } 12049 12050 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 12051 sattrs->ns = !regime_is_secure(env, mmu_idx); 12052 return; 12053 } 12054 12055 if (idau_region != IREGION_NOTVALID) { 12056 sattrs->irvalid = true; 12057 sattrs->iregion = idau_region; 12058 } 12059 12060 switch (env->sau.ctrl & 3) { 12061 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 12062 break; 12063 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 12064 sattrs->ns = true; 12065 break; 12066 default: /* SAU.ENABLE == 1 */ 12067 for (r = 0; r < cpu->sau_sregion; r++) { 12068 if (env->sau.rlar[r] & 1) { 12069 uint32_t base = env->sau.rbar[r] & ~0x1f; 12070 uint32_t limit = env->sau.rlar[r] | 0x1f; 12071 12072 if (base <= address && limit >= address) { 12073 if (base > addr_page_base || limit < addr_page_limit) { 12074 sattrs->subpage = true; 12075 } 12076 if (sattrs->srvalid) { 12077 /* If we hit in more than one region then we must report 12078 * as Secure, not NS-Callable, with no valid region 12079 * number info. 
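 *
 * Illustrative example: if SAU regions 1 and 3 both cover the
 * address, the result is Secure (ns and nsc both false) and
 * srvalid is cleared, matching the pseudocode SecurityCheck().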
12080 */ 12081 sattrs->ns = false; 12082 sattrs->nsc = false; 12083 sattrs->sregion = 0; 12084 sattrs->srvalid = false; 12085 break; 12086 } else { 12087 if (env->sau.rlar[r] & 2) { 12088 sattrs->nsc = true; 12089 } else { 12090 sattrs->ns = true; 12091 } 12092 sattrs->srvalid = true; 12093 sattrs->sregion = r; 12094 } 12095 } else { 12096 /* 12097 * Address not in this region. We must check whether the 12098 * region covers addresses in the same page as our address. 12099 * In that case we must not report a size that covers the 12100 * whole page for a subsequent hit against a different MPU 12101 * region or the background region, because it would result 12102 * in incorrect TLB hits for subsequent accesses to 12103 * addresses that are in this MPU region. 12104 */ 12105 if (limit >= base && 12106 ranges_overlap(base, limit - base + 1, 12107 addr_page_base, 12108 TARGET_PAGE_SIZE)) { 12109 sattrs->subpage = true; 12110 } 12111 } 12112 } 12113 } 12114 break; 12115 } 12116 12117 /* 12118 * The IDAU will override the SAU lookup results if it specifies 12119 * higher security than the SAU does. 12120 */ 12121 if (!idau_ns) { 12122 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 12123 sattrs->ns = false; 12124 sattrs->nsc = idau_nsc; 12125 } 12126 } 12127 } 12128 12129 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 12130 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12131 hwaddr *phys_ptr, MemTxAttrs *txattrs, 12132 int *prot, bool *is_subpage, 12133 ARMMMUFaultInfo *fi, uint32_t *mregion) 12134 { 12135 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 12136 * that a full phys-to-virt translation does). 12137 * mregion is (if not NULL) set to the region number which matched, 12138 * or -1 if no region number is returned (MPU off, address did not 12139 * hit a region, address hit in multiple regions). 12140 * We set is_subpage to true if the region hit doesn't cover the 12141 * entire TARGET_PAGE the address is within. 12142 */ 12143 ARMCPU *cpu = env_archcpu(env); 12144 bool is_user = regime_is_user(env, mmu_idx); 12145 uint32_t secure = regime_is_secure(env, mmu_idx); 12146 int n; 12147 int matchregion = -1; 12148 bool hit = false; 12149 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 12150 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 12151 12152 *is_subpage = false; 12153 *phys_ptr = address; 12154 *prot = 0; 12155 if (mregion) { 12156 *mregion = -1; 12157 } 12158 12159 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 12160 * was an exception vector read from the vector table (which is always 12161 * done using the default system address map), because those accesses 12162 * are done in arm_v7m_load_vector(), which always does a direct 12163 * read using address_space_ldl(), rather than going via this function. 12164 */ 12165 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 12166 hit = true; 12167 } else if (m_is_ppb_region(env, address)) { 12168 hit = true; 12169 } else { 12170 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 12171 hit = true; 12172 } 12173 12174 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 12175 /* region search */ 12176 /* Note that the base address is bits [31:5] from the register 12177 * with bits [4:0] all zeroes, but the limit address is bits 12178 * [31:5] from the register with bits [4:0] all ones. 
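 * Worked example (illustrative values): RBAR = 0x20000040 yields
 * base 0x20000040, and RLAR = 0x200000c1 (enable bit set) yields
 * limit 0x200000df, i.e. a 160-byte region.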
12179 */ 12180 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 12181 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 12182 12183 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 12184 /* Region disabled */ 12185 continue; 12186 } 12187 12188 if (address < base || address > limit) { 12189 /* 12190 * Address not in this region. We must check whether the 12191 * region covers addresses in the same page as our address. 12192 * In that case we must not report a size that covers the 12193 * whole page for a subsequent hit against a different MPU 12194 * region or the background region, because it would result in 12195 * incorrect TLB hits for subsequent accesses to addresses that 12196 * are in this MPU region. 12197 */ 12198 if (limit >= base && 12199 ranges_overlap(base, limit - base + 1, 12200 addr_page_base, 12201 TARGET_PAGE_SIZE)) { 12202 *is_subpage = true; 12203 } 12204 continue; 12205 } 12206 12207 if (base > addr_page_base || limit < addr_page_limit) { 12208 *is_subpage = true; 12209 } 12210 12211 if (matchregion != -1) { 12212 /* Multiple regions match -- always a failure (unlike 12213 * PMSAv7 where highest-numbered-region wins) 12214 */ 12215 fi->type = ARMFault_Permission; 12216 fi->level = 1; 12217 return true; 12218 } 12219 12220 matchregion = n; 12221 hit = true; 12222 } 12223 } 12224 12225 if (!hit) { 12226 /* background fault */ 12227 fi->type = ARMFault_Background; 12228 return true; 12229 } 12230 12231 if (matchregion == -1) { 12232 /* hit using the background region */ 12233 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 12234 } else { 12235 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 12236 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 12237 bool pxn = false; 12238 12239 if (arm_feature(env, ARM_FEATURE_V8_1M)) { 12240 pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1); 12241 } 12242 12243 if (m_is_system_region(env, address)) { 12244 /* System space is always execute never */ 12245 xn = 1; 12246 } 12247 12248 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 12249 if (*prot && !xn && !(pxn && !is_user)) { 12250 *prot |= PAGE_EXEC; 12251 } 12252 /* We don't need to look the attribute up in the MAIR0/MAIR1 12253 * registers because that only tells us about cacheability. 12254 */ 12255 if (mregion) { 12256 *mregion = matchregion; 12257 } 12258 } 12259 12260 fi->type = ARMFault_Permission; 12261 fi->level = 1; 12262 return !(*prot & (1 << access_type)); 12263 } 12264 12265 12266 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 12267 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12268 hwaddr *phys_ptr, MemTxAttrs *txattrs, 12269 int *prot, target_ulong *page_size, 12270 ARMMMUFaultInfo *fi) 12271 { 12272 uint32_t secure = regime_is_secure(env, mmu_idx); 12273 V8M_SAttributes sattrs = {}; 12274 bool ret; 12275 bool mpu_is_subpage; 12276 12277 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 12278 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 12279 if (access_type == MMU_INST_FETCH) { 12280 /* Instruction fetches always use the MMU bank and the 12281 * transaction attribute determined by the fetch address, 12282 * regardless of CPU state. This is painful for QEMU 12283 * to handle, because it would mean we need to encode 12284 * into the mmu_idx not just the (user, negpri) information 12285 * for the current security state but also that for the 12286 * other security state, which would balloon the number 12287 * of mmu_idx values needed alarmingly. 
12288 * Fortunately we can avoid this because it's not actually 12289 * possible to arbitrarily execute code from memory with 12290 * the wrong security attribute: it will always generate 12291 * an exception of some kind or another, apart from the 12292 * special case of an NS CPU executing an SG instruction 12293 * in S&NSC memory. So we always just fail the translation 12294 * here and sort things out in the exception handler 12295 * (including possibly emulating an SG instruction). 12296 */ 12297 if (sattrs.ns != !secure) { 12298 if (sattrs.nsc) { 12299 fi->type = ARMFault_QEMU_NSCExec; 12300 } else { 12301 fi->type = ARMFault_QEMU_SFault; 12302 } 12303 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 12304 *phys_ptr = address; 12305 *prot = 0; 12306 return true; 12307 } 12308 } else { 12309 /* For data accesses we always use the MMU bank indicated 12310 * by the current CPU state, but the security attributes 12311 * might downgrade a secure access to nonsecure. 12312 */ 12313 if (sattrs.ns) { 12314 txattrs->secure = false; 12315 } else if (!secure) { 12316 /* NS access to S memory must fault. 12317 * Architecturally we should first check whether the 12318 * MPU information for this address indicates that we 12319 * are doing an unaligned access to Device memory, which 12320 * should generate a UsageFault instead. QEMU does not 12321 * currently check for that kind of unaligned access though. 12322 * If we added it we would need to do so as a special case 12323 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 12324 */ 12325 fi->type = ARMFault_QEMU_SFault; 12326 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 12327 *phys_ptr = address; 12328 *prot = 0; 12329 return true; 12330 } 12331 } 12332 } 12333 12334 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 12335 txattrs, prot, &mpu_is_subpage, fi, NULL); 12336 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 12337 return ret; 12338 } 12339 12340 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 12341 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12342 hwaddr *phys_ptr, int *prot, 12343 ARMMMUFaultInfo *fi) 12344 { 12345 int n; 12346 uint32_t mask; 12347 uint32_t base; 12348 bool is_user = regime_is_user(env, mmu_idx); 12349 12350 if (regime_translation_disabled(env, mmu_idx)) { 12351 /* MPU disabled. */ 12352 *phys_ptr = address; 12353 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 12354 return false; 12355 } 12356 12357 *phys_ptr = address; 12358 for (n = 7; n >= 0; n--) { 12359 base = env->cp15.c6_region[n]; 12360 if ((base & 1) == 0) { 12361 continue; 12362 } 12363 mask = 1 << ((base >> 1) & 0x1f); 12364 /* Keep this shift separate from the above to avoid an 12365 (undefined) << 32. 
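 * For instance, a maximal 4GB region has a size field of 31, and
 * folding both steps into a single "1 << 32" would be undefined;
 * shifting the uint32_t mask left by one instead wraps to 0, so
 * the "- 1" below still produces the intended 0xffffffff.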
*/ 12366 mask = (mask << 1) - 1; 12367 if (((base ^ address) & ~mask) == 0) { 12368 break; 12369 } 12370 } 12371 if (n < 0) { 12372 fi->type = ARMFault_Background; 12373 return true; 12374 } 12375 12376 if (access_type == MMU_INST_FETCH) { 12377 mask = env->cp15.pmsav5_insn_ap; 12378 } else { 12379 mask = env->cp15.pmsav5_data_ap; 12380 } 12381 mask = (mask >> (n * 4)) & 0xf; 12382 switch (mask) { 12383 case 0: 12384 fi->type = ARMFault_Permission; 12385 fi->level = 1; 12386 return true; 12387 case 1: 12388 if (is_user) { 12389 fi->type = ARMFault_Permission; 12390 fi->level = 1; 12391 return true; 12392 } 12393 *prot = PAGE_READ | PAGE_WRITE; 12394 break; 12395 case 2: 12396 *prot = PAGE_READ; 12397 if (!is_user) { 12398 *prot |= PAGE_WRITE; 12399 } 12400 break; 12401 case 3: 12402 *prot = PAGE_READ | PAGE_WRITE; 12403 break; 12404 case 5: 12405 if (is_user) { 12406 fi->type = ARMFault_Permission; 12407 fi->level = 1; 12408 return true; 12409 } 12410 *prot = PAGE_READ; 12411 break; 12412 case 6: 12413 *prot = PAGE_READ; 12414 break; 12415 default: 12416 /* Bad permission. */ 12417 fi->type = ARMFault_Permission; 12418 fi->level = 1; 12419 return true; 12420 } 12421 *prot |= PAGE_EXEC; 12422 return false; 12423 } 12424 12425 /* Combine either inner or outer cacheability attributes for normal 12426 * memory, according to table D4-42 and pseudocode procedure 12427 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 12428 * 12429 * NB: only stage 1 includes allocation hints (RW bits), leading to 12430 * some asymmetry. 12431 */ 12432 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 12433 { 12434 if (s1 == 4 || s2 == 4) { 12435 /* non-cacheable has precedence */ 12436 return 4; 12437 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 12438 /* stage 1 write-through takes precedence */ 12439 return s1; 12440 } else if (extract32(s2, 2, 2) == 2) { 12441 /* stage 2 write-through takes precedence, but the allocation hint 12442 * is still taken from stage 1 12443 */ 12444 return (2 << 2) | extract32(s1, 0, 2); 12445 } else { /* write-back */ 12446 return s1; 12447 } 12448 } 12449 12450 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 12451 * and CombineS1S2Desc() 12452 * 12453 * @s1: Attributes from stage 1 walk 12454 * @s2: Attributes from stage 2 walk 12455 */ 12456 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 12457 { 12458 uint8_t s1lo, s2lo, s1hi, s2hi; 12459 ARMCacheAttrs ret; 12460 bool tagged = false; 12461 12462 if (s1.attrs == 0xf0) { 12463 tagged = true; 12464 s1.attrs = 0xff; 12465 } 12466 12467 s1lo = extract32(s1.attrs, 0, 4); 12468 s2lo = extract32(s2.attrs, 0, 4); 12469 s1hi = extract32(s1.attrs, 4, 4); 12470 s2hi = extract32(s2.attrs, 4, 4); 12471 12472 /* Combine shareability attributes (table D4-43) */ 12473 if (s1.shareability == 2 || s2.shareability == 2) { 12474 /* if either are outer-shareable, the result is outer-shareable */ 12475 ret.shareability = 2; 12476 } else if (s1.shareability == 3 || s2.shareability == 3) { 12477 /* if either are inner-shareable, the result is inner-shareable */ 12478 ret.shareability = 3; 12479 } else { 12480 /* both non-shareable */ 12481 ret.shareability = 0; 12482 } 12483 12484 /* Combine memory type and cacheability attributes */ 12485 if (s1hi == 0 || s2hi == 0) { 12486 /* Device has precedence over normal */ 12487 if (s1lo == 0 || s2lo == 0) { 12488 /* nGnRnE has precedence over anything */ 12489 ret.attrs = 0; 12490 } else if (s1lo == 4 || s2lo == 4) { 
12491 /* non-Reordering has precedence over Reordering */ 12492 ret.attrs = 4; /* nGnRE */ 12493 } else if (s1lo == 8 || s2lo == 8) { 12494 /* non-Gathering has precedence over Gathering */ 12495 ret.attrs = 8; /* nGRE */ 12496 } else { 12497 ret.attrs = 0xc; /* GRE */ 12498 } 12499 12500 /* Any location for which the resultant memory type is any 12501 * type of Device memory is always treated as Outer Shareable. 12502 */ 12503 ret.shareability = 2; 12504 } else { /* Normal memory */ 12505 /* Outer/inner cacheability combine independently */ 12506 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 12507 | combine_cacheattr_nibble(s1lo, s2lo); 12508 12509 if (ret.attrs == 0x44) { 12510 /* Any location for which the resultant memory type is Normal 12511 * Inner Non-cacheable, Outer Non-cacheable is always treated 12512 * as Outer Shareable. 12513 */ 12514 ret.shareability = 2; 12515 } 12516 } 12517 12518 /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */ 12519 if (tagged && ret.attrs == 0xff) { 12520 ret.attrs = 0xf0; 12521 } 12522 12523 return ret; 12524 } 12525 12526 12527 /* get_phys_addr - get the physical address for this virtual address 12528 * 12529 * Find the physical address corresponding to the given virtual address, 12530 * by doing a translation table walk on MMU based systems or using the 12531 * MPU state on MPU based systems. 12532 * 12533 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 12534 * prot and page_size may not be filled in, and the populated fsr value provides 12535 * information on why the translation aborted, in the format of a 12536 * DFSR/IFSR fault register, with the following caveats: 12537 * * we honour the short vs long DFSR format differences. 12538 * * the WnR bit is never set (the caller must do this). 12539 * * for PSMAv5 based systems we don't bother to return a full FSR format 12540 * value. 12541 * 12542 * @env: CPUARMState 12543 * @address: virtual address to get physical address for 12544 * @access_type: 0 for read, 1 for write, 2 for execute 12545 * @mmu_idx: MMU index indicating required translation regime 12546 * @phys_ptr: set to the physical address corresponding to the virtual address 12547 * @attrs: set to the memory transaction attributes to use 12548 * @prot: set to the permissions for the page containing phys_ptr 12549 * @page_size: set to the size of the page containing phys_ptr 12550 * @fi: set to fault info if the translation fails 12551 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 12552 */ 12553 bool get_phys_addr(CPUARMState *env, target_ulong address, 12554 MMUAccessType access_type, ARMMMUIdx mmu_idx, 12555 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 12556 target_ulong *page_size, 12557 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 12558 { 12559 ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx); 12560 12561 if (mmu_idx != s1_mmu_idx) { 12562 /* Call ourselves recursively to do the stage 1 and then stage 2 12563 * translations if mmu_idx is a two-stage regime. 12564 */ 12565 if (arm_feature(env, ARM_FEATURE_EL2)) { 12566 hwaddr ipa; 12567 int s2_prot; 12568 int ret; 12569 bool ipa_secure; 12570 ARMCacheAttrs cacheattrs2 = {}; 12571 ARMMMUIdx s2_mmu_idx; 12572 bool is_el0; 12573 12574 ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa, 12575 attrs, prot, page_size, fi, cacheattrs); 12576 12577 /* If S1 fails or S2 is disabled, return early. 
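 *
 * Illustrative flow: when stage 2 is enabled, the 'ipa' returned by
 * the recursive call above is walked again below via
 * ARMMMUIdx_Stage2 (or Stage2_S) to give the final physical
 * address, and the S1 and S2 permissions and cache attributes are
 * then combined.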
*/ 12578 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 12579 *phys_ptr = ipa; 12580 return ret; 12581 } 12582 12583 ipa_secure = attrs->secure; 12584 if (arm_is_secure_below_el3(env)) { 12585 if (ipa_secure) { 12586 attrs->secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW); 12587 } else { 12588 attrs->secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW); 12589 } 12590 } else { 12591 assert(!ipa_secure); 12592 } 12593 12594 s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2; 12595 is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0; 12596 12597 /* S1 is done. Now do S2 translation. */ 12598 ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0, 12599 phys_ptr, attrs, &s2_prot, 12600 page_size, fi, &cacheattrs2); 12601 fi->s2addr = ipa; 12602 /* Combine the S1 and S2 perms. */ 12603 *prot &= s2_prot; 12604 12605 /* If S2 fails, return early. */ 12606 if (ret) { 12607 return ret; 12608 } 12609 12610 /* Combine the S1 and S2 cache attributes. */ 12611 if (arm_hcr_el2_eff(env) & HCR_DC) { 12612 /* 12613 * HCR.DC forces the first stage attributes to 12614 * Normal Non-Shareable, 12615 * Inner Write-Back Read-Allocate Write-Allocate, 12616 * Outer Write-Back Read-Allocate Write-Allocate. 12617 * Do not overwrite Tagged within attrs. 12618 */ 12619 if (cacheattrs->attrs != 0xf0) { 12620 cacheattrs->attrs = 0xff; 12621 } 12622 cacheattrs->shareability = 0; 12623 } 12624 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 12625 12626 /* Check if IPA translates to secure or non-secure PA space. */ 12627 if (arm_is_secure_below_el3(env)) { 12628 if (ipa_secure) { 12629 attrs->secure = 12630 !(env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW)); 12631 } else { 12632 attrs->secure = 12633 !((env->cp15.vtcr_el2.raw_tcr & (VTCR_NSA | VTCR_NSW)) 12634 || (env->cp15.vstcr_el2.raw_tcr & (VSTCR_SA | VSTCR_SW))); 12635 } 12636 } 12637 return 0; 12638 } else { 12639 /* 12640 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 12641 */ 12642 mmu_idx = stage_1_mmu_idx(mmu_idx); 12643 } 12644 } 12645 12646 /* The page table entries may downgrade secure to non-secure, but 12647 * cannot upgrade an non-secure translation regime's attributes 12648 * to secure. 12649 */ 12650 attrs->secure = regime_is_secure(env, mmu_idx); 12651 attrs->user = regime_is_user(env, mmu_idx); 12652 12653 /* Fast Context Switch Extension. This doesn't exist at all in v8. 12654 * In v7 and earlier it affects all stage 1 translations. 12655 */ 12656 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 12657 && !arm_feature(env, ARM_FEATURE_V8)) { 12658 if (regime_el(env, mmu_idx) == 3) { 12659 address += env->cp15.fcseidr_s; 12660 } else { 12661 address += env->cp15.fcseidr_ns; 12662 } 12663 } 12664 12665 if (arm_feature(env, ARM_FEATURE_PMSA)) { 12666 bool ret; 12667 *page_size = TARGET_PAGE_SIZE; 12668 12669 if (arm_feature(env, ARM_FEATURE_V8)) { 12670 /* PMSAv8 */ 12671 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 12672 phys_ptr, attrs, prot, page_size, fi); 12673 } else if (arm_feature(env, ARM_FEATURE_V7)) { 12674 /* PMSAv7 */ 12675 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 12676 phys_ptr, prot, page_size, fi); 12677 } else { 12678 /* Pre-v7 MPU */ 12679 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 12680 phys_ptr, prot, fi); 12681 } 12682 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 12683 " mmu_idx %u -> %s (prot %c%c%c)\n", 12684 access_type == MMU_DATA_LOAD ? 
"reading" : 12685 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 12686 (uint32_t)address, mmu_idx, 12687 ret ? "Miss" : "Hit", 12688 *prot & PAGE_READ ? 'r' : '-', 12689 *prot & PAGE_WRITE ? 'w' : '-', 12690 *prot & PAGE_EXEC ? 'x' : '-'); 12691 12692 return ret; 12693 } 12694 12695 /* Definitely a real MMU, not an MPU */ 12696 12697 if (regime_translation_disabled(env, mmu_idx)) { 12698 uint64_t hcr; 12699 uint8_t memattr; 12700 12701 /* 12702 * MMU disabled. S1 addresses within aa64 translation regimes are 12703 * still checked for bounds -- see AArch64.TranslateAddressS1Off. 12704 */ 12705 if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) { 12706 int r_el = regime_el(env, mmu_idx); 12707 if (arm_el_is_aa64(env, r_el)) { 12708 int pamax = arm_pamax(env_archcpu(env)); 12709 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; 12710 int addrtop, tbi; 12711 12712 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 12713 if (access_type == MMU_INST_FETCH) { 12714 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 12715 } 12716 tbi = (tbi >> extract64(address, 55, 1)) & 1; 12717 addrtop = (tbi ? 55 : 63); 12718 12719 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 12720 fi->type = ARMFault_AddressSize; 12721 fi->level = 0; 12722 fi->stage2 = false; 12723 return 1; 12724 } 12725 12726 /* 12727 * When TBI is disabled, we've just validated that all of the 12728 * bits above PAMax are zero, so logically we only need to 12729 * clear the top byte for TBI. But it's clearer to follow 12730 * the pseudocode set of addrdesc.paddress. 12731 */ 12732 address = extract64(address, 0, 52); 12733 } 12734 } 12735 *phys_ptr = address; 12736 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 12737 *page_size = TARGET_PAGE_SIZE; 12738 12739 /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. 
*/ 12740 hcr = arm_hcr_el2_eff(env); 12741 cacheattrs->shareability = 0; 12742 if (hcr & HCR_DC) { 12743 if (hcr & HCR_DCT) { 12744 memattr = 0xf0; /* Tagged, Normal, WB, RWA */ 12745 } else { 12746 memattr = 0xff; /* Normal, WB, RWA */ 12747 } 12748 } else if (access_type == MMU_INST_FETCH) { 12749 if (regime_sctlr(env, mmu_idx) & SCTLR_I) { 12750 memattr = 0xee; /* Normal, WT, RA, NT */ 12751 } else { 12752 memattr = 0x44; /* Normal, NC, No */ 12753 } 12754 cacheattrs->shareability = 2; /* outer sharable */ 12755 } else { 12756 memattr = 0x00; /* Device, nGnRnE */ 12757 } 12758 cacheattrs->attrs = memattr; 12759 return 0; 12760 } 12761 12762 if (regime_using_lpae_format(env, mmu_idx)) { 12763 return get_phys_addr_lpae(env, address, access_type, mmu_idx, false, 12764 phys_ptr, attrs, prot, page_size, 12765 fi, cacheattrs); 12766 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 12767 return get_phys_addr_v6(env, address, access_type, mmu_idx, 12768 phys_ptr, attrs, prot, page_size, fi); 12769 } else { 12770 return get_phys_addr_v5(env, address, access_type, mmu_idx, 12771 phys_ptr, prot, page_size, fi); 12772 } 12773 } 12774 12775 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 12776 MemTxAttrs *attrs) 12777 { 12778 ARMCPU *cpu = ARM_CPU(cs); 12779 CPUARMState *env = &cpu->env; 12780 hwaddr phys_addr; 12781 target_ulong page_size; 12782 int prot; 12783 bool ret; 12784 ARMMMUFaultInfo fi = {}; 12785 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 12786 ARMCacheAttrs cacheattrs = {}; 12787 12788 *attrs = (MemTxAttrs) {}; 12789 12790 ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr, 12791 attrs, &prot, &page_size, &fi, &cacheattrs); 12792 12793 if (ret) { 12794 return -1; 12795 } 12796 return phys_addr; 12797 } 12798 12799 #endif 12800 12801 /* Note that signed overflow is undefined in C. The following routines are 12802 careful to use unsigned types where modulo arithmetic is required. 12803 Failure to do so _will_ break on newer gcc. */ 12804 12805 /* Signed saturating arithmetic. */ 12806 12807 /* Perform 16-bit signed saturating addition. */ 12808 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 12809 { 12810 uint16_t res; 12811 12812 res = a + b; 12813 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 12814 if (a & 0x8000) 12815 res = 0x8000; 12816 else 12817 res = 0x7fff; 12818 } 12819 return res; 12820 } 12821 12822 /* Perform 8-bit signed saturating addition. */ 12823 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 12824 { 12825 uint8_t res; 12826 12827 res = a + b; 12828 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 12829 if (a & 0x80) 12830 res = 0x80; 12831 else 12832 res = 0x7f; 12833 } 12834 return res; 12835 } 12836 12837 /* Perform 16-bit signed saturating subtraction. */ 12838 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 12839 { 12840 uint16_t res; 12841 12842 res = a - b; 12843 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 12844 if (a & 0x8000) 12845 res = 0x8000; 12846 else 12847 res = 0x7fff; 12848 } 12849 return res; 12850 } 12851 12852 /* Perform 8-bit signed saturating subtraction. 
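 * For example, sub8_sat(0x80, 0x01) is (-128) - 1, which saturates
 * to 0x80 (-128) rather than wrapping round to 0x7f.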
*/ 12853 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 12854 { 12855 uint8_t res; 12856 12857 res = a - b; 12858 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 12859 if (a & 0x80) 12860 res = 0x80; 12861 else 12862 res = 0x7f; 12863 } 12864 return res; 12865 } 12866 12867 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 12868 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 12869 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 12870 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 12871 #define PFX q 12872 12873 #include "op_addsub.h" 12874 12875 /* Unsigned saturating arithmetic. */ 12876 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 12877 { 12878 uint16_t res; 12879 res = a + b; 12880 if (res < a) 12881 res = 0xffff; 12882 return res; 12883 } 12884 12885 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 12886 { 12887 if (a > b) 12888 return a - b; 12889 else 12890 return 0; 12891 } 12892 12893 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 12894 { 12895 uint8_t res; 12896 res = a + b; 12897 if (res < a) 12898 res = 0xff; 12899 return res; 12900 } 12901 12902 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 12903 { 12904 if (a > b) 12905 return a - b; 12906 else 12907 return 0; 12908 } 12909 12910 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 12911 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 12912 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 12913 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 12914 #define PFX uq 12915 12916 #include "op_addsub.h" 12917 12918 /* Signed modulo arithmetic. */ 12919 #define SARITH16(a, b, n, op) do { \ 12920 int32_t sum; \ 12921 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 12922 RESULT(sum, n, 16); \ 12923 if (sum >= 0) \ 12924 ge |= 3 << (n * 2); \ 12925 } while(0) 12926 12927 #define SARITH8(a, b, n, op) do { \ 12928 int32_t sum; \ 12929 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 12930 RESULT(sum, n, 8); \ 12931 if (sum >= 0) \ 12932 ge |= 1 << n; \ 12933 } while(0) 12934 12935 12936 #define ADD16(a, b, n) SARITH16(a, b, n, +) 12937 #define SUB16(a, b, n) SARITH16(a, b, n, -) 12938 #define ADD8(a, b, n) SARITH8(a, b, n, +) 12939 #define SUB8(a, b, n) SARITH8(a, b, n, -) 12940 #define PFX s 12941 #define ARITH_GE 12942 12943 #include "op_addsub.h" 12944 12945 /* Unsigned modulo arithmetic. */ 12946 #define ADD16(a, b, n) do { \ 12947 uint32_t sum; \ 12948 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 12949 RESULT(sum, n, 16); \ 12950 if ((sum >> 16) == 1) \ 12951 ge |= 3 << (n * 2); \ 12952 } while(0) 12953 12954 #define ADD8(a, b, n) do { \ 12955 uint32_t sum; \ 12956 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 12957 RESULT(sum, n, 8); \ 12958 if ((sum >> 8) == 1) \ 12959 ge |= 1 << n; \ 12960 } while(0) 12961 12962 #define SUB16(a, b, n) do { \ 12963 uint32_t sum; \ 12964 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 12965 RESULT(sum, n, 16); \ 12966 if ((sum >> 16) == 0) \ 12967 ge |= 3 << (n * 2); \ 12968 } while(0) 12969 12970 #define SUB8(a, b, n) do { \ 12971 uint32_t sum; \ 12972 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 12973 RESULT(sum, n, 8); \ 12974 if ((sum >> 8) == 0) \ 12975 ge |= 1 << n; \ 12976 } while(0) 12977 12978 #define PFX u 12979 #define ARITH_GE 12980 12981 #include "op_addsub.h" 12982 12983 /* Halved signed arithmetic. 
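 * These compute the full-width sum or difference and then drop the
 * low bit, so no saturation is needed; e.g. for a byte lane,
 * (-1 + -2) >> 1 yields -2.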
*/ 12984 #define ADD16(a, b, n) \ 12985 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 12986 #define SUB16(a, b, n) \ 12987 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 12988 #define ADD8(a, b, n) \ 12989 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 12990 #define SUB8(a, b, n) \ 12991 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 12992 #define PFX sh 12993 12994 #include "op_addsub.h" 12995 12996 /* Halved unsigned arithmetic. */ 12997 #define ADD16(a, b, n) \ 12998 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12999 #define SUB16(a, b, n) \ 13000 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 13001 #define ADD8(a, b, n) \ 13002 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 13003 #define SUB8(a, b, n) \ 13004 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 13005 #define PFX uh 13006 13007 #include "op_addsub.h" 13008 13009 static inline uint8_t do_usad(uint8_t a, uint8_t b) 13010 { 13011 if (a > b) 13012 return a - b; 13013 else 13014 return b - a; 13015 } 13016 13017 /* Unsigned sum of absolute byte differences. */ 13018 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 13019 { 13020 uint32_t sum; 13021 sum = do_usad(a, b); 13022 sum += do_usad(a >> 8, b >> 8); 13023 sum += do_usad(a >> 16, b >> 16); 13024 sum += do_usad(a >> 24, b >> 24); 13025 return sum; 13026 } 13027 13028 /* For ARMv6 SEL instruction. */ 13029 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 13030 { 13031 uint32_t mask; 13032 13033 mask = 0; 13034 if (flags & 1) 13035 mask |= 0xff; 13036 if (flags & 2) 13037 mask |= 0xff00; 13038 if (flags & 4) 13039 mask |= 0xff0000; 13040 if (flags & 8) 13041 mask |= 0xff000000; 13042 return (a & mask) | (b & ~mask); 13043 } 13044 13045 /* CRC helpers. 13046 * The upper bytes of val (above the number specified by 'bytes') must have 13047 * been zeroed out by the caller. 13048 */ 13049 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 13050 { 13051 uint8_t buf[4]; 13052 13053 stl_le_p(buf, val); 13054 13055 /* zlib crc32 converts the accumulator and output to one's complement. */ 13056 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 13057 } 13058 13059 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 13060 { 13061 uint8_t buf[4]; 13062 13063 stl_le_p(buf, val); 13064 13065 /* Linux crc32c converts the output to one's complement. */ 13066 return crc32c(acc, buf, bytes) ^ 0xffffffff; 13067 } 13068 13069 /* Return the exception level to which FP-disabled exceptions should 13070 * be taken, or 0 if FP is enabled. 
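 *
 * For instance (illustrative): an EL0 FP instruction with
 * CPACR_EL1.FPEN == 0b01 ("trap only EL0") returns 1, while
 * FPEN == 0b11 with no EL2/EL3 traps configured returns 0.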
13071 */ 13072 int fp_exception_el(CPUARMState *env, int cur_el) 13073 { 13074 #ifndef CONFIG_USER_ONLY 13075 uint64_t hcr_el2; 13076 13077 /* CPACR and the CPTR registers don't exist before v6, so FP is 13078 * always accessible 13079 */ 13080 if (!arm_feature(env, ARM_FEATURE_V6)) { 13081 return 0; 13082 } 13083 13084 if (arm_feature(env, ARM_FEATURE_M)) { 13085 /* CPACR can cause a NOCP UsageFault taken to current security state */ 13086 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { 13087 return 1; 13088 } 13089 13090 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { 13091 if (!extract32(env->v7m.nsacr, 10, 1)) { 13092 /* FP insns cause a NOCP UsageFault taken to Secure */ 13093 return 3; 13094 } 13095 } 13096 13097 return 0; 13098 } 13099 13100 hcr_el2 = arm_hcr_el2_eff(env); 13101 13102 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 13103 * 0, 2 : trap EL0 and EL1/PL1 accesses 13104 * 1 : trap only EL0 accesses 13105 * 3 : trap no accesses 13106 * This register is ignored if E2H+TGE are both set. 13107 */ 13108 if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 13109 int fpen = extract32(env->cp15.cpacr_el1, 20, 2); 13110 13111 switch (fpen) { 13112 case 0: 13113 case 2: 13114 if (cur_el == 0 || cur_el == 1) { 13115 /* Trap to PL1, which might be EL1 or EL3 */ 13116 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 13117 return 3; 13118 } 13119 return 1; 13120 } 13121 if (cur_el == 3 && !is_a64(env)) { 13122 /* Secure PL1 running at EL3 */ 13123 return 3; 13124 } 13125 break; 13126 case 1: 13127 if (cur_el == 0) { 13128 return 1; 13129 } 13130 break; 13131 case 3: 13132 break; 13133 } 13134 } 13135 13136 /* 13137 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode 13138 * to control non-secure access to the FPU. It doesn't have any 13139 * effect if EL3 is AArch64 or if EL3 doesn't exist at all. 13140 */ 13141 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 13142 cur_el <= 2 && !arm_is_secure_below_el3(env))) { 13143 if (!extract32(env->cp15.nsacr, 10, 1)) { 13144 /* FP insns act as UNDEF */ 13145 return cur_el == 2 ? 2 : 1; 13146 } 13147 } 13148 13149 /* 13150 * CPTR_EL2 is present in v7VE or v8, and changes format 13151 * with HCR_EL2.E2H (regardless of TGE). 13152 */ 13153 if (cur_el <= 2) { 13154 if (hcr_el2 & HCR_E2H) { 13155 /* Check CPTR_EL2.FPEN. 
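 * With E2H set this field uses the CPACR_EL1.FPEN encoding:
 * 0b00 and 0b10 trap to EL2 here, while 0b01 traps only EL0
 * accesses and only when TGE is also set.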
*/ 13156 switch (extract32(env->cp15.cptr_el[2], 20, 2)) { 13157 case 1: 13158 if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) { 13159 break; 13160 } 13161 /* fall through */ 13162 case 0: 13163 case 2: 13164 return 2; 13165 } 13166 } else if (arm_is_el2_enabled(env)) { 13167 if (env->cp15.cptr_el[2] & CPTR_TFP) { 13168 return 2; 13169 } 13170 } 13171 } 13172 13173 /* CPTR_EL3 : present in v8 */ 13174 if (env->cp15.cptr_el[3] & CPTR_TFP) { 13175 /* Trap all FP ops to EL3 */ 13176 return 3; 13177 } 13178 #endif 13179 return 0; 13180 } 13181 13182 /* Return the exception level we're running at if this is our mmu_idx */ 13183 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) 13184 { 13185 if (mmu_idx & ARM_MMU_IDX_M) { 13186 return mmu_idx & ARM_MMU_IDX_M_PRIV; 13187 } 13188 13189 switch (mmu_idx) { 13190 case ARMMMUIdx_E10_0: 13191 case ARMMMUIdx_E20_0: 13192 case ARMMMUIdx_SE10_0: 13193 case ARMMMUIdx_SE20_0: 13194 return 0; 13195 case ARMMMUIdx_E10_1: 13196 case ARMMMUIdx_E10_1_PAN: 13197 case ARMMMUIdx_SE10_1: 13198 case ARMMMUIdx_SE10_1_PAN: 13199 return 1; 13200 case ARMMMUIdx_E2: 13201 case ARMMMUIdx_E20_2: 13202 case ARMMMUIdx_E20_2_PAN: 13203 case ARMMMUIdx_SE2: 13204 case ARMMMUIdx_SE20_2: 13205 case ARMMMUIdx_SE20_2_PAN: 13206 return 2; 13207 case ARMMMUIdx_SE3: 13208 return 3; 13209 default: 13210 g_assert_not_reached(); 13211 } 13212 } 13213 13214 #ifndef CONFIG_TCG 13215 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 13216 { 13217 g_assert_not_reached(); 13218 } 13219 #endif 13220 13221 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) 13222 { 13223 ARMMMUIdx idx; 13224 uint64_t hcr; 13225 13226 if (arm_feature(env, ARM_FEATURE_M)) { 13227 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 13228 } 13229 13230 /* See ARM pseudo-function ELIsInHost. */ 13231 switch (el) { 13232 case 0: 13233 hcr = arm_hcr_el2_eff(env); 13234 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 13235 idx = ARMMMUIdx_E20_0; 13236 } else { 13237 idx = ARMMMUIdx_E10_0; 13238 } 13239 break; 13240 case 1: 13241 if (env->pstate & PSTATE_PAN) { 13242 idx = ARMMMUIdx_E10_1_PAN; 13243 } else { 13244 idx = ARMMMUIdx_E10_1; 13245 } 13246 break; 13247 case 2: 13248 /* Note that TGE does not apply at EL2. 
static CPUARMTBFlags rebuild_hflags_common(CPUARMState *env, int fp_el,
                                           ARMMMUIdx mmu_idx,
                                           CPUARMTBFlags flags)
{
    DP_TBFLAG_ANY(flags, FPEXC_EL, fp_el);
    DP_TBFLAG_ANY(flags, MMUIDX, arm_to_core_mmu_idx(mmu_idx));

    if (arm_singlestep_active(env)) {
        DP_TBFLAG_ANY(flags, SS_ACTIVE, 1);
    }
    return flags;
}

static CPUARMTBFlags rebuild_hflags_common_32(CPUARMState *env, int fp_el,
                                              ARMMMUIdx mmu_idx,
                                              CPUARMTBFlags flags)
{
    bool sctlr_b = arm_sctlr_b(env);

    if (sctlr_b) {
        DP_TBFLAG_A32(flags, SCTLR__B, 1);
    }
    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }
    DP_TBFLAG_A32(flags, NS, !access_secure_reg(env));

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_m32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = {};
    uint32_t ccr = env->v7m.ccr[env->v7m.secure];

    /* Without HaveMainExt, CCR.UNALIGN_TRP is RES1. */
    if (ccr & R_V7M_CCR_UNALIGN_TRP_MASK) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_v7m_is_handler_mode(env)) {
        DP_TBFLAG_M32(flags, HANDLER, 1);
    }

    /*
     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
     * is suppressing them because the requested execution priority
     * is less than 0.
     */
    if (arm_feature(env, ARM_FEATURE_V8) &&
        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
          (ccr & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
        DP_TBFLAG_M32(flags, STACKCHECK, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
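/*
 * For example, a v8M CPU executing in Handler mode with CCR.UNALIGN_TRP
 * set and a non-negative execution priority ends up with ALIGN_MEM,
 * HANDLER and STACKCHECK all set; if the execution priority is negative
 * and CCR.STKOFHFNMIGN is set, STACKCHECK is left clear because stack
 * limit violations are being ignored in that state.
 */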
static CPUARMTBFlags rebuild_hflags_aprofile(CPUARMState *env)
{
    CPUARMTBFlags flags = {};

    DP_TBFLAG_ANY(flags, DEBUG_TARGET_EL, arm_debug_target_el(env));
    return flags;
}

static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    int el = arm_current_el(env);

    if (arm_sctlr(env, el) & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_el_is_aa64(env, 1)) {
        DP_TBFLAG_A32(flags, VFPEN, 1);
    }

    if (el < 2 && env->cp15.hstr_el2 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        DP_TBFLAG_A32(flags, HSTR_ACTIVE, 1);
    }

    if (env->uncached_cpsr & CPSR_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}

static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
                                        ARMMMUIdx mmu_idx)
{
    CPUARMTBFlags flags = rebuild_hflags_aprofile(env);
    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
    uint64_t sctlr;
    int tbii, tbid;

    DP_TBFLAG_ANY(flags, AARCH64_STATE, 1);

    /* Get control bits for tagged addresses. */
    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);

    DP_TBFLAG_A64(flags, TBII, tbii);
    DP_TBFLAG_A64(flags, TBID, tbid);

    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
        int sve_el = sve_exception_el(env, el);
        uint32_t zcr_len;

        /*
         * If SVE is disabled, but FP is enabled,
         * then the effective len is 0.
         */
        if (sve_el != 0 && fp_el == 0) {
            zcr_len = 0;
        } else {
            zcr_len = sve_zcr_len_for_el(env, el);
        }
        DP_TBFLAG_A64(flags, SVEEXC_EL, sve_el);
        DP_TBFLAG_A64(flags, ZCR_LEN, zcr_len);
    }

    sctlr = regime_sctlr(env, stage1);

    if (sctlr & SCTLR_A) {
        DP_TBFLAG_ANY(flags, ALIGN_MEM, 1);
    }

    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
        DP_TBFLAG_ANY(flags, BE_DATA, 1);
    }

    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
        /*
         * In order to save space in flags, we record only whether
         * pauth is "inactive", meaning all insns are implemented as
         * a nop, or "active" when some action must be performed.
         * The decision of which action to take is left to a helper.
         */
        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
            DP_TBFLAG_A64(flags, PAUTH_ACTIVE, 1);
        }
    }

    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
        /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
            DP_TBFLAG_A64(flags, BT, 1);
        }
    }

    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
    if (!(env->pstate & PSTATE_UAO)) {
        switch (mmu_idx) {
        case ARMMMUIdx_E10_1:
        case ARMMMUIdx_E10_1_PAN:
        case ARMMMUIdx_SE10_1:
        case ARMMMUIdx_SE10_1_PAN:
            /* TODO: ARMv8.3-NV */
            DP_TBFLAG_A64(flags, UNPRIV, 1);
            break;
        case ARMMMUIdx_E20_2:
        case ARMMMUIdx_E20_2_PAN:
        case ARMMMUIdx_SE20_2:
        case ARMMMUIdx_SE20_2_PAN:
            /*
             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
             */
            if (env->cp15.hcr_el2 & HCR_TGE) {
                DP_TBFLAG_A64(flags, UNPRIV, 1);
            }
            break;
        default:
            break;
        }
    }

    if (env->pstate & PSTATE_IL) {
        DP_TBFLAG_ANY(flags, PSTATE__IL, 1);
    }

    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
        /*
         * Set MTE_ACTIVE if any access may be Checked, and leave clear
         * if all accesses must be Unchecked:
         * 1) If no TBI, then there are no tags in the address to check,
         * 2) If Tag Check Override, then all accesses are Unchecked,
         * 3) If Tag Check Fail == 0, then Checked access have no effect,
         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
         */
        if (allocation_tag_access_enabled(env, el, sctlr)) {
            DP_TBFLAG_A64(flags, ATA, 1);
            if (tbid
                && !(env->pstate & PSTATE_TCO)
                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
                DP_TBFLAG_A64(flags, MTE_ACTIVE, 1);
            }
        }
        /* And again for unprivileged accesses, if required. */
        if (EX_TBFLAG_A64(flags, UNPRIV)
            && tbid
            && !(env->pstate & PSTATE_TCO)
            && (sctlr & SCTLR_TCF0)
            && allocation_tag_access_enabled(env, 0, sctlr)) {
            DP_TBFLAG_A64(flags, MTE0_ACTIVE, 1);
        }
        /* Cache TCMA as well as TBI. */
        DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
    }

    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
}
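/*
 * On the tagged-address flags above: the local "tbid" holds the TBI mask
 * (bit 0 for the low half of the address space, bit 1 for the high half),
 * and "tbii" restricts that to instruction fetches.  For example, with
 * TCR_EL1.TBI0 = 1 and TCR_EL1.TBID0 = 1, bit 0 ends up set in the TBID
 * flag but clear in TBII: top-byte-ignore applies to data accesses but
 * not to instruction fetches in the lower address range.
 */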
static CPUARMTBFlags rebuild_hflags_internal(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    if (is_a64(env)) {
        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
    } else if (arm_feature(env, ARM_FEATURE_M)) {
        return rebuild_hflags_m32(env, fp_el, mmu_idx);
    } else {
        return rebuild_hflags_a32(env, fp_el, mmu_idx);
    }
}

void arm_rebuild_hflags(CPUARMState *env)
{
    env->hflags = rebuild_hflags_internal(env);
}
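/*
 * The HELPER() wrappers below are entry points reachable from generated
 * code, typically after guest execution itself has changed the relevant
 * state (for example a write to a control register, or an exception
 * return that switches exception level).  The *_newel variants recompute
 * the current EL as well, because the translator cannot know it statically.
 */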
/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
}

/*
 * If we have triggered an EL state change we can't rely on the
 * translator having passed it to us, we need to recompute.
 */
void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
{
    int el = arm_current_el(env);
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}

void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
{
    int fp_el = fp_exception_el(env, el);
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);

    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}

static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
    CPUARMTBFlags c = env->hflags;
    CPUARMTBFlags r = rebuild_hflags_internal(env);

    if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
        fprintf(stderr, "TCG hflags mismatch "
                        "(current:(0x%08x,0x" TARGET_FMT_lx ")"
                        " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
                c.flags, c.flags2, r.flags, r.flags2);
        abort();
    }
#endif
}

static bool mve_no_pred(CPUARMState *env)
{
    /*
     * Return true if there is definitely no predication of MVE
     * instructions by VPR or LTPSIZE. (Returning false even if there
     * isn't any predication is OK; generated code will just be
     * a little worse.)
     * If the CPU does not implement MVE then this TB flag is always 0.
     *
     * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
     * logic in gen_update_fp_context() needs to be updated to match.
     *
     * We do not include the effect of the ECI bits here -- they are
     * tracked in other TB flags. This simplifies the logic for
     * "when did we emit code that changes the MVE_NO_PRED TB flag
     * and thus need to end the TB?".
     */
    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
        return false;
    }
    if (env->v7m.vpr) {
        return false;
    }
    if (env->v7m.ltpsize < 4) {
        return false;
    }
    return true;
}
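/*
 * Assemble the final TB flags for the translator: start from the cached
 * env->hflags (checked against a fresh rebuild in CONFIG_DEBUG_TCG builds)
 * and add the bits that can change from one instruction to the next, such
 * as BTYPE on AArch64, Thumb/IT state and the lazy-FP/MVE bits on
 * M-profile, and the software single-step PSTATE.SS state.
 */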
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUARMTBFlags flags;

    assert_hflags_rebuild_correctly(env);
    flags = env->hflags;

    if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
        *pc = env->pc;
        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
            DP_TBFLAG_A64(flags, BTYPE, env->btype);
        }
    } else {
        *pc = env->regs[15];

        if (arm_feature(env, ARM_FEATURE_M)) {
            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
                != env->v7m.secure) {
                DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
            }

            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
                 (env->v7m.secure &&
                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
                /*
                 * ASPEN is set, but FPCA/SFPA indicate that there is no
                 * active FP context; we must create a new FP context before
                 * executing any FP insn.
                 */
                DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
            }

            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
                DP_TBFLAG_M32(flags, LSPACT, 1);
            }

            if (mve_no_pred(env)) {
                DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
            }
        } else {
            /*
             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
             */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
            } else {
                DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
                DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
            }
            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
                DP_TBFLAG_A32(flags, VFPEN, 1);
            }
        }

        DP_TBFLAG_AM32(flags, THUMB, env->thumb);
        DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
    }

    /*
     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
     */
    if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
        DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
    }

    *pflags = flags.flags;
    *cs_base = flags.flags2;
}

#ifdef TARGET_AARCH64
/*
 * The manual says that when SVE is enabled and VQ is widened the
 * implementation is allowed to zero the previously inaccessible
 * portion of the registers.  The corollary to that is that when
 * SVE is enabled and VQ is narrowed we are also allowed to zero
 * the now inaccessible portion of the registers.
 *
 * The intent of this is that no predicate bit beyond VQ is ever set.
 * Which means that some operations on predicate registers themselves
 * may operate on full uint64_t or even unrolled across the maximum
 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
 * may well be cheaper than conditionals to restrict the operation
 * to the relevant portion of a uint16_t[16].
 */
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
{
    int i, j;
    uint64_t pmask;

    assert(vq >= 1 && vq <= ARM_MAX_VQ);
    assert(vq <= env_archcpu(env)->sve_max_vq);

    /* Zap the high bits of the zregs.  */
    for (i = 0; i < 32; i++) {
        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
    }

    /* Zap the high bits of the pregs and ffr.  */
    pmask = 0;
    if (vq & 3) {
        pmask = ~(-1ULL << (16 * (vq & 3)));
    }
    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
        for (i = 0; i < 17; ++i) {
            env->vfp.pregs[i].p[j] &= pmask;
        }
        pmask = 0;
    }
}
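/*
 * Worked example: narrowing to vq == 3 (a 384-bit vector length) clears
 * bytes 48..255 of every zreg, and for each of the 16 pregs plus FFR keeps
 * only the low 16 * 3 = 48 bits of p[0] (pmask == 0x0000ffffffffffff)
 * while clearing p[1]..p[3] entirely, so no predicate bit beyond VQ
 * remains set.
 */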
/*
 * Notice a change in SVE vector size when changing EL.
 */
void aarch64_sve_change_el(CPUARMState *env, int old_el,
                           int new_el, bool el0_a64)
{
    ARMCPU *cpu = env_archcpu(env);
    int old_len, new_len;
    bool old_a64, new_a64;

    /* Nothing to do if no SVE.  */
    if (!cpu_isar_feature(aa64_sve, cpu)) {
        return;
    }

    /* Nothing to do if FP is disabled in either EL.  */
    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
        return;
    }

    /*
     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
     * at ELx, or not available because the EL is in AArch32 state, then
     * for all purposes other than a direct read, the ZCR_ELx.LEN field
     * has an effective value of 0".
     *
     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
     * we already have the correct register contents when encountering the
     * vq0->vq0 transition between EL0->EL1.
     */
    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
    old_len = (old_a64 && !sve_exception_el(env, old_el)
               ? sve_zcr_len_for_el(env, old_el) : 0);
    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
    new_len = (new_a64 && !sve_exception_el(env, new_el)
               ? sve_zcr_len_for_el(env, new_el) : 0);

    /* When changing vector length, clear inaccessible state.  */
    if (new_len < old_len) {
        aarch64_sve_narrow_vq(env, new_len + 1);
    }
}
#endif