/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "qemu/qemu-print.h"
#include "exec/exec-all.h"
#include <zlib.h> /* For crc32 */
#include "hw/irq.h"
#include "hw/semihosting/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "qemu/range.h"
#include "qapi/qapi-commands-machine-target.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
#include "arm_ldst.h"
#include "exec/cpu_ldst.h"
#endif

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    /* VFP data registers are always little-endian. */
    if (reg < nregs) {
        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            return gdb_get_reg128(buf, q[0], q[1]);
        }
    }
    switch (reg - nregs) {
    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]);
    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env));
    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]);
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;

    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
    {
        /* 128 bit FP register - quads are in LE order */
        uint64_t *q = aa64_vfp_qreg(env, reg);
        return gdb_get_reg128(buf, q[1], q[0]);
    }
    case 32:
        /* FPSR */
        return gdb_get_reg32(buf, vfp_get_fpsr(env));
    case 33:
        /* FPCR */
        return gdb_get_reg32(buf, vfp_get_fpcr(env));
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
                                      bool isread)
{
    if (arm_current_el(env) == 1) {
        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
        if (arm_hcr_el2_eff(env) & trap) {
            return CP_ACCESS_TRAP_EL2;
        }
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TSW. */
static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TACR. */
static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

/* Check for traps from EL1 due to HCR_EL2.TTLB. */
static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    CPUState *cs = env_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlb_flush_all_cpus_synced(cs);
    } else {
        tlb_flush(cs);
    }
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    CPUState *cs = env_cpu(env);

    value &= TARGET_PAGE_MASK;
    if (tlb_force_broadcast(env)) {
        tlb_flush_page_all_cpus_synced(cs, value);
    } else {
        tlb_flush_page(cs, value);
    }
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_E10_1 |
                        ARMMMUIdxBit_E10_1_PAN |
                        ARMMMUIdxBit_E10_0 |
                        ARMMMUIdxBit_Stage2);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_E10_1 |
                                        ARMMMUIdxBit_E10_1_PAN |
                                        ARMMMUIdxBit_E10_0 |
                                        ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_Stage2);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = env_cpu(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }

    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
        value |= env->cp15.cpacr_el1 & (0xf << 20);
    }

    env->cp15.cpacr_el1 = value;
}

static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /*
     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
     */
    uint64_t value = env->cp15.cpacr_el1;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
        value &= ~(0xf << 20);
    }
    return value;
}

static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tvm_trvm,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}

/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * user-mode, return the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static bool pmu_8_1_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.1 PMU */
    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
}

static bool pmu_8_4_events_supported(CPUARMState *env)
{
    /* For events which are supported in any v8.4 PMU */
    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
}

static uint64_t zero_event_get_count(CPUARMState *env)
{
    /* For events which on QEMU never fire, so their count is always zero */
    return 0;
}

static int64_t zero_event_ns_per(uint64_t cycles)
{
    /* An event which never fires can never overflow */
    return -1;
}

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    },
#endif
    { .number = 0x023, /* STALL_FRONTEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x024, /* STALL_BACKEND */
      .supported = pmu_8_1_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
    { .number = 0x03c, /* STALL */
      .supported = pmu_8_4_events_supported,
      .get_count = zero_event_get_count,
      .ns_per_count = zero_event_ns_per,
    },
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x3c
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_PMU)) {
        return false;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
static void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}

/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count.
 * A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
static void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}

void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
*/ 1873 return 0; 1874 } 1875 } 1876 1877 static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, 1878 uint64_t value) 1879 { 1880 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1881 pmevcntr_write(env, ri, value, counter); 1882 } 1883 1884 static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 1885 { 1886 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1887 return pmevcntr_read(env, ri, counter); 1888 } 1889 1890 static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, 1891 uint64_t value) 1892 { 1893 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1894 assert(counter < pmu_num_counters(env)); 1895 env->cp15.c14_pmevcntr[counter] = value; 1896 pmevcntr_write(env, ri, value, counter); 1897 } 1898 1899 static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) 1900 { 1901 uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); 1902 assert(counter < pmu_num_counters(env)); 1903 return env->cp15.c14_pmevcntr[counter]; 1904 } 1905 1906 static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1907 uint64_t value) 1908 { 1909 pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); 1910 } 1911 1912 static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1913 { 1914 return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); 1915 } 1916 1917 static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1918 uint64_t value) 1919 { 1920 if (arm_feature(env, ARM_FEATURE_V8)) { 1921 env->cp15.c9_pmuserenr = value & 0xf; 1922 } else { 1923 env->cp15.c9_pmuserenr = value & 1; 1924 } 1925 } 1926 1927 static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, 1928 uint64_t value) 1929 { 1930 /* We have no event counters so only the C bit can be changed */ 1931 value &= pmu_counter_mask(env); 1932 env->cp15.c9_pminten |= value; 1933 pmu_update_irq(env); 1934 } 1935 1936 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1937 uint64_t value) 1938 { 1939 value &= pmu_counter_mask(env); 1940 env->cp15.c9_pminten &= ~value; 1941 pmu_update_irq(env); 1942 } 1943 1944 static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, 1945 uint64_t value) 1946 { 1947 /* Note that even though the AArch64 view of this register has bits 1948 * [10:0] all RES0 we can only mask the bottom 5, to comply with the 1949 * architectural requirements for bits which are RES0 only in some 1950 * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 1951 * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) 1952 */ 1953 raw_write(env, ri, value & ~0x1FULL); 1954 } 1955 1956 static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 1957 { 1958 /* Begin with base v8.0 state. */ 1959 uint32_t valid_mask = 0x3fff; 1960 ARMCPU *cpu = env_archcpu(env); 1961 1962 if (arm_el_is_aa64(env, 3)) { 1963 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ 1964 valid_mask &= ~SCR_NET; 1965 } else { 1966 valid_mask &= ~(SCR_RW | SCR_ST); 1967 } 1968 1969 if (!arm_feature(env, ARM_FEATURE_EL2)) { 1970 valid_mask &= ~SCR_HCE; 1971 1972 /* On ARMv7, SMD (or SCD as it is called in v7) is only 1973 * supported if EL2 exists. The bit is UNK/SBZP when 1974 * EL2 is unavailable. In QEMU ARMv7, we force it to always zero 1975 * when EL2 is unavailable. 1976 * On ARMv8, this bit is always available. 
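         * Hence the test below only strips SCR_SMD from the writable mask
         * on v7 cores that do not also implement v8.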
1977 */ 1978 if (arm_feature(env, ARM_FEATURE_V7) && 1979 !arm_feature(env, ARM_FEATURE_V8)) { 1980 valid_mask &= ~SCR_SMD; 1981 } 1982 } 1983 if (cpu_isar_feature(aa64_lor, cpu)) { 1984 valid_mask |= SCR_TLOR; 1985 } 1986 if (cpu_isar_feature(aa64_pauth, cpu)) { 1987 valid_mask |= SCR_API | SCR_APK; 1988 } 1989 1990 /* Clear all-context RES0 bits. */ 1991 value &= valid_mask; 1992 raw_write(env, ri, value); 1993 } 1994 1995 static CPAccessResult access_aa64_tid2(CPUARMState *env, 1996 const ARMCPRegInfo *ri, 1997 bool isread) 1998 { 1999 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) { 2000 return CP_ACCESS_TRAP_EL2; 2001 } 2002 2003 return CP_ACCESS_OK; 2004 } 2005 2006 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2007 { 2008 ARMCPU *cpu = env_archcpu(env); 2009 2010 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 2011 * bank 2012 */ 2013 uint32_t index = A32_BANKED_REG_GET(env, csselr, 2014 ri->secure & ARM_CP_SECSTATE_S); 2015 2016 return cpu->ccsidr[index]; 2017 } 2018 2019 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2020 uint64_t value) 2021 { 2022 raw_write(env, ri, value & 0xf); 2023 } 2024 2025 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 2026 { 2027 CPUState *cs = env_cpu(env); 2028 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 2029 uint64_t ret = 0; 2030 bool allow_virt = (arm_current_el(env) == 1 && 2031 (!arm_is_secure_below_el3(env) || 2032 (env->cp15.scr_el3 & SCR_EEL2))); 2033 2034 if (allow_virt && (hcr_el2 & HCR_IMO)) { 2035 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 2036 ret |= CPSR_I; 2037 } 2038 } else { 2039 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 2040 ret |= CPSR_I; 2041 } 2042 } 2043 2044 if (allow_virt && (hcr_el2 & HCR_FMO)) { 2045 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 2046 ret |= CPSR_F; 2047 } 2048 } else { 2049 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 2050 ret |= CPSR_F; 2051 } 2052 } 2053 2054 /* External aborts are not possible in QEMU so A bit is always clear */ 2055 return ret; 2056 } 2057 2058 static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2059 bool isread) 2060 { 2061 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { 2062 return CP_ACCESS_TRAP_EL2; 2063 } 2064 2065 return CP_ACCESS_OK; 2066 } 2067 2068 static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, 2069 bool isread) 2070 { 2071 if (arm_feature(env, ARM_FEATURE_V8)) { 2072 return access_aa64_tid1(env, ri, isread); 2073 } 2074 2075 return CP_ACCESS_OK; 2076 } 2077 2078 static const ARMCPRegInfo v7_cp_reginfo[] = { 2079 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 2080 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 2081 .access = PL1_W, .type = ARM_CP_NOP }, 2082 /* Performance monitors are implementation defined in v7, 2083 * but with an ARM recommended set of registers, which we 2084 * follow. 2085 * 2086 * Performance registers fall into three categories: 2087 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 2088 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 2089 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 2090 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 2091 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
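     * As concrete examples from the list below: PMINTENSET and PMINTENCLR
     * are case (a) (PL1_RW only), PMUSERENR is case (b) (PL0_R | PL1_RW),
     * and PMCNTENSET, PMCCNTR and friends are case (c) (PL0_RW gated by
     * the pmreg_access* helpers).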
2092 */ 2093 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 2094 .access = PL0_RW, .type = ARM_CP_ALIAS, 2095 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2096 .writefn = pmcntenset_write, 2097 .accessfn = pmreg_access, 2098 .raw_writefn = raw_write }, 2099 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 2100 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 2101 .access = PL0_RW, .accessfn = pmreg_access, 2102 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 2103 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 2104 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 2105 .access = PL0_RW, 2106 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 2107 .accessfn = pmreg_access, 2108 .writefn = pmcntenclr_write, 2109 .type = ARM_CP_ALIAS }, 2110 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 2111 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 2112 .access = PL0_RW, .accessfn = pmreg_access, 2113 .type = ARM_CP_ALIAS, 2114 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 2115 .writefn = pmcntenclr_write }, 2116 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 2117 .access = PL0_RW, .type = ARM_CP_IO, 2118 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2119 .accessfn = pmreg_access, 2120 .writefn = pmovsr_write, 2121 .raw_writefn = raw_write }, 2122 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 2123 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 2124 .access = PL0_RW, .accessfn = pmreg_access, 2125 .type = ARM_CP_ALIAS | ARM_CP_IO, 2126 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2127 .writefn = pmovsr_write, 2128 .raw_writefn = raw_write }, 2129 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 2130 .access = PL0_W, .accessfn = pmreg_access_swinc, 2131 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2132 .writefn = pmswinc_write }, 2133 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 2134 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2135 .access = PL0_W, .accessfn = pmreg_access_swinc, 2136 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2137 .writefn = pmswinc_write }, 2138 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2139 .access = PL0_RW, .type = ARM_CP_ALIAS, 2140 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2141 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2142 .raw_writefn = raw_write}, 2143 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2144 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2145 .access = PL0_RW, .accessfn = pmreg_access_selr, 2146 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2147 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2148 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2149 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2150 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2151 .accessfn = pmreg_access_ccntr }, 2152 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2153 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2154 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2155 .type = ARM_CP_IO, 2156 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2157 .readfn = pmccntr_read, .writefn = pmccntr_write, 2158 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2159 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2160 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2161 .access = PL0_RW, .accessfn = pmreg_access, 2162 .type = ARM_CP_ALIAS | ARM_CP_IO, 2163 .resetvalue = 0, }, 2164 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2165 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2166 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2167 .access = PL0_RW, .accessfn = pmreg_access, 2168 .type = ARM_CP_IO, 2169 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2170 .resetvalue = 0, }, 2171 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2172 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2173 .accessfn = pmreg_access, 2174 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2175 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2176 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2177 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2178 .accessfn = pmreg_access, 2179 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2180 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2181 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2182 .accessfn = pmreg_access_xevcntr, 2183 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2184 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2185 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2186 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2187 .accessfn = pmreg_access_xevcntr, 2188 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2189 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2190 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2191 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2192 .resetvalue = 0, 2193 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2194 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2195 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2196 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2197 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2198 .resetvalue = 0, 2199 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2200 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2201 .access = PL1_RW, .accessfn = access_tpm, 2202 .type = ARM_CP_ALIAS | ARM_CP_IO, 2203 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2204 .resetvalue = 0, 2205 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2206 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2207 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2208 .access = PL1_RW, .accessfn = access_tpm, 2209 .type = ARM_CP_IO, 2210 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2211 .writefn = pmintenset_write, .raw_writefn = raw_write, 2212 .resetvalue = 0x0 }, 2213 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2214 .access = PL1_RW, .accessfn = access_tpm, 2215 .type = ARM_CP_ALIAS | ARM_CP_IO, 2216 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2217 .writefn = pmintenclr_write, }, 2218 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2219 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2220 .access = PL1_RW, .accessfn = access_tpm, 2221 .type = ARM_CP_ALIAS | ARM_CP_IO, 2222 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2223 .writefn = pmintenclr_write }, 2224 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2225 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2226 .access = PL1_R, 2227 .accessfn = access_aa64_tid2, 2228 .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2229 { 
.name = "CSSELR", .state = ARM_CP_STATE_BOTH, 2230 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2231 .access = PL1_RW, 2232 .accessfn = access_aa64_tid2, 2233 .writefn = csselr_write, .resetvalue = 0, 2234 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2235 offsetof(CPUARMState, cp15.csselr_ns) } }, 2236 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2237 * just RAZ for all cores: 2238 */ 2239 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2240 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2241 .access = PL1_R, .type = ARM_CP_CONST, 2242 .accessfn = access_aa64_tid1, 2243 .resetvalue = 0 }, 2244 /* Auxiliary fault status registers: these also are IMPDEF, and we 2245 * choose to RAZ/WI for all cores. 2246 */ 2247 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2248 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2249 .access = PL1_RW, .accessfn = access_tvm_trvm, 2250 .type = ARM_CP_CONST, .resetvalue = 0 }, 2251 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2252 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2253 .access = PL1_RW, .accessfn = access_tvm_trvm, 2254 .type = ARM_CP_CONST, .resetvalue = 0 }, 2255 /* MAIR can just read-as-written because we don't implement caches 2256 * and so don't need to care about memory attributes. 2257 */ 2258 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2259 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2260 .access = PL1_RW, .accessfn = access_tvm_trvm, 2261 .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2262 .resetvalue = 0 }, 2263 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2264 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2265 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2266 .resetvalue = 0 }, 2267 /* For non-long-descriptor page tables these are PRRR and NMRR; 2268 * regardless they still act as reads-as-written for QEMU. 2269 */ 2270 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2271 * allows them to assign the correct fieldoffset based on the endianness 2272 * handled in the field definitions. 
2273 */ 2274 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2275 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2276 .access = PL1_RW, .accessfn = access_tvm_trvm, 2277 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2278 offsetof(CPUARMState, cp15.mair0_ns) }, 2279 .resetfn = arm_cp_reset_ignore }, 2280 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2281 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, 2282 .access = PL1_RW, .accessfn = access_tvm_trvm, 2283 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2284 offsetof(CPUARMState, cp15.mair1_ns) }, 2285 .resetfn = arm_cp_reset_ignore }, 2286 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2287 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2288 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2289 /* 32 bit ITLB invalidates */ 2290 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2291 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2292 .writefn = tlbiall_write }, 2293 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2294 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2295 .writefn = tlbimva_write }, 2296 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2297 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2298 .writefn = tlbiasid_write }, 2299 /* 32 bit DTLB invalidates */ 2300 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2301 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2302 .writefn = tlbiall_write }, 2303 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2304 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2305 .writefn = tlbimva_write }, 2306 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2307 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2308 .writefn = tlbiasid_write }, 2309 /* 32 bit TLB invalidates */ 2310 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2311 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2312 .writefn = tlbiall_write }, 2313 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2314 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2315 .writefn = tlbimva_write }, 2316 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2317 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2318 .writefn = tlbiasid_write }, 2319 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2320 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2321 .writefn = tlbimvaa_write }, 2322 REGINFO_SENTINEL 2323 }; 2324 2325 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2326 /* 32 bit TLB invalidates, Inner Shareable */ 2327 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2328 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2329 .writefn = tlbiall_is_write }, 2330 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2331 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2332 .writefn = tlbimva_is_write }, 2333 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2334 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 2335 .writefn = tlbiasid_is_write }, 2336 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2337 .type = ARM_CP_NO_RAW, .access 
= PL1_W, .accessfn = access_ttlb, 2338 .writefn = tlbimvaa_is_write }, 2339 REGINFO_SENTINEL 2340 }; 2341 2342 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2343 /* PMOVSSET is not implemented in v7 before v7ve */ 2344 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2345 .access = PL0_RW, .accessfn = pmreg_access, 2346 .type = ARM_CP_ALIAS | ARM_CP_IO, 2347 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2348 .writefn = pmovsset_write, 2349 .raw_writefn = raw_write }, 2350 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2351 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2352 .access = PL0_RW, .accessfn = pmreg_access, 2353 .type = ARM_CP_ALIAS | ARM_CP_IO, 2354 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2355 .writefn = pmovsset_write, 2356 .raw_writefn = raw_write }, 2357 REGINFO_SENTINEL 2358 }; 2359 2360 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2361 uint64_t value) 2362 { 2363 value &= 1; 2364 env->teecr = value; 2365 } 2366 2367 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2368 bool isread) 2369 { 2370 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2371 return CP_ACCESS_TRAP; 2372 } 2373 return CP_ACCESS_OK; 2374 } 2375 2376 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2377 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2378 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2379 .resetvalue = 0, 2380 .writefn = teecr_write }, 2381 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2382 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2383 .accessfn = teehbr_access, .resetvalue = 0 }, 2384 REGINFO_SENTINEL 2385 }; 2386 2387 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2388 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2389 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2390 .access = PL0_RW, 2391 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2392 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2393 .access = PL0_RW, 2394 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2395 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2396 .resetfn = arm_cp_reset_ignore }, 2397 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2398 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2399 .access = PL0_R|PL1_W, 2400 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2401 .resetvalue = 0}, 2402 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2403 .access = PL0_R|PL1_W, 2404 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2405 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2406 .resetfn = arm_cp_reset_ignore }, 2407 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2408 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2409 .access = PL1_RW, 2410 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2411 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2412 .access = PL1_RW, 2413 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2414 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2415 .resetvalue = 0 }, 2416 REGINFO_SENTINEL 2417 }; 2418 2419 #ifndef CONFIG_USER_ONLY 2420 2421 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2422 bool isread) 2423 { 2424 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 
2425 * Writable only at the highest implemented exception level. 2426 */ 2427 int el = arm_current_el(env); 2428 uint64_t hcr; 2429 uint32_t cntkctl; 2430 2431 switch (el) { 2432 case 0: 2433 hcr = arm_hcr_el2_eff(env); 2434 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2435 cntkctl = env->cp15.cnthctl_el2; 2436 } else { 2437 cntkctl = env->cp15.c14_cntkctl; 2438 } 2439 if (!extract32(cntkctl, 0, 2)) { 2440 return CP_ACCESS_TRAP; 2441 } 2442 break; 2443 case 1: 2444 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2445 arm_is_secure_below_el3(env)) { 2446 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ 2447 return CP_ACCESS_TRAP_UNCATEGORIZED; 2448 } 2449 break; 2450 case 2: 2451 case 3: 2452 break; 2453 } 2454 2455 if (!isread && el < arm_highest_el(env)) { 2456 return CP_ACCESS_TRAP_UNCATEGORIZED; 2457 } 2458 2459 return CP_ACCESS_OK; 2460 } 2461 2462 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2463 bool isread) 2464 { 2465 unsigned int cur_el = arm_current_el(env); 2466 bool secure = arm_is_secure(env); 2467 uint64_t hcr = arm_hcr_el2_eff(env); 2468 2469 switch (cur_el) { 2470 case 0: 2471 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ 2472 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2473 return (extract32(env->cp15.cnthctl_el2, timeridx, 1) 2474 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2475 } 2476 2477 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2478 if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2479 return CP_ACCESS_TRAP; 2480 } 2481 2482 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ 2483 if (hcr & HCR_E2H) { 2484 if (timeridx == GTIMER_PHYS && 2485 !extract32(env->cp15.cnthctl_el2, 10, 1)) { 2486 return CP_ACCESS_TRAP_EL2; 2487 } 2488 } else { 2489 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2490 if (arm_feature(env, ARM_FEATURE_EL2) && 2491 timeridx == GTIMER_PHYS && !secure && 2492 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2493 return CP_ACCESS_TRAP_EL2; 2494 } 2495 } 2496 break; 2497 2498 case 1: 2499 /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ 2500 if (arm_feature(env, ARM_FEATURE_EL2) && 2501 timeridx == GTIMER_PHYS && !secure && 2502 (hcr & HCR_E2H 2503 ? !extract32(env->cp15.cnthctl_el2, 10, 1) 2504 : !extract32(env->cp15.cnthctl_el2, 0, 1))) { 2505 return CP_ACCESS_TRAP_EL2; 2506 } 2507 break; 2508 } 2509 return CP_ACCESS_OK; 2510 } 2511 2512 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2513 bool isread) 2514 { 2515 unsigned int cur_el = arm_current_el(env); 2516 bool secure = arm_is_secure(env); 2517 uint64_t hcr = arm_hcr_el2_eff(env); 2518 2519 switch (cur_el) { 2520 case 0: 2521 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2522 /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ 2523 return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) 2524 ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); 2525 } 2526 2527 /* 2528 * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from 2529 * EL0 if EL0[PV]TEN is zero. 2530 */ 2531 if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2532 return CP_ACCESS_TRAP; 2533 } 2534 /* fall through */ 2535 2536 case 1: 2537 if (arm_feature(env, ARM_FEATURE_EL2) && 2538 timeridx == GTIMER_PHYS && !secure) { 2539 if (hcr & HCR_E2H) { 2540 /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. 
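                 * With E2H the EL1 physical timer and counter enables sit
                 * at CNTHCTL_EL2 bits [11:10]; without E2H they are the
                 * EL1PCEN/EL1PCTEN bits at [1:0], which is why the two
                 * branches test different bit positions.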
*/ 2541 if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { 2542 return CP_ACCESS_TRAP_EL2; 2543 } 2544 } else { 2545 /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ 2546 if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { 2547 return CP_ACCESS_TRAP_EL2; 2548 } 2549 } 2550 } 2551 break; 2552 } 2553 return CP_ACCESS_OK; 2554 } 2555 2556 static CPAccessResult gt_pct_access(CPUARMState *env, 2557 const ARMCPRegInfo *ri, 2558 bool isread) 2559 { 2560 return gt_counter_access(env, GTIMER_PHYS, isread); 2561 } 2562 2563 static CPAccessResult gt_vct_access(CPUARMState *env, 2564 const ARMCPRegInfo *ri, 2565 bool isread) 2566 { 2567 return gt_counter_access(env, GTIMER_VIRT, isread); 2568 } 2569 2570 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2571 bool isread) 2572 { 2573 return gt_timer_access(env, GTIMER_PHYS, isread); 2574 } 2575 2576 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2577 bool isread) 2578 { 2579 return gt_timer_access(env, GTIMER_VIRT, isread); 2580 } 2581 2582 static CPAccessResult gt_stimer_access(CPUARMState *env, 2583 const ARMCPRegInfo *ri, 2584 bool isread) 2585 { 2586 /* The AArch64 register view of the secure physical timer is 2587 * always accessible from EL3, and configurably accessible from 2588 * Secure EL1. 2589 */ 2590 switch (arm_current_el(env)) { 2591 case 1: 2592 if (!arm_is_secure(env)) { 2593 return CP_ACCESS_TRAP; 2594 } 2595 if (!(env->cp15.scr_el3 & SCR_ST)) { 2596 return CP_ACCESS_TRAP_EL3; 2597 } 2598 return CP_ACCESS_OK; 2599 case 0: 2600 case 2: 2601 return CP_ACCESS_TRAP; 2602 case 3: 2603 return CP_ACCESS_OK; 2604 default: 2605 g_assert_not_reached(); 2606 } 2607 } 2608 2609 static uint64_t gt_get_countervalue(CPUARMState *env) 2610 { 2611 ARMCPU *cpu = env_archcpu(env); 2612 2613 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); 2614 } 2615 2616 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2617 { 2618 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2619 2620 if (gt->ctl & 1) { 2621 /* Timer enabled: calculate and set current ISTATUS, irq, and 2622 * reset timer to when ISTATUS next has to change 2623 */ 2624 uint64_t offset = timeridx == GTIMER_VIRT ? 2625 cpu->env.cp15.cntvoff_el2 : 0; 2626 uint64_t count = gt_get_countervalue(&cpu->env); 2627 /* Note that this must be unsigned 64 bit arithmetic: */ 2628 int istatus = count - offset >= gt->cval; 2629 uint64_t nexttick; 2630 int irqstate; 2631 2632 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2633 2634 irqstate = (istatus && !(gt->ctl & 2)); 2635 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2636 2637 if (istatus) { 2638 /* Next transition is when count rolls back over to zero */ 2639 nexttick = UINT64_MAX; 2640 } else { 2641 /* Next transition is when we hit cval */ 2642 nexttick = gt->cval + offset; 2643 } 2644 /* Note that the desired next expiry time might be beyond the 2645 * signed-64-bit range of a QEMUTimer -- in this case we just 2646 * set the timer for as far in the future as possible. When the 2647 * timer expires we will reset the timer for any remaining period. 
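         * For example, if the timer ticks every 16ns (a 62.5MHz CNTFRQ),
         * any nexttick above INT64_MAX / 16 cannot be expressed as a
         * nanosecond deadline, so we clamp the QEMUTimer to INT64_MAX and
         * let the expiry callback re-arm it for the remaining period.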
2648 */ 2649 if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { 2650 timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); 2651 } else { 2652 timer_mod(cpu->gt_timer[timeridx], nexttick); 2653 } 2654 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2655 } else { 2656 /* Timer disabled: ISTATUS and timer output always clear */ 2657 gt->ctl &= ~4; 2658 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2659 timer_del(cpu->gt_timer[timeridx]); 2660 trace_arm_gt_recalc_disabled(timeridx); 2661 } 2662 } 2663 2664 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2665 int timeridx) 2666 { 2667 ARMCPU *cpu = env_archcpu(env); 2668 2669 timer_del(cpu->gt_timer[timeridx]); 2670 } 2671 2672 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2673 { 2674 return gt_get_countervalue(env); 2675 } 2676 2677 static uint64_t gt_virt_cnt_offset(CPUARMState *env) 2678 { 2679 uint64_t hcr; 2680 2681 switch (arm_current_el(env)) { 2682 case 2: 2683 hcr = arm_hcr_el2_eff(env); 2684 if (hcr & HCR_E2H) { 2685 return 0; 2686 } 2687 break; 2688 case 0: 2689 hcr = arm_hcr_el2_eff(env); 2690 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 2691 return 0; 2692 } 2693 break; 2694 } 2695 2696 return env->cp15.cntvoff_el2; 2697 } 2698 2699 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2700 { 2701 return gt_get_countervalue(env) - gt_virt_cnt_offset(env); 2702 } 2703 2704 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2705 int timeridx, 2706 uint64_t value) 2707 { 2708 trace_arm_gt_cval_write(timeridx, value); 2709 env->cp15.c14_timer[timeridx].cval = value; 2710 gt_recalc_timer(env_archcpu(env), timeridx); 2711 } 2712 2713 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2714 int timeridx) 2715 { 2716 uint64_t offset = 0; 2717 2718 switch (timeridx) { 2719 case GTIMER_VIRT: 2720 case GTIMER_HYPVIRT: 2721 offset = gt_virt_cnt_offset(env); 2722 break; 2723 } 2724 2725 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2726 (gt_get_countervalue(env) - offset)); 2727 } 2728 2729 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2730 int timeridx, 2731 uint64_t value) 2732 { 2733 uint64_t offset = 0; 2734 2735 switch (timeridx) { 2736 case GTIMER_VIRT: 2737 case GTIMER_HYPVIRT: 2738 offset = gt_virt_cnt_offset(env); 2739 break; 2740 } 2741 2742 trace_arm_gt_tval_write(timeridx, value); 2743 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2744 sextract64(value, 0, 32); 2745 gt_recalc_timer(env_archcpu(env), timeridx); 2746 } 2747 2748 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2749 int timeridx, 2750 uint64_t value) 2751 { 2752 ARMCPU *cpu = env_archcpu(env); 2753 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2754 2755 trace_arm_gt_ctl_write(timeridx, value); 2756 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2757 if ((oldval ^ value) & 1) { 2758 /* Enable toggled */ 2759 gt_recalc_timer(cpu, timeridx); 2760 } else if ((oldval ^ value) & 2) { 2761 /* IMASK toggled: don't need to recalculate, 2762 * just set the interrupt line based on ISTATUS 2763 */ 2764 int irqstate = (oldval & 4) && !(value & 2); 2765 2766 trace_arm_gt_imask_toggle(timeridx, irqstate); 2767 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2768 } 2769 } 2770 2771 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2772 { 2773 gt_timer_reset(env, ri, GTIMER_PHYS); 2774 } 2775 2776 static void 
gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2777 uint64_t value) 2778 { 2779 gt_cval_write(env, ri, GTIMER_PHYS, value); 2780 } 2781 2782 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2783 { 2784 return gt_tval_read(env, ri, GTIMER_PHYS); 2785 } 2786 2787 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2788 uint64_t value) 2789 { 2790 gt_tval_write(env, ri, GTIMER_PHYS, value); 2791 } 2792 2793 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2794 uint64_t value) 2795 { 2796 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2797 } 2798 2799 static int gt_phys_redir_timeridx(CPUARMState *env) 2800 { 2801 switch (arm_mmu_idx(env)) { 2802 case ARMMMUIdx_E20_0: 2803 case ARMMMUIdx_E20_2: 2804 case ARMMMUIdx_E20_2_PAN: 2805 return GTIMER_HYP; 2806 default: 2807 return GTIMER_PHYS; 2808 } 2809 } 2810 2811 static int gt_virt_redir_timeridx(CPUARMState *env) 2812 { 2813 switch (arm_mmu_idx(env)) { 2814 case ARMMMUIdx_E20_0: 2815 case ARMMMUIdx_E20_2: 2816 case ARMMMUIdx_E20_2_PAN: 2817 return GTIMER_HYPVIRT; 2818 default: 2819 return GTIMER_VIRT; 2820 } 2821 } 2822 2823 static uint64_t gt_phys_redir_cval_read(CPUARMState *env, 2824 const ARMCPRegInfo *ri) 2825 { 2826 int timeridx = gt_phys_redir_timeridx(env); 2827 return env->cp15.c14_timer[timeridx].cval; 2828 } 2829 2830 static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2831 uint64_t value) 2832 { 2833 int timeridx = gt_phys_redir_timeridx(env); 2834 gt_cval_write(env, ri, timeridx, value); 2835 } 2836 2837 static uint64_t gt_phys_redir_tval_read(CPUARMState *env, 2838 const ARMCPRegInfo *ri) 2839 { 2840 int timeridx = gt_phys_redir_timeridx(env); 2841 return gt_tval_read(env, ri, timeridx); 2842 } 2843 2844 static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2845 uint64_t value) 2846 { 2847 int timeridx = gt_phys_redir_timeridx(env); 2848 gt_tval_write(env, ri, timeridx, value); 2849 } 2850 2851 static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, 2852 const ARMCPRegInfo *ri) 2853 { 2854 int timeridx = gt_phys_redir_timeridx(env); 2855 return env->cp15.c14_timer[timeridx].ctl; 2856 } 2857 2858 static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2859 uint64_t value) 2860 { 2861 int timeridx = gt_phys_redir_timeridx(env); 2862 gt_ctl_write(env, ri, timeridx, value); 2863 } 2864 2865 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2866 { 2867 gt_timer_reset(env, ri, GTIMER_VIRT); 2868 } 2869 2870 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2871 uint64_t value) 2872 { 2873 gt_cval_write(env, ri, GTIMER_VIRT, value); 2874 } 2875 2876 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2877 { 2878 return gt_tval_read(env, ri, GTIMER_VIRT); 2879 } 2880 2881 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2882 uint64_t value) 2883 { 2884 gt_tval_write(env, ri, GTIMER_VIRT, value); 2885 } 2886 2887 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2888 uint64_t value) 2889 { 2890 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2891 } 2892 2893 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2894 uint64_t value) 2895 { 2896 ARMCPU *cpu = env_archcpu(env); 2897 2898 trace_arm_gt_cntvoff_write(value); 2899 raw_write(env, ri, value); 2900 gt_recalc_timer(cpu, GTIMER_VIRT); 2901 } 2902 2903 static uint64_t gt_virt_redir_cval_read(CPUARMState 
*env, 2904 const ARMCPRegInfo *ri) 2905 { 2906 int timeridx = gt_virt_redir_timeridx(env); 2907 return env->cp15.c14_timer[timeridx].cval; 2908 } 2909 2910 static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2911 uint64_t value) 2912 { 2913 int timeridx = gt_virt_redir_timeridx(env); 2914 gt_cval_write(env, ri, timeridx, value); 2915 } 2916 2917 static uint64_t gt_virt_redir_tval_read(CPUARMState *env, 2918 const ARMCPRegInfo *ri) 2919 { 2920 int timeridx = gt_virt_redir_timeridx(env); 2921 return gt_tval_read(env, ri, timeridx); 2922 } 2923 2924 static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2925 uint64_t value) 2926 { 2927 int timeridx = gt_virt_redir_timeridx(env); 2928 gt_tval_write(env, ri, timeridx, value); 2929 } 2930 2931 static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, 2932 const ARMCPRegInfo *ri) 2933 { 2934 int timeridx = gt_virt_redir_timeridx(env); 2935 return env->cp15.c14_timer[timeridx].ctl; 2936 } 2937 2938 static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2939 uint64_t value) 2940 { 2941 int timeridx = gt_virt_redir_timeridx(env); 2942 gt_ctl_write(env, ri, timeridx, value); 2943 } 2944 2945 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2946 { 2947 gt_timer_reset(env, ri, GTIMER_HYP); 2948 } 2949 2950 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2951 uint64_t value) 2952 { 2953 gt_cval_write(env, ri, GTIMER_HYP, value); 2954 } 2955 2956 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2957 { 2958 return gt_tval_read(env, ri, GTIMER_HYP); 2959 } 2960 2961 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2962 uint64_t value) 2963 { 2964 gt_tval_write(env, ri, GTIMER_HYP, value); 2965 } 2966 2967 static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2968 uint64_t value) 2969 { 2970 gt_ctl_write(env, ri, GTIMER_HYP, value); 2971 } 2972 2973 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2974 { 2975 gt_timer_reset(env, ri, GTIMER_SEC); 2976 } 2977 2978 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2979 uint64_t value) 2980 { 2981 gt_cval_write(env, ri, GTIMER_SEC, value); 2982 } 2983 2984 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2985 { 2986 return gt_tval_read(env, ri, GTIMER_SEC); 2987 } 2988 2989 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2990 uint64_t value) 2991 { 2992 gt_tval_write(env, ri, GTIMER_SEC, value); 2993 } 2994 2995 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2996 uint64_t value) 2997 { 2998 gt_ctl_write(env, ri, GTIMER_SEC, value); 2999 } 3000 3001 static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3002 { 3003 gt_timer_reset(env, ri, GTIMER_HYPVIRT); 3004 } 3005 3006 static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3007 uint64_t value) 3008 { 3009 gt_cval_write(env, ri, GTIMER_HYPVIRT, value); 3010 } 3011 3012 static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 3013 { 3014 return gt_tval_read(env, ri, GTIMER_HYPVIRT); 3015 } 3016 3017 static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 3018 uint64_t value) 3019 { 3020 gt_tval_write(env, ri, GTIMER_HYPVIRT, value); 3021 } 3022 3023 static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 3024 uint64_t value) 3025 { 3026 gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); 
3027 } 3028 3029 void arm_gt_ptimer_cb(void *opaque) 3030 { 3031 ARMCPU *cpu = opaque; 3032 3033 gt_recalc_timer(cpu, GTIMER_PHYS); 3034 } 3035 3036 void arm_gt_vtimer_cb(void *opaque) 3037 { 3038 ARMCPU *cpu = opaque; 3039 3040 gt_recalc_timer(cpu, GTIMER_VIRT); 3041 } 3042 3043 void arm_gt_htimer_cb(void *opaque) 3044 { 3045 ARMCPU *cpu = opaque; 3046 3047 gt_recalc_timer(cpu, GTIMER_HYP); 3048 } 3049 3050 void arm_gt_stimer_cb(void *opaque) 3051 { 3052 ARMCPU *cpu = opaque; 3053 3054 gt_recalc_timer(cpu, GTIMER_SEC); 3055 } 3056 3057 void arm_gt_hvtimer_cb(void *opaque) 3058 { 3059 ARMCPU *cpu = opaque; 3060 3061 gt_recalc_timer(cpu, GTIMER_HYPVIRT); 3062 } 3063 3064 static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) 3065 { 3066 ARMCPU *cpu = env_archcpu(env); 3067 3068 cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; 3069 } 3070 3071 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3072 /* Note that CNTFRQ is purely reads-as-written for the benefit 3073 * of software; writing it doesn't actually change the timer frequency. 3074 * Our reset value matches the fixed frequency we implement the timer at. 3075 */ 3076 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 3077 .type = ARM_CP_ALIAS, 3078 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3079 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 3080 }, 3081 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3082 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3083 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 3084 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3085 .resetfn = arm_gt_cntfrq_reset, 3086 }, 3087 /* overall control: mostly access permissions */ 3088 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 3089 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 3090 .access = PL1_RW, 3091 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 3092 .resetvalue = 0, 3093 }, 3094 /* per-timer control */ 3095 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3096 .secure = ARM_CP_SECSTATE_NS, 3097 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3098 .accessfn = gt_ptimer_access, 3099 .fieldoffset = offsetoflow32(CPUARMState, 3100 cp15.c14_timer[GTIMER_PHYS].ctl), 3101 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3102 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3103 }, 3104 { .name = "CNTP_CTL_S", 3105 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 3106 .secure = ARM_CP_SECSTATE_S, 3107 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3108 .accessfn = gt_ptimer_access, 3109 .fieldoffset = offsetoflow32(CPUARMState, 3110 cp15.c14_timer[GTIMER_SEC].ctl), 3111 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3112 }, 3113 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 3114 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 3115 .type = ARM_CP_IO, .access = PL0_RW, 3116 .accessfn = gt_ptimer_access, 3117 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 3118 .resetvalue = 0, 3119 .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, 3120 .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, 3121 }, 3122 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 3123 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, 3124 .accessfn = gt_vtimer_access, 3125 .fieldoffset = offsetoflow32(CPUARMState, 3126 cp15.c14_timer[GTIMER_VIRT].ctl), 3127 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3128 .writefn 
= gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3129 }, 3130 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 3131 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 3132 .type = ARM_CP_IO, .access = PL0_RW, 3133 .accessfn = gt_vtimer_access, 3134 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 3135 .resetvalue = 0, 3136 .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, 3137 .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, 3138 }, 3139 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 3140 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3141 .secure = ARM_CP_SECSTATE_NS, 3142 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3143 .accessfn = gt_ptimer_access, 3144 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3145 }, 3146 { .name = "CNTP_TVAL_S", 3147 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 3148 .secure = ARM_CP_SECSTATE_S, 3149 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3150 .accessfn = gt_ptimer_access, 3151 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 3152 }, 3153 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3154 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 3155 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3156 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 3157 .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, 3158 }, 3159 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 3160 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3161 .accessfn = gt_vtimer_access, 3162 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3163 }, 3164 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 3165 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 3166 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, 3167 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 3168 .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, 3169 }, 3170 /* The counter itself */ 3171 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 3172 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3173 .accessfn = gt_pct_access, 3174 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 3175 }, 3176 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 3177 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 3178 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3179 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 3180 }, 3181 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 3182 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 3183 .accessfn = gt_vct_access, 3184 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 3185 }, 3186 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3187 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3188 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3189 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 3190 }, 3191 /* Comparison value, indicating when the timer goes off */ 3192 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 3193 .secure = ARM_CP_SECSTATE_NS, 3194 .access = PL0_RW, 3195 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3196 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3197 .accessfn = gt_ptimer_access, 3198 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3199 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3200 }, 3201 { .name = 
"CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 3202 .secure = ARM_CP_SECSTATE_S, 3203 .access = PL0_RW, 3204 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3205 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3206 .accessfn = gt_ptimer_access, 3207 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3208 }, 3209 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3210 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 3211 .access = PL0_RW, 3212 .type = ARM_CP_IO, 3213 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 3214 .resetvalue = 0, .accessfn = gt_ptimer_access, 3215 .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, 3216 .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, 3217 }, 3218 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 3219 .access = PL0_RW, 3220 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 3221 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3222 .accessfn = gt_vtimer_access, 3223 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3224 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3225 }, 3226 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 3227 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 3228 .access = PL0_RW, 3229 .type = ARM_CP_IO, 3230 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 3231 .resetvalue = 0, .accessfn = gt_vtimer_access, 3232 .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, 3233 .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, 3234 }, 3235 /* Secure timer -- this is actually restricted to only EL3 3236 * and configurably Secure-EL1 via the accessfn. 3237 */ 3238 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 3239 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 3240 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 3241 .accessfn = gt_stimer_access, 3242 .readfn = gt_sec_tval_read, 3243 .writefn = gt_sec_tval_write, 3244 .resetfn = gt_sec_timer_reset, 3245 }, 3246 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 3247 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 3248 .type = ARM_CP_IO, .access = PL1_RW, 3249 .accessfn = gt_stimer_access, 3250 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 3251 .resetvalue = 0, 3252 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 3253 }, 3254 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 3255 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 3256 .type = ARM_CP_IO, .access = PL1_RW, 3257 .accessfn = gt_stimer_access, 3258 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 3259 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 3260 }, 3261 REGINFO_SENTINEL 3262 }; 3263 3264 static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, 3265 bool isread) 3266 { 3267 if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { 3268 return CP_ACCESS_TRAP; 3269 } 3270 return CP_ACCESS_OK; 3271 } 3272 3273 #else 3274 3275 /* In user-mode most of the generic timer registers are inaccessible 3276 * however modern kernels (4.12+) allow access to cntvct_el0 3277 */ 3278 3279 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 3280 { 3281 ARMCPU *cpu = env_archcpu(env); 3282 3283 /* Currently we have no support for QEMUTimer in linux-user so we 3284 * can't call gt_get_countervalue(env), instead we directly 3285 * call the lower level functions. 
3286 */ 3287 return cpu_get_clock() / gt_cntfrq_period_ns(cpu); 3288 } 3289 3290 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 3291 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 3292 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 3293 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 3294 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 3295 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 3296 }, 3297 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 3298 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 3299 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 3300 .readfn = gt_virt_cnt_read, 3301 }, 3302 REGINFO_SENTINEL 3303 }; 3304 3305 #endif 3306 3307 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3308 { 3309 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3310 raw_write(env, ri, value); 3311 } else if (arm_feature(env, ARM_FEATURE_V7)) { 3312 raw_write(env, ri, value & 0xfffff6ff); 3313 } else { 3314 raw_write(env, ri, value & 0xfffff1ff); 3315 } 3316 } 3317 3318 #ifndef CONFIG_USER_ONLY 3319 /* get_phys_addr() isn't present for user-mode-only targets */ 3320 3321 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 3322 bool isread) 3323 { 3324 if (ri->opc2 & 4) { 3325 /* The ATS12NSO* operations must trap to EL3 if executed in 3326 * Secure EL1 (which can only happen if EL3 is AArch64). 3327 * They are simply UNDEF if executed from NS EL1. 3328 * They function normally from EL2 or EL3. 3329 */ 3330 if (arm_current_el(env) == 1) { 3331 if (arm_is_secure_below_el3(env)) { 3332 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 3333 } 3334 return CP_ACCESS_TRAP_UNCATEGORIZED; 3335 } 3336 } 3337 return CP_ACCESS_OK; 3338 } 3339 3340 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 3341 MMUAccessType access_type, ARMMMUIdx mmu_idx) 3342 { 3343 hwaddr phys_addr; 3344 target_ulong page_size; 3345 int prot; 3346 bool ret; 3347 uint64_t par64; 3348 bool format64 = false; 3349 MemTxAttrs attrs = {}; 3350 ARMMMUFaultInfo fi = {}; 3351 ARMCacheAttrs cacheattrs = {}; 3352 3353 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 3354 &prot, &page_size, &fi, &cacheattrs); 3355 3356 if (ret) { 3357 /* 3358 * Some kinds of translation fault must cause exceptions rather 3359 * than being reported in the PAR. 3360 */ 3361 int current_el = arm_current_el(env); 3362 int target_el; 3363 uint32_t syn, fsr, fsc; 3364 bool take_exc = false; 3365 3366 if (fi.s1ptw && current_el == 1 && !arm_is_secure(env) 3367 && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 3368 /* 3369 * Synchronous stage 2 fault on an access made as part of the 3370 * translation table walk for AT S1E0* or AT S1E1* insn 3371 * executed from NS EL1. If this is a synchronous external abort 3372 * and SCR_EL3.EA == 1, then we take a synchronous external abort 3373 * to EL3. Otherwise the fault is taken as an exception to EL2, 3374 * and HPFAR_EL2 holds the faulting IPA. 3375 */ 3376 if (fi.type == ARMFault_SyncExternalOnWalk && 3377 (env->cp15.scr_el3 & SCR_EA)) { 3378 target_el = 3; 3379 } else { 3380 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; 3381 target_el = 2; 3382 } 3383 take_exc = true; 3384 } else if (fi.type == ARMFault_SyncExternalOnWalk) { 3385 /* 3386 * Synchronous external aborts during a translation table walk 3387 * are taken as Data Abort exceptions. 
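             * A stage 2 walk fault is routed to EL2 (or stays at EL3 if we
             * are already there); anything else goes to the usual
             * exception_target_el().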
3388 */ 3389 if (fi.stage2) { 3390 if (current_el == 3) { 3391 target_el = 3; 3392 } else { 3393 target_el = 2; 3394 } 3395 } else { 3396 target_el = exception_target_el(env); 3397 } 3398 take_exc = true; 3399 } 3400 3401 if (take_exc) { 3402 /* Construct FSR and FSC using same logic as arm_deliver_fault() */ 3403 if (target_el == 2 || arm_el_is_aa64(env, target_el) || 3404 arm_s1_regime_using_lpae_format(env, mmu_idx)) { 3405 fsr = arm_fi_to_lfsc(&fi); 3406 fsc = extract32(fsr, 0, 6); 3407 } else { 3408 fsr = arm_fi_to_sfsc(&fi); 3409 fsc = 0x3f; 3410 } 3411 /* 3412 * Report exception with ESR indicating a fault due to a 3413 * translation table walk for a cache maintenance instruction. 3414 */ 3415 syn = syn_data_abort_no_iss(current_el == target_el, 3416 fi.ea, 1, fi.s1ptw, 1, fsc); 3417 env->exception.vaddress = value; 3418 env->exception.fsr = fsr; 3419 raise_exception(env, EXCP_DATA_ABORT, syn, target_el); 3420 } 3421 } 3422 3423 if (is_a64(env)) { 3424 format64 = true; 3425 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 3426 /* 3427 * ATS1Cxx: 3428 * * TTBCR.EAE determines whether the result is returned using the 3429 * 32-bit or the 64-bit PAR format 3430 * * Instructions executed in Hyp mode always use the 64bit format 3431 * 3432 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 3433 * * The Non-secure TTBCR.EAE bit is set to 1 3434 * * The implementation includes EL2, and the value of HCR.VM is 1 3435 * 3436 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 3437 * 3438 * ATS1Hx always uses the 64bit format. 3439 */ 3440 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 3441 3442 if (arm_feature(env, ARM_FEATURE_EL2)) { 3443 if (mmu_idx == ARMMMUIdx_E10_0 || 3444 mmu_idx == ARMMMUIdx_E10_1 || 3445 mmu_idx == ARMMMUIdx_E10_1_PAN) { 3446 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 3447 } else { 3448 format64 |= arm_current_el(env) == 2; 3449 } 3450 } 3451 } 3452 3453 if (format64) { 3454 /* Create a 64-bit PAR */ 3455 par64 = (1 << 11); /* LPAE bit always set */ 3456 if (!ret) { 3457 par64 |= phys_addr & ~0xfffULL; 3458 if (!attrs.secure) { 3459 par64 |= (1 << 9); /* NS */ 3460 } 3461 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 3462 par64 |= cacheattrs.shareability << 7; /* SH */ 3463 } else { 3464 uint32_t fsr = arm_fi_to_lfsc(&fi); 3465 3466 par64 |= 1; /* F */ 3467 par64 |= (fsr & 0x3f) << 1; /* FS */ 3468 if (fi.stage2) { 3469 par64 |= (1 << 9); /* S */ 3470 } 3471 if (fi.s1ptw) { 3472 par64 |= (1 << 8); /* PTW */ 3473 } 3474 } 3475 } else { 3476 /* fsr is a DFSR/IFSR value for the short descriptor 3477 * translation table format (with WnR always clear). 3478 * Convert it to a 32-bit PAR. 3479 */ 3480 if (!ret) { 3481 /* We do not set any attribute bits in the PAR */ 3482 if (page_size == (1 << 24) 3483 && arm_feature(env, ARM_FEATURE_V7)) { 3484 par64 = (phys_addr & 0xff000000) | (1 << 1); 3485 } else { 3486 par64 = phys_addr & 0xfffff000; 3487 } 3488 if (!attrs.secure) { 3489 par64 |= (1 << 9); /* NS */ 3490 } 3491 } else { 3492 uint32_t fsr = arm_fi_to_sfsc(&fi); 3493 3494 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3495 ((fsr & 0xf) << 1) | 1; 3496 } 3497 } 3498 return par64; 3499 } 3500 3501 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3502 { 3503 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 3504 uint64_t par64; 3505 ARMMMUIdx mmu_idx; 3506 int el = arm_current_el(env); 3507 bool secure = arm_is_secure_below_el3(env); 3508 3509 switch (ri->opc2 & 6) { 3510 case 0: 3511 /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ 3512 switch (el) { 3513 case 3: 3514 mmu_idx = ARMMMUIdx_SE3; 3515 break; 3516 case 2: 3517 g_assert(!secure); /* TODO: ARMv8.4-SecEL2 */ 3518 /* fall through */ 3519 case 1: 3520 if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { 3521 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3522 : ARMMMUIdx_Stage1_E1_PAN); 3523 } else { 3524 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3525 } 3526 break; 3527 default: 3528 g_assert_not_reached(); 3529 } 3530 break; 3531 case 2: 3532 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3533 switch (el) { 3534 case 3: 3535 mmu_idx = ARMMMUIdx_SE10_0; 3536 break; 3537 case 2: 3538 mmu_idx = ARMMMUIdx_Stage1_E0; 3539 break; 3540 case 1: 3541 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3542 break; 3543 default: 3544 g_assert_not_reached(); 3545 } 3546 break; 3547 case 4: 3548 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3549 mmu_idx = ARMMMUIdx_E10_1; 3550 break; 3551 case 6: 3552 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3553 mmu_idx = ARMMMUIdx_E10_0; 3554 break; 3555 default: 3556 g_assert_not_reached(); 3557 } 3558 3559 par64 = do_ats_write(env, value, access_type, mmu_idx); 3560 3561 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3562 } 3563 3564 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3565 uint64_t value) 3566 { 3567 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3568 uint64_t par64; 3569 3570 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2); 3571 3572 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3573 } 3574 3575 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3576 bool isread) 3577 { 3578 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 3579 return CP_ACCESS_TRAP; 3580 } 3581 return CP_ACCESS_OK; 3582 } 3583 3584 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3585 uint64_t value) 3586 { 3587 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3588 ARMMMUIdx mmu_idx; 3589 int secure = arm_is_secure_below_el3(env); 3590 3591 switch (ri->opc2 & 6) { 3592 case 0: 3593 switch (ri->opc1) { 3594 case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ 3595 if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { 3596 mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN 3597 : ARMMMUIdx_Stage1_E1_PAN); 3598 } else { 3599 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1; 3600 } 3601 break; 3602 case 4: /* AT S1E2R, AT S1E2W */ 3603 mmu_idx = ARMMMUIdx_E2; 3604 break; 3605 case 6: /* AT S1E3R, AT S1E3W */ 3606 mmu_idx = ARMMMUIdx_SE3; 3607 break; 3608 default: 3609 g_assert_not_reached(); 3610 } 3611 break; 3612 case 2: /* AT S1E0R, AT S1E0W */ 3613 mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0; 3614 break; 3615 case 4: /* AT S12E1R, AT S12E1W */ 3616 mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1; 3617 break; 3618 case 6: /* AT S12E0R, AT S12E0W */ 3619 mmu_idx = secure ? 
ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0; 3620 break; 3621 default: 3622 g_assert_not_reached(); 3623 } 3624 3625 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3626 } 3627 #endif 3628 3629 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3630 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3631 .access = PL1_RW, .resetvalue = 0, 3632 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3633 offsetoflow32(CPUARMState, cp15.par_ns) }, 3634 .writefn = par_write }, 3635 #ifndef CONFIG_USER_ONLY 3636 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3637 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3638 .access = PL1_W, .accessfn = ats_access, 3639 .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 3640 #endif 3641 REGINFO_SENTINEL 3642 }; 3643 3644 /* Return basic MPU access permission bits. */ 3645 static uint32_t simple_mpu_ap_bits(uint32_t val) 3646 { 3647 uint32_t ret; 3648 uint32_t mask; 3649 int i; 3650 ret = 0; 3651 mask = 3; 3652 for (i = 0; i < 16; i += 2) { 3653 ret |= (val >> i) & mask; 3654 mask <<= 2; 3655 } 3656 return ret; 3657 } 3658 3659 /* Pad basic MPU access permission bits to extended format. */ 3660 static uint32_t extended_mpu_ap_bits(uint32_t val) 3661 { 3662 uint32_t ret; 3663 uint32_t mask; 3664 int i; 3665 ret = 0; 3666 mask = 3; 3667 for (i = 0; i < 16; i += 2) { 3668 ret |= (val & mask) << i; 3669 mask <<= 2; 3670 } 3671 return ret; 3672 } 3673 3674 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3675 uint64_t value) 3676 { 3677 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3678 } 3679 3680 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3681 { 3682 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3683 } 3684 3685 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3686 uint64_t value) 3687 { 3688 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3689 } 3690 3691 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3692 { 3693 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3694 } 3695 3696 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3697 { 3698 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3699 3700 if (!u32p) { 3701 return 0; 3702 } 3703 3704 u32p += env->pmsav7.rnr[M_REG_NS]; 3705 return *u32p; 3706 } 3707 3708 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3709 uint64_t value) 3710 { 3711 ARMCPU *cpu = env_archcpu(env); 3712 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3713 3714 if (!u32p) { 3715 return; 3716 } 3717 3718 u32p += env->pmsav7.rnr[M_REG_NS]; 3719 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3720 *u32p = value; 3721 } 3722 3723 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3724 uint64_t value) 3725 { 3726 ARMCPU *cpu = env_archcpu(env); 3727 uint32_t nrgs = cpu->pmsav7_dregion; 3728 3729 if (value >= nrgs) { 3730 qemu_log_mask(LOG_GUEST_ERROR, 3731 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3732 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3733 return; 3734 } 3735 3736 raw_write(env, ri, value); 3737 } 3738 3739 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3740 /* Reset for all these registers is handled in arm_cpu_reset(), 3741 * because the PMSAv7 is also used by M-profile CPUs, which do 3742 * not register cpregs but still need the state to be reset. 
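 * (That is why the entries below use arm_cp_reset_ignore as their resetfn.)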
3743 */ 3744 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3745 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3746 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3747 .readfn = pmsav7_read, .writefn = pmsav7_write, 3748 .resetfn = arm_cp_reset_ignore }, 3749 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3750 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3751 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3752 .readfn = pmsav7_read, .writefn = pmsav7_write, 3753 .resetfn = arm_cp_reset_ignore }, 3754 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3755 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3756 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3757 .readfn = pmsav7_read, .writefn = pmsav7_write, 3758 .resetfn = arm_cp_reset_ignore }, 3759 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3760 .access = PL1_RW, 3761 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3762 .writefn = pmsav7_rgnr_write, 3763 .resetfn = arm_cp_reset_ignore }, 3764 REGINFO_SENTINEL 3765 }; 3766 3767 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3768 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3769 .access = PL1_RW, .type = ARM_CP_ALIAS, 3770 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3771 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3772 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3773 .access = PL1_RW, .type = ARM_CP_ALIAS, 3774 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3775 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3776 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3777 .access = PL1_RW, 3778 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3779 .resetvalue = 0, }, 3780 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3781 .access = PL1_RW, 3782 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3783 .resetvalue = 0, }, 3784 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3785 .access = PL1_RW, 3786 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3787 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3788 .access = PL1_RW, 3789 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3790 /* Protection region base and size registers */ 3791 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3792 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3793 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3794 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3795 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3796 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3797 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3798 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3799 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3800 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3801 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3802 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3803 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3804 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3805 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3806 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3807 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3808 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 3809 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 3810 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3811 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 3812 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 3813 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3814 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 3815 REGINFO_SENTINEL 3816 }; 3817 3818 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 3819 uint64_t value) 3820 { 3821 TCR *tcr = raw_ptr(env, ri); 3822 int maskshift = extract32(value, 0, 3); 3823 3824 if (!arm_feature(env, ARM_FEATURE_V8)) { 3825 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 3826 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 3827 * using Long-desciptor translation table format */ 3828 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 3829 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 3830 /* In an implementation that includes the Security Extensions 3831 * TTBCR has additional fields PD0 [4] and PD1 [5] for 3832 * Short-descriptor translation table format. 3833 */ 3834 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 3835 } else { 3836 value &= TTBCR_N; 3837 } 3838 } 3839 3840 /* Update the masks corresponding to the TCR bank being written 3841 * Note that we always calculate mask and base_mask, but 3842 * they are only used for short-descriptor tables (ie if EAE is 0); 3843 * for long-descriptor tables the TCR fields are used differently 3844 * and the mask and base_mask values are meaningless. 3845 */ 3846 tcr->raw_tcr = value; 3847 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 3848 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 3849 } 3850 3851 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3852 uint64_t value) 3853 { 3854 ARMCPU *cpu = env_archcpu(env); 3855 TCR *tcr = raw_ptr(env, ri); 3856 3857 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3858 /* With LPAE the TTBCR could result in a change of ASID 3859 * via the TTBCR.A1 bit, so do a TLB flush. 3860 */ 3861 tlb_flush(CPU(cpu)); 3862 } 3863 /* Preserve the high half of TCR_EL1, set via TTBCR2. */ 3864 value = deposit64(tcr->raw_tcr, 0, 32, value); 3865 vmsa_ttbcr_raw_write(env, ri, value); 3866 } 3867 3868 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3869 { 3870 TCR *tcr = raw_ptr(env, ri); 3871 3872 /* Reset both the TCR as well as the masks corresponding to the bank of 3873 * the TCR being reset. 3874 */ 3875 tcr->raw_tcr = 0; 3876 tcr->mask = 0; 3877 tcr->base_mask = 0xffffc000u; 3878 } 3879 3880 static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, 3881 uint64_t value) 3882 { 3883 ARMCPU *cpu = env_archcpu(env); 3884 TCR *tcr = raw_ptr(env, ri); 3885 3886 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 3887 tlb_flush(CPU(cpu)); 3888 tcr->raw_tcr = value; 3889 } 3890 3891 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3892 uint64_t value) 3893 { 3894 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 3895 if (cpreg_field_is_64bit(ri) && 3896 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3897 ARMCPU *cpu = env_archcpu(env); 3898 tlb_flush(CPU(cpu)); 3899 } 3900 raw_write(env, ri, value); 3901 } 3902 3903 static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3904 uint64_t value) 3905 { 3906 /* 3907 * If we are running with E2&0 regime, then an ASID is active. 
3908 * Flush if that might be changing. Note we're not checking 3909 * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that 3910 * holds the active ASID, only checking the field that might. 3911 */ 3912 if (extract64(raw_read(env, ri) ^ value, 48, 16) && 3913 (arm_hcr_el2_eff(env) & HCR_E2H)) { 3914 tlb_flush_by_mmuidx(env_cpu(env), 3915 ARMMMUIdxBit_E20_2 | 3916 ARMMMUIdxBit_E20_2_PAN | 3917 ARMMMUIdxBit_E20_0); 3918 } 3919 raw_write(env, ri, value); 3920 } 3921 3922 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3923 uint64_t value) 3924 { 3925 ARMCPU *cpu = env_archcpu(env); 3926 CPUState *cs = CPU(cpu); 3927 3928 /* 3929 * A change in VMID to the stage2 page table (Stage2) invalidates 3930 * the combined stage 1&2 tlbs (EL10_1 and EL10_0). 3931 */ 3932 if (raw_read(env, ri) != value) { 3933 tlb_flush_by_mmuidx(cs, 3934 ARMMMUIdxBit_E10_1 | 3935 ARMMMUIdxBit_E10_1_PAN | 3936 ARMMMUIdxBit_E10_0 | 3937 ARMMMUIdxBit_Stage2); 3938 raw_write(env, ri, value); 3939 } 3940 } 3941 3942 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 3943 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3944 .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, 3945 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 3946 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 3947 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3948 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 3949 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 3950 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 3951 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 3952 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 3953 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 3954 offsetof(CPUARMState, cp15.dfar_ns) } }, 3955 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 3956 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 3957 .access = PL1_RW, .accessfn = access_tvm_trvm, 3958 .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 3959 .resetvalue = 0, }, 3960 REGINFO_SENTINEL 3961 }; 3962 3963 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 3964 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 3965 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 3966 .access = PL1_RW, .accessfn = access_tvm_trvm, 3967 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 3968 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 3969 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 3970 .access = PL1_RW, .accessfn = access_tvm_trvm, 3971 .writefn = vmsa_ttbr_write, .resetvalue = 0, 3972 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3973 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 3974 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 3975 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 3976 .access = PL1_RW, .accessfn = access_tvm_trvm, 3977 .writefn = vmsa_ttbr_write, .resetvalue = 0, 3978 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3979 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 3980 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 3981 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3982 .access = PL1_RW, .accessfn = access_tvm_trvm, 3983 .writefn = vmsa_tcr_el12_write, 3984 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 3985 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 3986 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3987 .access = PL1_RW, .accessfn = access_tvm_trvm, 3988 .type 
= ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 3989 .raw_writefn = vmsa_ttbcr_raw_write, 3990 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 3991 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 3992 REGINFO_SENTINEL 3993 }; 3994 3995 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing 3996 * qemu tlbs nor adjusting cached masks. 3997 */ 3998 static const ARMCPRegInfo ttbcr2_reginfo = { 3999 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 4000 .access = PL1_RW, .accessfn = access_tvm_trvm, 4001 .type = ARM_CP_ALIAS, 4002 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 4003 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, 4004 }; 4005 4006 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 4007 uint64_t value) 4008 { 4009 env->cp15.c15_ticonfig = value & 0xe7; 4010 /* The OS_TYPE bit in this register changes the reported CPUID! */ 4011 env->cp15.c0_cpuid = (value & (1 << 5)) ? 4012 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 4013 } 4014 4015 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 4016 uint64_t value) 4017 { 4018 env->cp15.c15_threadid = value & 0xffff; 4019 } 4020 4021 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 4022 uint64_t value) 4023 { 4024 /* Wait-for-interrupt (deprecated) */ 4025 cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 4026 } 4027 4028 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 4029 uint64_t value) 4030 { 4031 /* On OMAP there are registers indicating the max/min index of dcache lines 4032 * containing a dirty line; cache flush operations have to reset these. 4033 */ 4034 env->cp15.c15_i_max = 0x000; 4035 env->cp15.c15_i_min = 0xff0; 4036 } 4037 4038 static const ARMCPRegInfo omap_cp_reginfo[] = { 4039 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 4040 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 4041 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 4042 .resetvalue = 0, }, 4043 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 4044 .access = PL1_RW, .type = ARM_CP_NOP }, 4045 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 4046 .access = PL1_RW, 4047 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 4048 .writefn = omap_ticonfig_write }, 4049 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 4050 .access = PL1_RW, 4051 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 4052 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 4053 .access = PL1_RW, .resetvalue = 0xff0, 4054 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 4055 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 4056 .access = PL1_RW, 4057 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 4058 .writefn = omap_threadid_write }, 4059 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 4060 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4061 .type = ARM_CP_NO_RAW, 4062 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 4063 /* TODO: Peripheral port remap register: 4064 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 4065 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 4066 * when MMU is off. 
4067 */ 4068 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 4069 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 4070 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 4071 .writefn = omap_cachemaint_write }, 4072 { .name = "C9", .cp = 15, .crn = 9, 4073 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 4074 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 4075 REGINFO_SENTINEL 4076 }; 4077 4078 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 4079 uint64_t value) 4080 { 4081 env->cp15.c15_cpar = value & 0x3fff; 4082 } 4083 4084 static const ARMCPRegInfo xscale_cp_reginfo[] = { 4085 { .name = "XSCALE_CPAR", 4086 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 4087 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 4088 .writefn = xscale_cpar_write, }, 4089 { .name = "XSCALE_AUXCR", 4090 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 4091 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 4092 .resetvalue = 0, }, 4093 /* XScale specific cache-lockdown: since we have no cache we NOP these 4094 * and hope the guest does not really rely on cache behaviour. 4095 */ 4096 { .name = "XSCALE_LOCK_ICACHE_LINE", 4097 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 4098 .access = PL1_W, .type = ARM_CP_NOP }, 4099 { .name = "XSCALE_UNLOCK_ICACHE", 4100 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 4101 .access = PL1_W, .type = ARM_CP_NOP }, 4102 { .name = "XSCALE_DCACHE_LOCK", 4103 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 4104 .access = PL1_RW, .type = ARM_CP_NOP }, 4105 { .name = "XSCALE_UNLOCK_DCACHE", 4106 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 4107 .access = PL1_W, .type = ARM_CP_NOP }, 4108 REGINFO_SENTINEL 4109 }; 4110 4111 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 4112 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 4113 * implementation of this implementation-defined space. 4114 * Ideally this should eventually disappear in favour of actually 4115 * implementing the correct behaviour for all cores. 
4116 */ 4117 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 4118 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4119 .access = PL1_RW, 4120 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 4121 .resetvalue = 0 }, 4122 REGINFO_SENTINEL 4123 }; 4124 4125 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 4126 /* Cache status: RAZ because we have no cache so it's always clean */ 4127 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 4128 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4129 .resetvalue = 0 }, 4130 REGINFO_SENTINEL 4131 }; 4132 4133 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 4134 /* We never have a block transfer operation in progress */ 4135 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 4136 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4137 .resetvalue = 0 }, 4138 /* The cache ops themselves: these all NOP for QEMU */ 4139 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 4140 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4141 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 4142 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4143 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 4144 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4145 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 4146 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4147 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 4148 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4149 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 4150 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 4151 REGINFO_SENTINEL 4152 }; 4153 4154 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 4155 /* The cache test-and-clean instructions always return (1 << 30) 4156 * to indicate that there are no dirty cache lines. 4157 */ 4158 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 4159 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4160 .resetvalue = (1 << 30) }, 4161 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 4162 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 4163 .resetvalue = (1 << 30) }, 4164 REGINFO_SENTINEL 4165 }; 4166 4167 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 4168 /* Ignore ReadBuffer accesses */ 4169 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 4170 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 4171 .access = PL1_RW, .resetvalue = 0, 4172 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 4173 REGINFO_SENTINEL 4174 }; 4175 4176 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4177 { 4178 ARMCPU *cpu = env_archcpu(env); 4179 unsigned int cur_el = arm_current_el(env); 4180 bool secure = arm_is_secure(env); 4181 4182 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 4183 return env->cp15.vpidr_el2; 4184 } 4185 return raw_read(env, ri); 4186 } 4187 4188 static uint64_t mpidr_read_val(CPUARMState *env) 4189 { 4190 ARMCPU *cpu = env_archcpu(env); 4191 uint64_t mpidr = cpu->mp_affinity; 4192 4193 if (arm_feature(env, ARM_FEATURE_V7MP)) { 4194 mpidr |= (1U << 31); 4195 /* Cores which are uniprocessor (non-coherent) 4196 * but still implement the MP extensions set 4197 * bit 30. (For instance, Cortex-R5).
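 * QEMU models this with the ARMCPU mp_is_up flag, tested below.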
4198 */ 4199 if (cpu->mp_is_up) { 4200 mpidr |= (1u << 30); 4201 } 4202 } 4203 return mpidr; 4204 } 4205 4206 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4207 { 4208 unsigned int cur_el = arm_current_el(env); 4209 bool secure = arm_is_secure(env); 4210 4211 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 4212 return env->cp15.vmpidr_el2; 4213 } 4214 return mpidr_read_val(env); 4215 } 4216 4217 static const ARMCPRegInfo lpae_cp_reginfo[] = { 4218 /* NOP AMAIR0/1 */ 4219 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 4220 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 4221 .access = PL1_RW, .accessfn = access_tvm_trvm, 4222 .type = ARM_CP_CONST, .resetvalue = 0 }, 4223 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 4224 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 4225 .access = PL1_RW, .accessfn = access_tvm_trvm, 4226 .type = ARM_CP_CONST, .resetvalue = 0 }, 4227 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 4228 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 4229 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 4230 offsetof(CPUARMState, cp15.par_ns)} }, 4231 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 4232 .access = PL1_RW, .accessfn = access_tvm_trvm, 4233 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4234 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 4235 offsetof(CPUARMState, cp15.ttbr0_ns) }, 4236 .writefn = vmsa_ttbr_write, }, 4237 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 4238 .access = PL1_RW, .accessfn = access_tvm_trvm, 4239 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4240 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 4241 offsetof(CPUARMState, cp15.ttbr1_ns) }, 4242 .writefn = vmsa_ttbr_write, }, 4243 REGINFO_SENTINEL 4244 }; 4245 4246 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4247 { 4248 return vfp_get_fpcr(env); 4249 } 4250 4251 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4252 uint64_t value) 4253 { 4254 vfp_set_fpcr(env, value); 4255 } 4256 4257 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 4258 { 4259 return vfp_get_fpsr(env); 4260 } 4261 4262 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4263 uint64_t value) 4264 { 4265 vfp_set_fpsr(env, value); 4266 } 4267 4268 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 4269 bool isread) 4270 { 4271 if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { 4272 return CP_ACCESS_TRAP; 4273 } 4274 return CP_ACCESS_OK; 4275 } 4276 4277 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 4278 uint64_t value) 4279 { 4280 env->daif = value & PSTATE_DAIF; 4281 } 4282 4283 static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) 4284 { 4285 return env->pstate & PSTATE_PAN; 4286 } 4287 4288 static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, 4289 uint64_t value) 4290 { 4291 env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); 4292 } 4293 4294 static const ARMCPRegInfo pan_reginfo = { 4295 .name = "PAN", .state = ARM_CP_STATE_AA64, 4296 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, 4297 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4298 .readfn = aa64_pan_read, .writefn = aa64_pan_write 4299 }; 4300 4301 static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) 4302 { 4303 return env->pstate & PSTATE_UAO; 4304 } 4305 4306 static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, 4307 
uint64_t value) 4308 { 4309 env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); 4310 } 4311 4312 static const ARMCPRegInfo uao_reginfo = { 4313 .name = "UAO", .state = ARM_CP_STATE_AA64, 4314 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, 4315 .type = ARM_CP_NO_RAW, .access = PL1_RW, 4316 .readfn = aa64_uao_read, .writefn = aa64_uao_write 4317 }; 4318 4319 static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, 4320 const ARMCPRegInfo *ri, 4321 bool isread) 4322 { 4323 /* Cache invalidate/clean to Point of Coherency or Persistence... */ 4324 switch (arm_current_el(env)) { 4325 case 0: 4326 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4327 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4328 return CP_ACCESS_TRAP; 4329 } 4330 /* fall through */ 4331 case 1: 4332 /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ 4333 if (arm_hcr_el2_eff(env) & HCR_TPCP) { 4334 return CP_ACCESS_TRAP_EL2; 4335 } 4336 break; 4337 } 4338 return CP_ACCESS_OK; 4339 } 4340 4341 static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env, 4342 const ARMCPRegInfo *ri, 4343 bool isread) 4344 { 4345 /* Cache invalidate/clean to Point of Unification... */ 4346 switch (arm_current_el(env)) { 4347 case 0: 4348 /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ 4349 if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { 4350 return CP_ACCESS_TRAP; 4351 } 4352 /* fall through */ 4353 case 1: 4354 /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set. */ 4355 if (arm_hcr_el2_eff(env) & HCR_TPU) { 4356 return CP_ACCESS_TRAP_EL2; 4357 } 4358 break; 4359 } 4360 return CP_ACCESS_OK; 4361 } 4362 4363 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 4364 * Page D4-1736 (DDI0487A.b) 4365 */ 4366 4367 static int vae1_tlbmask(CPUARMState *env) 4368 { 4369 /* Since we exclude secure first, we may read HCR_EL2 directly. */ 4370 if (arm_is_secure_below_el3(env)) { 4371 return ARMMMUIdxBit_SE10_1 | 4372 ARMMMUIdxBit_SE10_1_PAN | 4373 ARMMMUIdxBit_SE10_0; 4374 } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) 4375 == (HCR_E2H | HCR_TGE)) { 4376 return ARMMMUIdxBit_E20_2 | 4377 ARMMMUIdxBit_E20_2_PAN | 4378 ARMMMUIdxBit_E20_0; 4379 } else { 4380 return ARMMMUIdxBit_E10_1 | 4381 ARMMMUIdxBit_E10_1_PAN | 4382 ARMMMUIdxBit_E10_0; 4383 } 4384 } 4385 4386 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4387 uint64_t value) 4388 { 4389 CPUState *cs = env_cpu(env); 4390 int mask = vae1_tlbmask(env); 4391 4392 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4393 } 4394 4395 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4396 uint64_t value) 4397 { 4398 CPUState *cs = env_cpu(env); 4399 int mask = vae1_tlbmask(env); 4400 4401 if (tlb_force_broadcast(env)) { 4402 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4403 } else { 4404 tlb_flush_by_mmuidx(cs, mask); 4405 } 4406 } 4407 4408 static int alle1_tlbmask(CPUARMState *env) 4409 { 4410 /* 4411 * Note that the 'ALL' scope must invalidate both stage 1 and 4412 * stage 2 translations, whereas most other scopes only invalidate 4413 * stage 1 translations. 
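 * (Hence the non-secure case with EL2 present below also includes
 * ARMMMUIdxBit_Stage2.)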
4414 */ 4415 if (arm_is_secure_below_el3(env)) { 4416 return ARMMMUIdxBit_SE10_1 | 4417 ARMMMUIdxBit_SE10_1_PAN | 4418 ARMMMUIdxBit_SE10_0; 4419 } else if (arm_feature(env, ARM_FEATURE_EL2)) { 4420 return ARMMMUIdxBit_E10_1 | 4421 ARMMMUIdxBit_E10_1_PAN | 4422 ARMMMUIdxBit_E10_0 | 4423 ARMMMUIdxBit_Stage2; 4424 } else { 4425 return ARMMMUIdxBit_E10_1 | 4426 ARMMMUIdxBit_E10_1_PAN | 4427 ARMMMUIdxBit_E10_0; 4428 } 4429 } 4430 4431 static int e2_tlbmask(CPUARMState *env) 4432 { 4433 /* TODO: ARMv8.4-SecEL2 */ 4434 return ARMMMUIdxBit_E20_0 | 4435 ARMMMUIdxBit_E20_2 | 4436 ARMMMUIdxBit_E20_2_PAN | 4437 ARMMMUIdxBit_E2; 4438 } 4439 4440 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4441 uint64_t value) 4442 { 4443 CPUState *cs = env_cpu(env); 4444 int mask = alle1_tlbmask(env); 4445 4446 tlb_flush_by_mmuidx(cs, mask); 4447 } 4448 4449 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4450 uint64_t value) 4451 { 4452 CPUState *cs = env_cpu(env); 4453 int mask = e2_tlbmask(env); 4454 4455 tlb_flush_by_mmuidx(cs, mask); 4456 } 4457 4458 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4459 uint64_t value) 4460 { 4461 ARMCPU *cpu = env_archcpu(env); 4462 CPUState *cs = CPU(cpu); 4463 4464 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3); 4465 } 4466 4467 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4468 uint64_t value) 4469 { 4470 CPUState *cs = env_cpu(env); 4471 int mask = alle1_tlbmask(env); 4472 4473 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4474 } 4475 4476 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4477 uint64_t value) 4478 { 4479 CPUState *cs = env_cpu(env); 4480 int mask = e2_tlbmask(env); 4481 4482 tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); 4483 } 4484 4485 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4486 uint64_t value) 4487 { 4488 CPUState *cs = env_cpu(env); 4489 4490 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3); 4491 } 4492 4493 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 4494 uint64_t value) 4495 { 4496 /* Invalidate by VA, EL2 4497 * Currently handles both VAE2 and VALE2, since we don't support 4498 * flush-last-level-only. 4499 */ 4500 CPUState *cs = env_cpu(env); 4501 int mask = e2_tlbmask(env); 4502 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4503 4504 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4505 } 4506 4507 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 4508 uint64_t value) 4509 { 4510 /* Invalidate by VA, EL3 4511 * Currently handles both VAE3 and VALE3, since we don't support 4512 * flush-last-level-only. 4513 */ 4514 ARMCPU *cpu = env_archcpu(env); 4515 CPUState *cs = CPU(cpu); 4516 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4517 4518 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3); 4519 } 4520 4521 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4522 uint64_t value) 4523 { 4524 CPUState *cs = env_cpu(env); 4525 int mask = vae1_tlbmask(env); 4526 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4527 4528 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4529 } 4530 4531 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4532 uint64_t value) 4533 { 4534 /* Invalidate by VA, EL1&0 (AArch64 version). 
4535 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 4536 * since we don't support flush-for-specific-ASID-only or 4537 * flush-last-level-only. 4538 */ 4539 CPUState *cs = env_cpu(env); 4540 int mask = vae1_tlbmask(env); 4541 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4542 4543 if (tlb_force_broadcast(env)) { 4544 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); 4545 } else { 4546 tlb_flush_page_by_mmuidx(cs, pageaddr, mask); 4547 } 4548 } 4549 4550 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4551 uint64_t value) 4552 { 4553 CPUState *cs = env_cpu(env); 4554 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4555 4556 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4557 ARMMMUIdxBit_E2); 4558 } 4559 4560 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4561 uint64_t value) 4562 { 4563 CPUState *cs = env_cpu(env); 4564 uint64_t pageaddr = sextract64(value << 12, 0, 56); 4565 4566 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4567 ARMMMUIdxBit_SE3); 4568 } 4569 4570 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 4571 uint64_t value) 4572 { 4573 /* Invalidate by IPA. This has to invalidate any structures that 4574 * contain only stage 2 translation information, but does not need 4575 * to apply to structures that contain combined stage 1 and stage 2 4576 * translation information. 4577 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 4578 */ 4579 ARMCPU *cpu = env_archcpu(env); 4580 CPUState *cs = CPU(cpu); 4581 uint64_t pageaddr; 4582 4583 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4584 return; 4585 } 4586 4587 pageaddr = sextract64(value << 12, 0, 48); 4588 4589 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); 4590 } 4591 4592 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4593 uint64_t value) 4594 { 4595 CPUState *cs = env_cpu(env); 4596 uint64_t pageaddr; 4597 4598 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4599 return; 4600 } 4601 4602 pageaddr = sextract64(value << 12, 0, 48); 4603 4604 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4605 ARMMMUIdxBit_Stage2); 4606 } 4607 4608 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4609 bool isread) 4610 { 4611 int cur_el = arm_current_el(env); 4612 4613 if (cur_el < 2) { 4614 uint64_t hcr = arm_hcr_el2_eff(env); 4615 4616 if (cur_el == 0) { 4617 if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { 4618 if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { 4619 return CP_ACCESS_TRAP_EL2; 4620 } 4621 } else { 4622 if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4623 return CP_ACCESS_TRAP; 4624 } 4625 if (hcr & HCR_TDZ) { 4626 return CP_ACCESS_TRAP_EL2; 4627 } 4628 } 4629 } else if (hcr & HCR_TDZ) { 4630 return CP_ACCESS_TRAP_EL2; 4631 } 4632 } 4633 return CP_ACCESS_OK; 4634 } 4635 4636 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4637 { 4638 ARMCPU *cpu = env_archcpu(env); 4639 int dzp_bit = 1 << 4; 4640 4641 /* DZP indicates whether DC ZVA access is allowed */ 4642 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4643 dzp_bit = 0; 4644 } 4645 return cpu->dcz_blocksize | dzp_bit; 4646 } 4647 4648 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4649 bool isread) 4650 { 4651 if (!(env->pstate & PSTATE_SP)) { 4652 /* Access to SP_EL0 is undefined if it's being used as 4653 * the stack 
pointer. 4654 */ 4655 return CP_ACCESS_TRAP_UNCATEGORIZED; 4656 } 4657 return CP_ACCESS_OK; 4658 } 4659 4660 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4661 { 4662 return env->pstate & PSTATE_SP; 4663 } 4664 4665 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4666 { 4667 update_spsel(env, val); 4668 } 4669 4670 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4671 uint64_t value) 4672 { 4673 ARMCPU *cpu = env_archcpu(env); 4674 4675 if (raw_read(env, ri) == value) { 4676 /* Skip the TLB flush if nothing actually changed; Linux likes 4677 * to do a lot of pointless SCTLR writes. 4678 */ 4679 return; 4680 } 4681 4682 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4683 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4684 value &= ~SCTLR_M; 4685 } 4686 4687 raw_write(env, ri, value); 4688 /* ??? Lots of these bits are not implemented. */ 4689 /* This may enable/disable the MMU, so do a TLB flush. */ 4690 tlb_flush(CPU(cpu)); 4691 4692 if (ri->type & ARM_CP_SUPPRESS_TB_END) { 4693 /* 4694 * Normally we would always end the TB on an SCTLR write; see the 4695 * comment in ARMCPRegInfo sctlr initialization below for why Xscale 4696 * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild 4697 * of hflags from the translator, so do it here. 4698 */ 4699 arm_rebuild_hflags(env); 4700 } 4701 } 4702 4703 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 4704 bool isread) 4705 { 4706 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 4707 return CP_ACCESS_TRAP_FP_EL2; 4708 } 4709 if (env->cp15.cptr_el[3] & CPTR_TFP) { 4710 return CP_ACCESS_TRAP_FP_EL3; 4711 } 4712 return CP_ACCESS_OK; 4713 } 4714 4715 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4716 uint64_t value) 4717 { 4718 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4719 } 4720 4721 static const ARMCPRegInfo v8_cp_reginfo[] = { 4722 /* Minimal set of EL0-visible registers. This will need to be expanded 4723 * significantly for system emulation of AArch64 CPUs. 
4724 */ 4725 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4726 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4727 .access = PL0_RW, .type = ARM_CP_NZCV }, 4728 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4729 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4730 .type = ARM_CP_NO_RAW, 4731 .access = PL0_RW, .accessfn = aa64_daif_access, 4732 .fieldoffset = offsetof(CPUARMState, daif), 4733 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4734 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4735 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4736 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4737 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4738 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4739 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4740 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4741 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4742 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4743 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4744 .access = PL0_R, .type = ARM_CP_NO_RAW, 4745 .readfn = aa64_dczid_read }, 4746 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4747 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4748 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4749 #ifndef CONFIG_USER_ONLY 4750 /* Avoid overhead of an access check that always passes in user-mode */ 4751 .accessfn = aa64_zva_access, 4752 #endif 4753 }, 4754 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4755 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4756 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4757 /* Cache ops: all NOPs since we don't emulate caches */ 4758 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4759 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4760 .access = PL1_W, .type = ARM_CP_NOP, 4761 .accessfn = aa64_cacheop_pou_access }, 4762 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4763 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4764 .access = PL1_W, .type = ARM_CP_NOP, 4765 .accessfn = aa64_cacheop_pou_access }, 4766 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4767 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4768 .access = PL0_W, .type = ARM_CP_NOP, 4769 .accessfn = aa64_cacheop_pou_access }, 4770 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4771 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4772 .access = PL1_W, .accessfn = aa64_cacheop_poc_access, 4773 .type = ARM_CP_NOP }, 4774 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4775 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4776 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4777 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4778 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4779 .access = PL0_W, .type = ARM_CP_NOP, 4780 .accessfn = aa64_cacheop_poc_access }, 4781 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4782 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4783 .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4784 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4785 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4786 .access = PL0_W, .type = ARM_CP_NOP, 4787 .accessfn = aa64_cacheop_pou_access }, 4788 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4789 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4790 .access = PL0_W, .type = ARM_CP_NOP, 4791 .accessfn = aa64_cacheop_poc_access }, 4792 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4793 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4794 
.access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, 4795 /* TLBI operations */ 4796 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4797 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4798 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4799 .writefn = tlbi_aa64_vmalle1is_write }, 4800 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4801 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4802 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4803 .writefn = tlbi_aa64_vae1is_write }, 4804 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4805 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4806 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4807 .writefn = tlbi_aa64_vmalle1is_write }, 4808 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4809 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4810 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4811 .writefn = tlbi_aa64_vae1is_write }, 4812 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4813 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4814 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4815 .writefn = tlbi_aa64_vae1is_write }, 4816 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4817 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4818 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4819 .writefn = tlbi_aa64_vae1is_write }, 4820 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4821 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4822 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4823 .writefn = tlbi_aa64_vmalle1_write }, 4824 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4825 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4826 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4827 .writefn = tlbi_aa64_vae1_write }, 4828 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4829 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4830 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4831 .writefn = tlbi_aa64_vmalle1_write }, 4832 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4833 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 4834 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4835 .writefn = tlbi_aa64_vae1_write }, 4836 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4837 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4838 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4839 .writefn = tlbi_aa64_vae1_write }, 4840 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4841 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4842 .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, 4843 .writefn = tlbi_aa64_vae1_write }, 4844 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4845 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4846 .access = PL2_W, .type = ARM_CP_NO_RAW, 4847 .writefn = tlbi_aa64_ipas2e1is_write }, 4848 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4849 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4850 .access = PL2_W, .type = ARM_CP_NO_RAW, 4851 .writefn = tlbi_aa64_ipas2e1is_write }, 4852 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4853 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4854 .access = PL2_W, .type = ARM_CP_NO_RAW, 4855 .writefn = tlbi_aa64_alle1is_write }, 4856 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4857 .opc0 = 1, .opc1 = 4, .crn = 
8, .crm = 3, .opc2 = 6, 4858 .access = PL2_W, .type = ARM_CP_NO_RAW, 4859 .writefn = tlbi_aa64_alle1is_write }, 4860 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4861 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4862 .access = PL2_W, .type = ARM_CP_NO_RAW, 4863 .writefn = tlbi_aa64_ipas2e1_write }, 4864 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4865 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4866 .access = PL2_W, .type = ARM_CP_NO_RAW, 4867 .writefn = tlbi_aa64_ipas2e1_write }, 4868 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4869 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4870 .access = PL2_W, .type = ARM_CP_NO_RAW, 4871 .writefn = tlbi_aa64_alle1_write }, 4872 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4873 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4874 .access = PL2_W, .type = ARM_CP_NO_RAW, 4875 .writefn = tlbi_aa64_alle1is_write }, 4876 #ifndef CONFIG_USER_ONLY 4877 /* 64 bit address translation operations */ 4878 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4879 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4880 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4881 .writefn = ats_write64 }, 4882 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4883 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4884 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4885 .writefn = ats_write64 }, 4886 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4887 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4888 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4889 .writefn = ats_write64 }, 4890 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4891 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4892 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4893 .writefn = ats_write64 }, 4894 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4895 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4896 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4897 .writefn = ats_write64 }, 4898 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4899 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4900 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4901 .writefn = ats_write64 }, 4902 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4903 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4904 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4905 .writefn = ats_write64 }, 4906 { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, 4907 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4908 .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4909 .writefn = ats_write64 }, 4910 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4911 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4912 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4913 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4914 .writefn = ats_write64 }, 4915 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4916 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4917 .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 4918 .writefn = ats_write64 }, 4919 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4920 .type = ARM_CP_ALIAS, 4921 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4922 .access = PL1_RW, .resetvalue = 0, 4923 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4924 .writefn = par_write }, 4925 #endif 4926 /* TLB invalidate last level of translation table walk */ 4927 { .name = "TLBIMVALIS", 
.cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4928 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4929 .writefn = tlbimva_is_write }, 4930 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4931 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4932 .writefn = tlbimvaa_is_write }, 4933 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4934 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4935 .writefn = tlbimva_write }, 4936 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4937 .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, 4938 .writefn = tlbimvaa_write }, 4939 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4940 .type = ARM_CP_NO_RAW, .access = PL2_W, 4941 .writefn = tlbimva_hyp_write }, 4942 { .name = "TLBIMVALHIS", 4943 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4944 .type = ARM_CP_NO_RAW, .access = PL2_W, 4945 .writefn = tlbimva_hyp_is_write }, 4946 { .name = "TLBIIPAS2", 4947 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4948 .type = ARM_CP_NO_RAW, .access = PL2_W, 4949 .writefn = tlbiipas2_write }, 4950 { .name = "TLBIIPAS2IS", 4951 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4952 .type = ARM_CP_NO_RAW, .access = PL2_W, 4953 .writefn = tlbiipas2_is_write }, 4954 { .name = "TLBIIPAS2L", 4955 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4956 .type = ARM_CP_NO_RAW, .access = PL2_W, 4957 .writefn = tlbiipas2_write }, 4958 { .name = "TLBIIPAS2LIS", 4959 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4960 .type = ARM_CP_NO_RAW, .access = PL2_W, 4961 .writefn = tlbiipas2_is_write }, 4962 /* 32 bit cache operations */ 4963 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4964 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 4965 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4966 .type = ARM_CP_NOP, .access = PL1_W }, 4967 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4968 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 4969 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 4970 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 4971 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 4972 .type = ARM_CP_NOP, .access = PL1_W }, 4973 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 4974 .type = ARM_CP_NOP, .access = PL1_W }, 4975 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4976 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 4977 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4978 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 4979 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 4980 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 4981 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4982 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 4983 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 4984 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access }, 4985 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 4986 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, 4987 { .name = "DCCISW", 
.cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4988 .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, 4989 /* MMU Domain access control / MPU write buffer control */ 4990 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 4991 .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, 4992 .writefn = dacr_write, .raw_writefn = raw_write, 4993 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 4994 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 4995 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 4996 .type = ARM_CP_ALIAS, 4997 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 4998 .access = PL1_RW, 4999 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 5000 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 5001 .type = ARM_CP_ALIAS, 5002 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 5003 .access = PL1_RW, 5004 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 5005 /* We rely on the access checks not allowing the guest to write to the 5006 * state field when SPSel indicates that it's being used as the stack 5007 * pointer. 5008 */ 5009 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 5010 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 5011 .access = PL1_RW, .accessfn = sp_el0_access, 5012 .type = ARM_CP_ALIAS, 5013 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 5014 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 5015 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 5016 .access = PL2_RW, .type = ARM_CP_ALIAS, 5017 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 5018 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 5019 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 5020 .type = ARM_CP_NO_RAW, 5021 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 5022 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 5023 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 5024 .type = ARM_CP_ALIAS, 5025 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 5026 .access = PL2_RW, .accessfn = fpexc32_access }, 5027 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 5028 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 5029 .access = PL2_RW, .resetvalue = 0, 5030 .writefn = dacr_write, .raw_writefn = raw_write, 5031 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 5032 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 5033 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 5034 .access = PL2_RW, .resetvalue = 0, 5035 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 5036 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 5037 .type = ARM_CP_ALIAS, 5038 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 5039 .access = PL2_RW, 5040 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 5041 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 5042 .type = ARM_CP_ALIAS, 5043 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 5044 .access = PL2_RW, 5045 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 5046 { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, 5047 .type = ARM_CP_ALIAS, 5048 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 5049 .access = PL2_RW, 5050 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 5051 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 5052 .type = ARM_CP_ALIAS, 5053 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 5054 .access = PL2_RW, 5055 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 5056 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 5057 .opc0 = 3, .opc1 = 6, .crn = 
1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};

/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    {
.name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5130 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5131 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5132 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5133 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5134 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5135 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5136 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5137 .resetvalue = 0 }, 5138 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5139 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5140 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5141 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5142 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5143 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5144 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5145 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5146 .resetvalue = 0 }, 5147 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5148 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5149 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5150 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5151 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 5152 .resetvalue = 0 }, 5153 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5154 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5155 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5156 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5157 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5158 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5159 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5160 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5161 .access = PL2_RW, .accessfn = access_tda, 5162 .type = ARM_CP_CONST, .resetvalue = 0 }, 5163 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 5164 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5165 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 5166 .type = ARM_CP_CONST, .resetvalue = 0 }, 5167 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5168 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5169 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5170 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5171 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5172 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5173 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5174 .type = ARM_CP_CONST, 5175 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5176 .access = PL2_RW, .resetvalue = 0 }, 5177 REGINFO_SENTINEL 5178 }; 5179 5180 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 5181 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 5182 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5183 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5184 .access = PL2_RW, 5185 .type = ARM_CP_CONST, .resetvalue = 0 }, 5186 REGINFO_SENTINEL 5187 }; 5188 5189 static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) 5190 { 5191 ARMCPU *cpu = env_archcpu(env); 5192 5193 if (arm_feature(env, ARM_FEATURE_V8)) { 5194 valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ 5195 } else { 5196 valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ 5197 } 5198 5199 if (arm_feature(env, ARM_FEATURE_EL3)) { 5200 valid_mask &= ~HCR_HCD; 5201 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 5202 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 
5203 * However, if we're using the SMC PSCI conduit then QEMU is 5204 * effectively acting like EL3 firmware and so the guest at 5205 * EL2 should retain the ability to prevent EL1 from being 5206 * able to make SMC calls into the ersatz firmware, so in 5207 * that case HCR.TSC should be read/write. 5208 */ 5209 valid_mask &= ~HCR_TSC; 5210 } 5211 5212 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 5213 if (cpu_isar_feature(aa64_vh, cpu)) { 5214 valid_mask |= HCR_E2H; 5215 } 5216 if (cpu_isar_feature(aa64_lor, cpu)) { 5217 valid_mask |= HCR_TLOR; 5218 } 5219 if (cpu_isar_feature(aa64_pauth, cpu)) { 5220 valid_mask |= HCR_API | HCR_APK; 5221 } 5222 } 5223 5224 /* Clear RES0 bits. */ 5225 value &= valid_mask; 5226 5227 /* These bits change the MMU setup: 5228 * HCR_VM enables stage 2 translation 5229 * HCR_PTW forbids certain page-table setups 5230 * HCR_DC Disables stage1 and enables stage2 translation 5231 */ 5232 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 5233 tlb_flush(CPU(cpu)); 5234 } 5235 env->cp15.hcr_el2 = value; 5236 5237 /* 5238 * Updates to VI and VF require us to update the status of 5239 * virtual interrupts, which are the logical OR of these bits 5240 * and the state of the input lines from the GIC. (This requires 5241 * that we have the iothread lock, which is done by marking the 5242 * reginfo structs as ARM_CP_IO.) 5243 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 5244 * possible for it to be taken immediately, because VIRQ and 5245 * VFIQ are masked unless running at EL0 or EL1, and HCR 5246 * can only be written at EL2. 5247 */ 5248 g_assert(qemu_mutex_iothread_locked()); 5249 arm_cpu_update_virq(cpu); 5250 arm_cpu_update_vfiq(cpu); 5251 } 5252 5253 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 5254 { 5255 do_hcr_write(env, value, 0); 5256 } 5257 5258 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 5259 uint64_t value) 5260 { 5261 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 5262 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 5263 do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); 5264 } 5265 5266 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 5267 uint64_t value) 5268 { 5269 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 5270 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 5271 do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); 5272 } 5273 5274 /* 5275 * Return the effective value of HCR_EL2. 5276 * Bits that are not included here: 5277 * RW (read from SCR_EL3.RW as needed) 5278 */ 5279 uint64_t arm_hcr_el2_eff(CPUARMState *env) 5280 { 5281 uint64_t ret = env->cp15.hcr_el2; 5282 5283 if (arm_is_secure_below_el3(env)) { 5284 /* 5285 * "This register has no effect if EL2 is not enabled in the 5286 * current Security state". This is ARMv8.4-SecEL2 speak for 5287 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 5288 * 5289 * Prior to that, the language was "In an implementation that 5290 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 5291 * as if this field is 0 for all purposes other than a direct 5292 * read or write access of HCR_EL2". With lots of enumeration 5293 * on a per-field basis. In current QEMU, this is condition 5294 * is arm_is_secure_below_el3. 5295 * 5296 * Since the v8.4 language applies to the entire register, and 5297 * appears to be backward compatible, use that. 5298 */ 5299 return 0; 5300 } 5301 5302 /* 5303 * For a cpu that supports both aarch64 and aarch32, we can set bits 5304 * in HCR_EL2 (e.g. 
via EL3) that are RES0 when we enter EL2 as aa32. 5305 * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32. 5306 */ 5307 if (!arm_el_is_aa64(env, 2)) { 5308 uint64_t aa32_valid; 5309 5310 /* 5311 * These bits are up-to-date as of ARMv8.6. 5312 * For HCR, it's easiest to list just the 2 bits that are invalid. 5313 * For HCR2, list those that are valid. 5314 */ 5315 aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); 5316 aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | 5317 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); 5318 ret &= aa32_valid; 5319 } 5320 5321 if (ret & HCR_TGE) { 5322 /* These bits are up-to-date as of ARMv8.6. */ 5323 if (ret & HCR_E2H) { 5324 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 5325 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 5326 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 5327 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | 5328 HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | 5329 HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); 5330 } else { 5331 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 5332 } 5333 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 5334 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 5335 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 5336 HCR_TLOR); 5337 } 5338 5339 return ret; 5340 } 5341 5342 static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, 5343 uint64_t value) 5344 { 5345 /* 5346 * For A-profile AArch32 EL3, if NSACR.CP10 5347 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 5348 */ 5349 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5350 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5351 value &= ~(0x3 << 10); 5352 value |= env->cp15.cptr_el[2] & (0x3 << 10); 5353 } 5354 env->cp15.cptr_el[2] = value; 5355 } 5356 5357 static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) 5358 { 5359 /* 5360 * For A-profile AArch32 EL3, if NSACR.CP10 5361 * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. 
5362 */ 5363 uint64_t value = env->cp15.cptr_el[2]; 5364 5365 if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 5366 !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { 5367 value |= 0x3 << 10; 5368 } 5369 return value; 5370 } 5371 5372 static const ARMCPRegInfo el2_cp_reginfo[] = { 5373 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 5374 .type = ARM_CP_IO, 5375 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5376 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5377 .writefn = hcr_write }, 5378 { .name = "HCR", .state = ARM_CP_STATE_AA32, 5379 .type = ARM_CP_ALIAS | ARM_CP_IO, 5380 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 5381 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 5382 .writefn = hcr_writelow }, 5383 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 5384 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 5385 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 5386 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 5387 .type = ARM_CP_ALIAS, 5388 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 5389 .access = PL2_RW, 5390 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 5391 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 5392 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 5393 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 5394 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 5395 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 5396 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 5397 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 5398 .type = ARM_CP_ALIAS, 5399 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 5400 .access = PL2_RW, 5401 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 5402 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 5403 .type = ARM_CP_ALIAS, 5404 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 5405 .access = PL2_RW, 5406 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 5407 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 5408 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 5409 .access = PL2_RW, .writefn = vbar_write, 5410 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 5411 .resetvalue = 0 }, 5412 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 5413 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 5414 .access = PL3_RW, .type = ARM_CP_ALIAS, 5415 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 5416 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 5417 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 5418 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 5419 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), 5420 .readfn = cptr_el2_read, .writefn = cptr_el2_write }, 5421 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 5422 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 5423 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 5424 .resetvalue = 0 }, 5425 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 5426 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 5427 .access = PL2_RW, .type = ARM_CP_ALIAS, 5428 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 5429 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 5430 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 5431 .access = PL2_RW, .type = ARM_CP_CONST, 5432 .resetvalue = 0 }, 5433 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 5434 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 5435 .cp = 15, .opc1 = 4, .crn = 10, 
.crm = 3, .opc2 = 1, 5436 .access = PL2_RW, .type = ARM_CP_CONST, 5437 .resetvalue = 0 }, 5438 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 5439 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 5440 .access = PL2_RW, .type = ARM_CP_CONST, 5441 .resetvalue = 0 }, 5442 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 5443 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 5444 .access = PL2_RW, .type = ARM_CP_CONST, 5445 .resetvalue = 0 }, 5446 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 5447 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 5448 .access = PL2_RW, .writefn = vmsa_tcr_el12_write, 5449 /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */ 5450 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 5451 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 5452 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5453 .type = ARM_CP_ALIAS, 5454 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5455 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5456 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 5457 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 5458 .access = PL2_RW, 5459 /* no .writefn needed as this can't cause an ASID change; 5460 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 5461 */ 5462 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 5463 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 5464 .cp = 15, .opc1 = 6, .crm = 2, 5465 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5466 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5467 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 5468 .writefn = vttbr_write }, 5469 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 5470 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 5471 .access = PL2_RW, .writefn = vttbr_write, 5472 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 5473 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 5474 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 5475 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 5476 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 5477 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 5478 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 5479 .access = PL2_RW, .resetvalue = 0, 5480 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 5481 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 5482 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 5483 .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, 5484 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5485 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 5486 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 5487 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 5488 { .name = "TLBIALLNSNH", 5489 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 5490 .type = ARM_CP_NO_RAW, .access = PL2_W, 5491 .writefn = tlbiall_nsnh_write }, 5492 { .name = "TLBIALLNSNHIS", 5493 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 5494 .type = ARM_CP_NO_RAW, .access = PL2_W, 5495 .writefn = tlbiall_nsnh_is_write }, 5496 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5497 .type = ARM_CP_NO_RAW, .access = PL2_W, 5498 .writefn = tlbiall_hyp_write }, 5499 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5500 .type = ARM_CP_NO_RAW, .access = PL2_W, 5501 .writefn = tlbiall_hyp_is_write }, 5502 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5503 .type = ARM_CP_NO_RAW, .access = PL2_W, 5504 .writefn 
= tlbimva_hyp_write }, 5505 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5506 .type = ARM_CP_NO_RAW, .access = PL2_W, 5507 .writefn = tlbimva_hyp_is_write }, 5508 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 5509 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 5510 .type = ARM_CP_NO_RAW, .access = PL2_W, 5511 .writefn = tlbi_aa64_alle2_write }, 5512 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 5513 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 5514 .type = ARM_CP_NO_RAW, .access = PL2_W, 5515 .writefn = tlbi_aa64_vae2_write }, 5516 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 5517 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 5518 .access = PL2_W, .type = ARM_CP_NO_RAW, 5519 .writefn = tlbi_aa64_vae2_write }, 5520 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 5521 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 5522 .access = PL2_W, .type = ARM_CP_NO_RAW, 5523 .writefn = tlbi_aa64_alle2is_write }, 5524 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 5525 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 5526 .type = ARM_CP_NO_RAW, .access = PL2_W, 5527 .writefn = tlbi_aa64_vae2is_write }, 5528 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 5529 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 5530 .access = PL2_W, .type = ARM_CP_NO_RAW, 5531 .writefn = tlbi_aa64_vae2is_write }, 5532 #ifndef CONFIG_USER_ONLY 5533 /* Unlike the other EL2-related AT operations, these must 5534 * UNDEF from EL3 if EL2 is not implemented, which is why we 5535 * define them here rather than with the rest of the AT ops. 5536 */ 5537 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 5538 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5539 .access = PL2_W, .accessfn = at_s1e2_access, 5540 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5541 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 5542 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5543 .access = PL2_W, .accessfn = at_s1e2_access, 5544 .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 }, 5545 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 5546 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 5547 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 5548 * to behave as if SCR.NS was 1. 5549 */ 5550 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 5551 .access = PL2_W, 5552 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5553 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 5554 .access = PL2_W, 5555 .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, 5556 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 5557 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 5558 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 5559 * reset values as IMPDEF. We choose to reset to 3 to comply with 5560 * both ARMv7 and ARMv8. 
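       * (Bits [1:0] are EL1PCTEN and EL1PCEN in the layout used when
       * HCR_EL2.E2H is 0, so resetting them to 1 means EL1 and EL0 accesses
       * to the physical counter and timer are not trapped to EL2 by default.)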
5561 */ 5562 .access = PL2_RW, .resetvalue = 3, 5563 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 5564 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 5565 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 5566 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 5567 .writefn = gt_cntvoff_write, 5568 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5569 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 5570 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 5571 .writefn = gt_cntvoff_write, 5572 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 5573 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 5574 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 5575 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5576 .type = ARM_CP_IO, .access = PL2_RW, 5577 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5578 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 5579 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 5580 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 5581 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 5582 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 5583 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 5584 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 5585 .resetfn = gt_hyp_timer_reset, 5586 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 5587 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 5588 .type = ARM_CP_IO, 5589 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 5590 .access = PL2_RW, 5591 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 5592 .resetvalue = 0, 5593 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 5594 #endif 5595 /* The only field of MDCR_EL2 that has a defined architectural reset value 5596 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 5597 * don't implement any PMU event counters, so using zero as a reset 5598 * value for MDCR_EL2 is okay 5599 */ 5600 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 5601 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 5602 .access = PL2_RW, .resetvalue = 0, 5603 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 5604 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 5605 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5606 .access = PL2_RW, .accessfn = access_el3_aa32ns, 5607 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5608 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 5609 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 5610 .access = PL2_RW, 5611 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 5612 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 5613 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 5614 .access = PL2_RW, 5615 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 5616 REGINFO_SENTINEL 5617 }; 5618 5619 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 5620 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 5621 .type = ARM_CP_ALIAS | ARM_CP_IO, 5622 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 5623 .access = PL2_RW, 5624 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 5625 .writefn = hcr_writehigh }, 5626 REGINFO_SENTINEL 5627 }; 5628 5629 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 5630 bool isread) 5631 { 5632 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 5633 * At Secure EL1 it traps to EL3. 
5634 */ 5635 if (arm_current_el(env) == 3) { 5636 return CP_ACCESS_OK; 5637 } 5638 if (arm_is_secure_below_el3(env)) { 5639 return CP_ACCESS_TRAP_EL3; 5640 } 5641 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 5642 if (isread) { 5643 return CP_ACCESS_OK; 5644 } 5645 return CP_ACCESS_TRAP_UNCATEGORIZED; 5646 } 5647 5648 static const ARMCPRegInfo el3_cp_reginfo[] = { 5649 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 5650 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 5651 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 5652 .resetvalue = 0, .writefn = scr_write }, 5653 { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, 5654 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 5655 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5656 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 5657 .writefn = scr_write }, 5658 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 5659 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 5660 .access = PL3_RW, .resetvalue = 0, 5661 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 5662 { .name = "SDER", 5663 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 5664 .access = PL3_RW, .resetvalue = 0, 5665 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 5666 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 5667 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 5668 .writefn = vbar_write, .resetvalue = 0, 5669 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 5670 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 5671 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 5672 .access = PL3_RW, .resetvalue = 0, 5673 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 5674 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 5675 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 5676 .access = PL3_RW, 5677 /* no .writefn needed as this can't cause an ASID change; 5678 * we must provide a .raw_writefn and .resetfn because we handle 5679 * reset and migration for the AArch32 TTBCR(S), which might be 5680 * using mask and base_mask. 
5681 */ 5682 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 5683 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 5684 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 5685 .type = ARM_CP_ALIAS, 5686 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 5687 .access = PL3_RW, 5688 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 5689 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 5690 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 5691 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 5692 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 5693 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5694 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5695 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5696 .type = ARM_CP_ALIAS, 5697 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5698 .access = PL3_RW, 5699 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5700 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5701 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5702 .access = PL3_RW, .writefn = vbar_write, 5703 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5704 .resetvalue = 0 }, 5705 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5706 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5707 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5708 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5709 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5710 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5711 .access = PL3_RW, .resetvalue = 0, 5712 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5713 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5714 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5715 .access = PL3_RW, .type = ARM_CP_CONST, 5716 .resetvalue = 0 }, 5717 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5718 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5719 .access = PL3_RW, .type = ARM_CP_CONST, 5720 .resetvalue = 0 }, 5721 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5722 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5723 .access = PL3_RW, .type = ARM_CP_CONST, 5724 .resetvalue = 0 }, 5725 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 5726 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 5727 .access = PL3_W, .type = ARM_CP_NO_RAW, 5728 .writefn = tlbi_aa64_alle3is_write }, 5729 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 5730 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 5731 .access = PL3_W, .type = ARM_CP_NO_RAW, 5732 .writefn = tlbi_aa64_vae3is_write }, 5733 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 5734 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 5735 .access = PL3_W, .type = ARM_CP_NO_RAW, 5736 .writefn = tlbi_aa64_vae3is_write }, 5737 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 5738 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 5739 .access = PL3_W, .type = ARM_CP_NO_RAW, 5740 .writefn = tlbi_aa64_alle3_write }, 5741 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 5742 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 5743 .access = PL3_W, .type = ARM_CP_NO_RAW, 5744 .writefn = tlbi_aa64_vae3_write }, 5745 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 5746 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 5747 .access = PL3_W, .type = ARM_CP_NO_RAW, 5748 .writefn = tlbi_aa64_vae3_write }, 5749 REGINFO_SENTINEL 5750 }; 5751 5752 #ifndef CONFIG_USER_ONLY 5753 /* Test if system register redirection is to 
occur in the current state. */ 5754 static bool redirect_for_e2h(CPUARMState *env) 5755 { 5756 return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); 5757 } 5758 5759 static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) 5760 { 5761 CPReadFn *readfn; 5762 5763 if (redirect_for_e2h(env)) { 5764 /* Switch to the saved EL2 version of the register. */ 5765 ri = ri->opaque; 5766 readfn = ri->readfn; 5767 } else { 5768 readfn = ri->orig_readfn; 5769 } 5770 if (readfn == NULL) { 5771 readfn = raw_read; 5772 } 5773 return readfn(env, ri); 5774 } 5775 5776 static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, 5777 uint64_t value) 5778 { 5779 CPWriteFn *writefn; 5780 5781 if (redirect_for_e2h(env)) { 5782 /* Switch to the saved EL2 version of the register. */ 5783 ri = ri->opaque; 5784 writefn = ri->writefn; 5785 } else { 5786 writefn = ri->orig_writefn; 5787 } 5788 if (writefn == NULL) { 5789 writefn = raw_write; 5790 } 5791 writefn(env, ri, value); 5792 } 5793 5794 static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) 5795 { 5796 struct E2HAlias { 5797 uint32_t src_key, dst_key, new_key; 5798 const char *src_name, *dst_name, *new_name; 5799 bool (*feature)(const ARMISARegisters *id); 5800 }; 5801 5802 #define K(op0, op1, crn, crm, op2) \ 5803 ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) 5804 5805 static const struct E2HAlias aliases[] = { 5806 { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), 5807 "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, 5808 { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), 5809 "CPACR", "CPTR_EL2", "CPACR_EL12" }, 5810 { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), 5811 "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, 5812 { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), 5813 "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, 5814 { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), 5815 "TCR_EL1", "TCR_EL2", "TCR_EL12" }, 5816 { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), 5817 "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, 5818 { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), 5819 "ELR_EL1", "ELR_EL2", "ELR_EL12" }, 5820 { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), 5821 "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, 5822 { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), 5823 "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, 5824 { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), 5825 "ESR_EL1", "ESR_EL2", "ESR_EL12" }, 5826 { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), 5827 "FAR_EL1", "FAR_EL2", "FAR_EL12" }, 5828 { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), 5829 "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, 5830 { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), 5831 "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, 5832 { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), 5833 "VBAR", "VBAR_EL2", "VBAR_EL12" }, 5834 { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), 5835 "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, 5836 { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), 5837 "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, 5838 5839 /* 5840 * Note that redirection of ZCR is mentioned in the description 5841 * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but 5842 * not in the summary table. 
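         * (With HCR_EL2.E2H set, an access to ZCR_EL1 from EL2 is redirected
         * to ZCR_EL2, and the ZCR_EL12 encoding reaches the real ZCR_EL1,
         * exactly as for the other _EL12 aliases in this table.)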
         */
        { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },

        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
    };
#undef K

    size_t i;

    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
        const struct E2HAlias *a = &aliases[i];
        ARMCPRegInfo *src_reg, *dst_reg;

        if (a->feature && !a->feature(&cpu->isar)) {
            continue;
        }

        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
        g_assert(src_reg != NULL);
        g_assert(dst_reg != NULL);

        /* Cross-compare names to detect typos in the keys. */
        g_assert(strcmp(src_reg->name, a->src_name) == 0);
        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);

        /* None of the core system registers use opaque; we will. */
        g_assert(src_reg->opaque == NULL);

        /* Create alias before redirection so we dup the right data. */
        if (a->new_key) {
            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
            bool ok;

            new_reg->name = a->new_name;
            new_reg->type |= ARM_CP_ALIAS;
            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
            new_reg->access &= PL2_RW | PL3_RW;

            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
            g_assert(ok);
        }

        src_reg->opaque = dst_reg;
        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
        if (!src_reg->raw_readfn) {
            src_reg->raw_readfn = raw_read;
        }
        if (!src_reg->raw_writefn) {
            src_reg->raw_writefn = raw_write;
        }
        src_reg->readfn = el2_e2h_read;
        src_reg->writefn = el2_e2h_write;
    }
}
#endif

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                     bool isread)
{
    int cur_el = arm_current_el(env);

    if (cur_el < 2) {
        uint64_t hcr = arm_hcr_el2_eff(env);

        if (cur_el == 0) {
            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP_EL2;
                }
            } else {
                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
                    return CP_ACCESS_TRAP;
                }
                if (hcr & HCR_TID2) {
                    return CP_ACCESS_TRAP_EL2;
                }
            }
        } else if (hcr & HCR_TID2) {
            return CP_ACCESS_TRAP_EL2;
        }
    }

    return CP_ACCESS_OK;
}

static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
     * read via a bit in OSLSR_EL1.
     */
    int oslock;

    if (ri->state == ARM_CP_STATE_AA32) {
        oslock = (value == 0xC5ACCE55);
    } else {
        oslock = value & 1;
    }

    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
}

static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
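     * Reading DBGDRAR/MDRAR_EL1 as zero leaves the 'Valid' field at 0, i.e.
     * no debug ROM table address is supplied, which is how an implementation
     * reports that the memory-mapped debug interface is absent.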
5960 */ 5961 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 5962 .access = PL0_R, .accessfn = access_tdra, 5963 .type = ARM_CP_CONST, .resetvalue = 0 }, 5964 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 5965 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5966 .access = PL1_R, .accessfn = access_tdra, 5967 .type = ARM_CP_CONST, .resetvalue = 0 }, 5968 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 5969 .access = PL0_R, .accessfn = access_tdra, 5970 .type = ARM_CP_CONST, .resetvalue = 0 }, 5971 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 5972 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 5973 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 5974 .access = PL1_RW, .accessfn = access_tda, 5975 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 5976 .resetvalue = 0 }, 5977 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 5978 * We don't implement the configurable EL0 access. 5979 */ 5980 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 5981 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 5982 .type = ARM_CP_ALIAS, 5983 .access = PL1_R, .accessfn = access_tda, 5984 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 5985 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 5986 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 5987 .access = PL1_W, .type = ARM_CP_NO_RAW, 5988 .accessfn = access_tdosa, 5989 .writefn = oslar_write }, 5990 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 5991 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 5992 .access = PL1_R, .resetvalue = 10, 5993 .accessfn = access_tdosa, 5994 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 5995 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 5996 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 5997 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 5998 .access = PL1_RW, .accessfn = access_tdosa, 5999 .type = ARM_CP_NOP }, 6000 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 6001 * implement vector catch debug events yet. 6002 */ 6003 { .name = "DBGVCR", 6004 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 6005 .access = PL1_RW, .accessfn = access_tda, 6006 .type = ARM_CP_NOP }, 6007 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 6008 * to save and restore a 32-bit guest's DBGVCR) 6009 */ 6010 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 6011 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 6012 .access = PL2_RW, .accessfn = access_tda, 6013 .type = ARM_CP_NOP }, 6014 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 6015 * Channel but Linux may try to access this register. The 32-bit 6016 * alias is DBGDCCINT. 
6017 */ 6018 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 6019 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 6020 .access = PL1_RW, .accessfn = access_tda, 6021 .type = ARM_CP_NOP }, 6022 REGINFO_SENTINEL 6023 }; 6024 6025 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 6026 /* 64 bit access versions of the (dummy) debug registers */ 6027 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 6028 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6029 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 6030 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 6031 REGINFO_SENTINEL 6032 }; 6033 6034 /* Return the exception level to which exceptions should be taken 6035 * via SVEAccessTrap. If an exception should be routed through 6036 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 6037 * take care of raising that exception. 6038 * C.f. the ARM pseudocode function CheckSVEEnabled. 6039 */ 6040 int sve_exception_el(CPUARMState *env, int el) 6041 { 6042 #ifndef CONFIG_USER_ONLY 6043 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 6044 6045 if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 6046 bool disabled = false; 6047 6048 /* The CPACR.ZEN controls traps to EL1: 6049 * 0, 2 : trap EL0 and EL1 accesses 6050 * 1 : trap only EL0 accesses 6051 * 3 : trap no accesses 6052 */ 6053 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 6054 disabled = true; 6055 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 6056 disabled = el == 0; 6057 } 6058 if (disabled) { 6059 /* route_to_el2 */ 6060 return hcr_el2 & HCR_TGE ? 2 : 1; 6061 } 6062 6063 /* Check CPACR.FPEN. */ 6064 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 6065 disabled = true; 6066 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 6067 disabled = el == 0; 6068 } 6069 if (disabled) { 6070 return 0; 6071 } 6072 } 6073 6074 /* CPTR_EL2. Since TZ and TFP are positive, 6075 * they will be zero when EL2 is not present. 6076 */ 6077 if (el <= 2 && !arm_is_secure_below_el3(env)) { 6078 if (env->cp15.cptr_el[2] & CPTR_TZ) { 6079 return 2; 6080 } 6081 if (env->cp15.cptr_el[2] & CPTR_TFP) { 6082 return 0; 6083 } 6084 } 6085 6086 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 6087 if (arm_feature(env, ARM_FEATURE_EL3) 6088 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 6089 return 3; 6090 } 6091 #endif 6092 return 0; 6093 } 6094 6095 static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len) 6096 { 6097 uint32_t end_len; 6098 6099 end_len = start_len &= 0xf; 6100 if (!test_bit(start_len, cpu->sve_vq_map)) { 6101 end_len = find_last_bit(cpu->sve_vq_map, start_len); 6102 assert(end_len < start_len); 6103 } 6104 return end_len; 6105 } 6106 6107 /* 6108 * Given that SVE is enabled, return the vector length for EL. 
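 * This is the minimum of ZCR_ELx.LEN for this EL and all higher implemented
 * ELs, further clamped via sve_zcr_get_valid_len() to a vector length that
 * the CPU actually supports.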
6109 */ 6110 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 6111 { 6112 ARMCPU *cpu = env_archcpu(env); 6113 uint32_t zcr_len = cpu->sve_max_vq - 1; 6114 6115 if (el <= 1) { 6116 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 6117 } 6118 if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { 6119 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 6120 } 6121 if (arm_feature(env, ARM_FEATURE_EL3)) { 6122 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 6123 } 6124 6125 return sve_zcr_get_valid_len(cpu, zcr_len); 6126 } 6127 6128 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6129 uint64_t value) 6130 { 6131 int cur_el = arm_current_el(env); 6132 int old_len = sve_zcr_len_for_el(env, cur_el); 6133 int new_len; 6134 6135 /* Bits other than [3:0] are RAZ/WI. */ 6136 QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); 6137 raw_write(env, ri, value & 0xf); 6138 6139 /* 6140 * Because we arrived here, we know both FP and SVE are enabled; 6141 * otherwise we would have trapped access to the ZCR_ELn register. 6142 */ 6143 new_len = sve_zcr_len_for_el(env, cur_el); 6144 if (new_len < old_len) { 6145 aarch64_sve_narrow_vq(env, new_len + 1); 6146 } 6147 } 6148 6149 static const ARMCPRegInfo zcr_el1_reginfo = { 6150 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 6151 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 6152 .access = PL1_RW, .type = ARM_CP_SVE, 6153 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 6154 .writefn = zcr_write, .raw_writefn = raw_write 6155 }; 6156 6157 static const ARMCPRegInfo zcr_el2_reginfo = { 6158 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6159 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6160 .access = PL2_RW, .type = ARM_CP_SVE, 6161 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 6162 .writefn = zcr_write, .raw_writefn = raw_write 6163 }; 6164 6165 static const ARMCPRegInfo zcr_no_el2_reginfo = { 6166 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 6167 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 6168 .access = PL2_RW, .type = ARM_CP_SVE, 6169 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 6170 }; 6171 6172 static const ARMCPRegInfo zcr_el3_reginfo = { 6173 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 6174 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 6175 .access = PL3_RW, .type = ARM_CP_SVE, 6176 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 6177 .writefn = zcr_write, .raw_writefn = raw_write 6178 }; 6179 6180 void hw_watchpoint_update(ARMCPU *cpu, int n) 6181 { 6182 CPUARMState *env = &cpu->env; 6183 vaddr len = 0; 6184 vaddr wvr = env->cp15.dbgwvr[n]; 6185 uint64_t wcr = env->cp15.dbgwcr[n]; 6186 int mask; 6187 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 6188 6189 if (env->cpu_watchpoint[n]) { 6190 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 6191 env->cpu_watchpoint[n] = NULL; 6192 } 6193 6194 if (!extract64(wcr, 0, 1)) { 6195 /* E bit clear : watchpoint disabled */ 6196 return; 6197 } 6198 6199 switch (extract64(wcr, 3, 2)) { 6200 case 0: 6201 /* LSC 00 is reserved and must behave as if the wp is disabled */ 6202 return; 6203 case 1: 6204 flags |= BP_MEM_READ; 6205 break; 6206 case 2: 6207 flags |= BP_MEM_WRITE; 6208 break; 6209 case 3: 6210 flags |= BP_MEM_ACCESS; 6211 break; 6212 } 6213 6214 /* Attempts to use both MASK and BAS fields simultaneously are 6215 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 6216 * thus generating a watchpoint for every byte in the masked region. 
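     * (A non-zero MASK value n selects an aligned region of 2^n bytes, which
     * is why the code below sets len to 1ULL << mask; mask values 1 and 2
     * are reserved.)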
6217 */ 6218 mask = extract64(wcr, 24, 4); 6219 if (mask == 1 || mask == 2) { 6220 /* Reserved values of MASK; we must act as if the mask value was 6221 * some non-reserved value, or as if the watchpoint were disabled. 6222 * We choose the latter. 6223 */ 6224 return; 6225 } else if (mask) { 6226 /* Watchpoint covers an aligned area up to 2GB in size */ 6227 len = 1ULL << mask; 6228 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 6229 * whether the watchpoint fires when the unmasked bits match; we opt 6230 * to generate the exceptions. 6231 */ 6232 wvr &= ~(len - 1); 6233 } else { 6234 /* Watchpoint covers bytes defined by the byte address select bits */ 6235 int bas = extract64(wcr, 5, 8); 6236 int basstart; 6237 6238 if (bas == 0) { 6239 /* This must act as if the watchpoint is disabled */ 6240 return; 6241 } 6242 6243 if (extract64(wvr, 2, 1)) { 6244 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 6245 * ignored, and BAS[3:0] define which bytes to watch. 6246 */ 6247 bas &= 0xf; 6248 } 6249 /* The BAS bits are supposed to be programmed to indicate a contiguous 6250 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 6251 * we fire for each byte in the word/doubleword addressed by the WVR. 6252 * We choose to ignore any non-zero bits after the first range of 1s. 6253 */ 6254 basstart = ctz32(bas); 6255 len = cto32(bas >> basstart); 6256 wvr += basstart; 6257 } 6258 6259 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 6260 &env->cpu_watchpoint[n]); 6261 } 6262 6263 void hw_watchpoint_update_all(ARMCPU *cpu) 6264 { 6265 int i; 6266 CPUARMState *env = &cpu->env; 6267 6268 /* Completely clear out existing QEMU watchpoints and our array, to 6269 * avoid possible stale entries following migration load. 6270 */ 6271 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 6272 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 6273 6274 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 6275 hw_watchpoint_update(cpu, i); 6276 } 6277 } 6278 6279 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6280 uint64_t value) 6281 { 6282 ARMCPU *cpu = env_archcpu(env); 6283 int i = ri->crm; 6284 6285 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 6286 * register reads and behaves as if values written are sign extended. 6287 * Bits [1:0] are RES0. 
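     * sextract64(value, 0, 49) performs that sign extension from bit [48],
     * and masking with ~3ULL clears the two RES0 bits before the value is
     * stored.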
6288 */ 6289 value = sextract64(value, 0, 49) & ~3ULL; 6290 6291 raw_write(env, ri, value); 6292 hw_watchpoint_update(cpu, i); 6293 } 6294 6295 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6296 uint64_t value) 6297 { 6298 ARMCPU *cpu = env_archcpu(env); 6299 int i = ri->crm; 6300 6301 raw_write(env, ri, value); 6302 hw_watchpoint_update(cpu, i); 6303 } 6304 6305 void hw_breakpoint_update(ARMCPU *cpu, int n) 6306 { 6307 CPUARMState *env = &cpu->env; 6308 uint64_t bvr = env->cp15.dbgbvr[n]; 6309 uint64_t bcr = env->cp15.dbgbcr[n]; 6310 vaddr addr; 6311 int bt; 6312 int flags = BP_CPU; 6313 6314 if (env->cpu_breakpoint[n]) { 6315 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 6316 env->cpu_breakpoint[n] = NULL; 6317 } 6318 6319 if (!extract64(bcr, 0, 1)) { 6320 /* E bit clear : watchpoint disabled */ 6321 return; 6322 } 6323 6324 bt = extract64(bcr, 20, 4); 6325 6326 switch (bt) { 6327 case 4: /* unlinked address mismatch (reserved if AArch64) */ 6328 case 5: /* linked address mismatch (reserved if AArch64) */ 6329 qemu_log_mask(LOG_UNIMP, 6330 "arm: address mismatch breakpoint types not implemented\n"); 6331 return; 6332 case 0: /* unlinked address match */ 6333 case 1: /* linked address match */ 6334 { 6335 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 6336 * we behave as if the register was sign extended. Bits [1:0] are 6337 * RES0. The BAS field is used to allow setting breakpoints on 16 6338 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 6339 * a bp will fire if the addresses covered by the bp and the addresses 6340 * covered by the insn overlap but the insn doesn't start at the 6341 * start of the bp address range. We choose to require the insn and 6342 * the bp to have the same address. The constraints on writing to 6343 * BAS enforced in dbgbcr_write mean we have only four cases: 6344 * 0b0000 => no breakpoint 6345 * 0b0011 => breakpoint on addr 6346 * 0b1100 => breakpoint on addr + 2 6347 * 0b1111 => breakpoint on addr 6348 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 6349 */ 6350 int bas = extract64(bcr, 5, 4); 6351 addr = sextract64(bvr, 0, 49) & ~3ULL; 6352 if (bas == 0) { 6353 return; 6354 } 6355 if (bas == 0xc) { 6356 addr += 2; 6357 } 6358 break; 6359 } 6360 case 2: /* unlinked context ID match */ 6361 case 8: /* unlinked VMID match (reserved if no EL2) */ 6362 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 6363 qemu_log_mask(LOG_UNIMP, 6364 "arm: unlinked context breakpoint types not implemented\n"); 6365 return; 6366 case 9: /* linked VMID match (reserved if no EL2) */ 6367 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 6368 case 3: /* linked context ID match */ 6369 default: 6370 /* We must generate no events for Linked context matches (unless 6371 * they are linked to by some other bp/wp, which is handled in 6372 * updates for the linking bp/wp). We choose to also generate no events 6373 * for reserved values. 6374 */ 6375 return; 6376 } 6377 6378 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 6379 } 6380 6381 void hw_breakpoint_update_all(ARMCPU *cpu) 6382 { 6383 int i; 6384 CPUARMState *env = &cpu->env; 6385 6386 /* Completely clear out existing QEMU breakpoints and our array, to 6387 * avoid possible stale entries following migration load. 
6388 */ 6389 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 6390 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 6391 6392 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 6393 hw_breakpoint_update(cpu, i); 6394 } 6395 } 6396 6397 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6398 uint64_t value) 6399 { 6400 ARMCPU *cpu = env_archcpu(env); 6401 int i = ri->crm; 6402 6403 raw_write(env, ri, value); 6404 hw_breakpoint_update(cpu, i); 6405 } 6406 6407 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 6408 uint64_t value) 6409 { 6410 ARMCPU *cpu = env_archcpu(env); 6411 int i = ri->crm; 6412 6413 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 6414 * copy of BAS[0]. 6415 */ 6416 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 6417 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 6418 6419 raw_write(env, ri, value); 6420 hw_breakpoint_update(cpu, i); 6421 } 6422 6423 static void define_debug_regs(ARMCPU *cpu) 6424 { 6425 /* Define v7 and v8 architectural debug registers. 6426 * These are just dummy implementations for now. 6427 */ 6428 int i; 6429 int wrps, brps, ctx_cmps; 6430 ARMCPRegInfo dbgdidr = { 6431 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 6432 .access = PL0_R, .accessfn = access_tda, 6433 .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, 6434 }; 6435 6436 /* Note that all these register fields hold "number of Xs minus 1". */ 6437 brps = arm_num_brps(cpu); 6438 wrps = arm_num_wrps(cpu); 6439 ctx_cmps = arm_num_ctx_cmps(cpu); 6440 6441 assert(ctx_cmps <= brps); 6442 6443 define_one_arm_cp_reg(cpu, &dbgdidr); 6444 define_arm_cp_regs(cpu, debug_cp_reginfo); 6445 6446 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 6447 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 6448 } 6449 6450 for (i = 0; i < brps; i++) { 6451 ARMCPRegInfo dbgregs[] = { 6452 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 6453 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 6454 .access = PL1_RW, .accessfn = access_tda, 6455 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 6456 .writefn = dbgbvr_write, .raw_writefn = raw_write 6457 }, 6458 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 6459 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 6460 .access = PL1_RW, .accessfn = access_tda, 6461 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 6462 .writefn = dbgbcr_write, .raw_writefn = raw_write 6463 }, 6464 REGINFO_SENTINEL 6465 }; 6466 define_arm_cp_regs(cpu, dbgregs); 6467 } 6468 6469 for (i = 0; i < wrps; i++) { 6470 ARMCPRegInfo dbgregs[] = { 6471 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 6472 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 6473 .access = PL1_RW, .accessfn = access_tda, 6474 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 6475 .writefn = dbgwvr_write, .raw_writefn = raw_write 6476 }, 6477 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 6478 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 6479 .access = PL1_RW, .accessfn = access_tda, 6480 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 6481 .writefn = dbgwcr_write, .raw_writefn = raw_write 6482 }, 6483 REGINFO_SENTINEL 6484 }; 6485 define_arm_cp_regs(cpu, dbgregs); 6486 } 6487 } 6488 6489 static void define_pmu_regs(ARMCPU *cpu) 6490 { 6491 /* 6492 * v7 performance monitor control register: same implementor 6493 * field as main ID register, and we implement four counters in 6494 * addition to the cycle count register. 
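     * The counter count is advertised to the guest via the PMCR.N field,
     * which is filled in from pmcrn in the PMCR_EL0 reset value below.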
6495 */ 6496 unsigned int i, pmcrn = 4; 6497 ARMCPRegInfo pmcr = { 6498 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 6499 .access = PL0_RW, 6500 .type = ARM_CP_IO | ARM_CP_ALIAS, 6501 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 6502 .accessfn = pmreg_access, .writefn = pmcr_write, 6503 .raw_writefn = raw_write, 6504 }; 6505 ARMCPRegInfo pmcr64 = { 6506 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 6507 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 6508 .access = PL0_RW, .accessfn = pmreg_access, 6509 .type = ARM_CP_IO, 6510 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 6511 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | 6512 PMCRLC, 6513 .writefn = pmcr_write, .raw_writefn = raw_write, 6514 }; 6515 define_one_arm_cp_reg(cpu, &pmcr); 6516 define_one_arm_cp_reg(cpu, &pmcr64); 6517 for (i = 0; i < pmcrn; i++) { 6518 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 6519 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 6520 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 6521 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 6522 ARMCPRegInfo pmev_regs[] = { 6523 { .name = pmevcntr_name, .cp = 15, .crn = 14, 6524 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6525 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6526 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6527 .accessfn = pmreg_access }, 6528 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 6529 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 6530 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6531 .type = ARM_CP_IO, 6532 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 6533 .raw_readfn = pmevcntr_rawread, 6534 .raw_writefn = pmevcntr_rawwrite }, 6535 { .name = pmevtyper_name, .cp = 15, .crn = 14, 6536 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 6537 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 6538 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6539 .accessfn = pmreg_access }, 6540 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 6541 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 6542 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 6543 .type = ARM_CP_IO, 6544 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 6545 .raw_writefn = pmevtyper_rawwrite }, 6546 REGINFO_SENTINEL 6547 }; 6548 define_arm_cp_regs(cpu, pmev_regs); 6549 g_free(pmevcntr_name); 6550 g_free(pmevcntr_el0_name); 6551 g_free(pmevtyper_name); 6552 g_free(pmevtyper_el0_name); 6553 } 6554 if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { 6555 ARMCPRegInfo v81_pmu_regs[] = { 6556 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 6557 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 6558 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6559 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 6560 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 6561 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 6562 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6563 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 6564 REGINFO_SENTINEL 6565 }; 6566 define_arm_cp_regs(cpu, v81_pmu_regs); 6567 } 6568 if (cpu_isar_feature(any_pmu_8_4, cpu)) { 6569 static const ARMCPRegInfo v84_pmmir = { 6570 .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, 6571 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, 6572 .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6573 .resetvalue = 0 6574 }; 6575 
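 /* PMMIR_EL1 is exposed read-only and reads as a constant zero, i.e. we report no additional PMU machine identification information. */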
define_one_arm_cp_reg(cpu, &v84_pmmir); 6576 } 6577 } 6578 6579 /* We don't know until after realize whether there's a GICv3 6580 * attached, and that is what registers the gicv3 sysregs. 6581 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1 6582 * at runtime. 6583 */ 6584 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 6585 { 6586 ARMCPU *cpu = env_archcpu(env); 6587 uint64_t pfr1 = cpu->id_pfr1; 6588 6589 if (env->gicv3state) { 6590 pfr1 |= 1 << 28; 6591 } 6592 return pfr1; 6593 } 6594 6595 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 6596 { 6597 ARMCPU *cpu = env_archcpu(env); 6598 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 6599 6600 if (env->gicv3state) { 6601 pfr0 |= 1 << 24; 6602 } 6603 return pfr0; 6604 } 6605 6606 /* Shared logic between LORID and the rest of the LOR* registers. 6607 * Secure state has already been dealt with. 6608 */ 6609 static CPAccessResult access_lor_ns(CPUARMState *env) 6610 { 6611 int el = arm_current_el(env); 6612 6613 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 6614 return CP_ACCESS_TRAP_EL2; 6615 } 6616 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 6617 return CP_ACCESS_TRAP_EL3; 6618 } 6619 return CP_ACCESS_OK; 6620 } 6621 6622 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, 6623 bool isread) 6624 { 6625 if (arm_is_secure_below_el3(env)) { 6626 /* Access ok in secure mode. */ 6627 return CP_ACCESS_OK; 6628 } 6629 return access_lor_ns(env); 6630 } 6631 6632 static CPAccessResult access_lor_other(CPUARMState *env, 6633 const ARMCPRegInfo *ri, bool isread) 6634 { 6635 if (arm_is_secure_below_el3(env)) { 6636 /* Access denied in secure mode. */ 6637 return CP_ACCESS_TRAP; 6638 } 6639 return access_lor_ns(env); 6640 } 6641 6642 /* 6643 * A trivial implementation of ARMv8.1-LOR leaves all of these 6644 * registers fixed at 0, which indicates that there are zero 6645 * supported Limited Ordering regions.
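 * In particular LORID_EL1 reads as zero, telling the guest that there are no LORegions for it to configure via the other LOR* registers.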
6646 */ 6647 static const ARMCPRegInfo lor_reginfo[] = { 6648 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6649 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6650 .access = PL1_RW, .accessfn = access_lor_other, 6651 .type = ARM_CP_CONST, .resetvalue = 0 }, 6652 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6653 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6654 .access = PL1_RW, .accessfn = access_lor_other, 6655 .type = ARM_CP_CONST, .resetvalue = 0 }, 6656 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6657 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6658 .access = PL1_RW, .accessfn = access_lor_other, 6659 .type = ARM_CP_CONST, .resetvalue = 0 }, 6660 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6661 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6662 .access = PL1_RW, .accessfn = access_lor_other, 6663 .type = ARM_CP_CONST, .resetvalue = 0 }, 6664 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6665 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6666 .access = PL1_R, .accessfn = access_lorid, 6667 .type = ARM_CP_CONST, .resetvalue = 0 }, 6668 REGINFO_SENTINEL 6669 }; 6670 6671 #ifdef TARGET_AARCH64 6672 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 6673 bool isread) 6674 { 6675 int el = arm_current_el(env); 6676 6677 if (el < 2 && 6678 arm_feature(env, ARM_FEATURE_EL2) && 6679 !(arm_hcr_el2_eff(env) & HCR_APK)) { 6680 return CP_ACCESS_TRAP_EL2; 6681 } 6682 if (el < 3 && 6683 arm_feature(env, ARM_FEATURE_EL3) && 6684 !(env->cp15.scr_el3 & SCR_APK)) { 6685 return CP_ACCESS_TRAP_EL3; 6686 } 6687 return CP_ACCESS_OK; 6688 } 6689 6690 static const ARMCPRegInfo pauth_reginfo[] = { 6691 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6692 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 6693 .access = PL1_RW, .accessfn = access_pauth, 6694 .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, 6695 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6696 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 6697 .access = PL1_RW, .accessfn = access_pauth, 6698 .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, 6699 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6700 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 6701 .access = PL1_RW, .accessfn = access_pauth, 6702 .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, 6703 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6704 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 6705 .access = PL1_RW, .accessfn = access_pauth, 6706 .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, 6707 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6708 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 6709 .access = PL1_RW, .accessfn = access_pauth, 6710 .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, 6711 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6712 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 6713 .access = PL1_RW, .accessfn = access_pauth, 6714 .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, 6715 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 6716 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 6717 .access = PL1_RW, .accessfn = access_pauth, 6718 .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, 6719 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6720 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 6721 .access = PL1_RW, .accessfn = access_pauth, 6722 .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, 6723 { .name = "APIBKEYLO_EL1", .state = 
ARM_CP_STATE_AA64, 6724 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 6725 .access = PL1_RW, .accessfn = access_pauth, 6726 .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, 6727 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 6728 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 6729 .access = PL1_RW, .accessfn = access_pauth, 6730 .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, 6731 REGINFO_SENTINEL 6732 }; 6733 6734 static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) 6735 { 6736 Error *err = NULL; 6737 uint64_t ret; 6738 6739 /* Success sets NZCV = 0000. */ 6740 env->NF = env->CF = env->VF = 0, env->ZF = 1; 6741 6742 if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) { 6743 /* 6744 * ??? Failed, for unknown reasons in the crypto subsystem. 6745 * The best we can do is log the reason and return the 6746 * timed-out indication to the guest. There is no reason 6747 * we know to expect this failure to be transitory, so the 6748 * guest may well hang retrying the operation. 6749 */ 6750 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", 6751 ri->name, error_get_pretty(err)); 6752 error_free(err); 6753 6754 env->ZF = 0; /* NZCF = 0100 */ 6755 return 0; 6756 } 6757 return ret; 6758 } 6759 6760 /* We do not support re-seeding, so the two registers operate the same. */ 6761 static const ARMCPRegInfo rndr_reginfo[] = { 6762 { .name = "RNDR", .state = ARM_CP_STATE_AA64, 6763 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6764 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, 6765 .access = PL0_R, .readfn = rndr_readfn }, 6766 { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, 6767 .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, 6768 .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, 6769 .access = PL0_R, .readfn = rndr_readfn }, 6770 REGINFO_SENTINEL 6771 }; 6772 6773 #ifndef CONFIG_USER_ONLY 6774 static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, 6775 uint64_t value) 6776 { 6777 ARMCPU *cpu = env_archcpu(env); 6778 /* CTR_EL0 System register -> DminLine, bits [19:16] */ 6779 uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); 6780 uint64_t vaddr_in = (uint64_t) value; 6781 uint64_t vaddr = vaddr_in & ~(dline_size - 1); 6782 void *haddr; 6783 int mem_idx = cpu_mmu_index(env, false); 6784 6785 /* This won't be crossing page boundaries */ 6786 haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); 6787 if (haddr) { 6788 6789 ram_addr_t offset; 6790 MemoryRegion *mr; 6791 6792 /* RCU lock is already being held */ 6793 mr = memory_region_from_host(haddr, &offset); 6794 6795 if (mr) { 6796 memory_region_do_writeback(mr, offset, dline_size); 6797 } 6798 } 6799 } 6800 6801 static const ARMCPRegInfo dcpop_reg[] = { 6802 { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, 6803 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, 6804 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6805 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 6806 REGINFO_SENTINEL 6807 }; 6808 6809 static const ARMCPRegInfo dcpodp_reg[] = { 6810 { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, 6811 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, 6812 .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, 6813 .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, 6814 REGINFO_SENTINEL 6815 }; 6816 #endif /*CONFIG_USER_ONLY*/ 6817 6818 #endif 6819 6820 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, 6821 bool isread) 6822 { 6823 int el = 
arm_current_el(env); 6824 6825 if (el == 0) { 6826 uint64_t sctlr = arm_sctlr(env, el); 6827 if (!(sctlr & SCTLR_EnRCTX)) { 6828 return CP_ACCESS_TRAP; 6829 } 6830 } else if (el == 1) { 6831 uint64_t hcr = arm_hcr_el2_eff(env); 6832 if (hcr & HCR_NV) { 6833 return CP_ACCESS_TRAP_EL2; 6834 } 6835 } 6836 return CP_ACCESS_OK; 6837 } 6838 6839 static const ARMCPRegInfo predinv_reginfo[] = { 6840 { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, 6841 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, 6842 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6843 { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, 6844 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, 6845 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6846 { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, 6847 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, 6848 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6849 /* 6850 * Note the AArch32 opcodes have a different OPC1. 6851 */ 6852 { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, 6853 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, 6854 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6855 { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, 6856 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, 6857 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6858 { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, 6859 .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, 6860 .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, 6861 REGINFO_SENTINEL 6862 }; 6863 6864 static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) 6865 { 6866 /* Read the high 32 bits of the current CCSIDR */ 6867 return extract64(ccsidr_read(env, ri), 32, 32); 6868 } 6869 6870 static const ARMCPRegInfo ccsidr2_reginfo[] = { 6871 { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, 6872 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, 6873 .access = PL1_R, 6874 .accessfn = access_aa64_tid2, 6875 .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, 6876 REGINFO_SENTINEL 6877 }; 6878 6879 static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6880 bool isread) 6881 { 6882 if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { 6883 return CP_ACCESS_TRAP_EL2; 6884 } 6885 6886 return CP_ACCESS_OK; 6887 } 6888 6889 static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, 6890 bool isread) 6891 { 6892 if (arm_feature(env, ARM_FEATURE_V8)) { 6893 return access_aa64_tid3(env, ri, isread); 6894 } 6895 6896 return CP_ACCESS_OK; 6897 } 6898 6899 static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, 6900 bool isread) 6901 { 6902 if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { 6903 return CP_ACCESS_TRAP_EL2; 6904 } 6905 6906 return CP_ACCESS_OK; 6907 } 6908 6909 static const ARMCPRegInfo jazelle_regs[] = { 6910 { .name = "JIDR", 6911 .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, 6912 .access = PL1_R, .accessfn = access_jazelle, 6913 .type = ARM_CP_CONST, .resetvalue = 0 }, 6914 { .name = "JOSCR", 6915 .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, 6916 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6917 { .name = "JMCR", 6918 .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, 6919 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 6920 REGINFO_SENTINEL 6921 }; 6922 6923 static const ARMCPRegInfo vhe_reginfo[] = { 6924 { .name = "CONTEXTIDR_EL2", .state = 
ARM_CP_STATE_AA64, 6925 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, 6926 .access = PL2_RW, 6927 .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) }, 6928 { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, 6929 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, 6930 .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, 6931 .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, 6932 #ifndef CONFIG_USER_ONLY 6933 { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, 6934 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, 6935 .fieldoffset = 6936 offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), 6937 .type = ARM_CP_IO, .access = PL2_RW, 6938 .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, 6939 { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 6940 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, 6941 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 6942 .resetfn = gt_hv_timer_reset, 6943 .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, 6944 { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, 6945 .type = ARM_CP_IO, 6946 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, 6947 .access = PL2_RW, 6948 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), 6949 .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, 6950 { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, 6951 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, 6952 .type = ARM_CP_IO | ARM_CP_ALIAS, 6953 .access = PL2_RW, .accessfn = e2h_access, 6954 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), 6955 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, 6956 { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, 6957 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, 6958 .type = ARM_CP_IO | ARM_CP_ALIAS, 6959 .access = PL2_RW, .accessfn = e2h_access, 6960 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 6961 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, 6962 { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6963 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, 6964 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 6965 .access = PL2_RW, .accessfn = e2h_access, 6966 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, 6967 { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, 6968 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, 6969 .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, 6970 .access = PL2_RW, .accessfn = e2h_access, 6971 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, 6972 { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, 6973 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, 6974 .type = ARM_CP_IO | ARM_CP_ALIAS, 6975 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 6976 .access = PL2_RW, .accessfn = e2h_access, 6977 .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, 6978 { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, 6979 .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, 6980 .type = ARM_CP_IO | ARM_CP_ALIAS, 6981 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 6982 .access = PL2_RW, .accessfn = e2h_access, 6983 .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, 6984 #endif 6985 REGINFO_SENTINEL 6986 }; 6987 6988 #ifndef CONFIG_USER_ONLY 6989 static const ARMCPRegInfo ats1e1_reginfo[] = { 6990 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 6991 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 6992 .access = PL1_W, 
.type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6993 .writefn = ats_write64 }, 6994 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 6995 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 6996 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 6997 .writefn = ats_write64 }, 6998 REGINFO_SENTINEL 6999 }; 7000 7001 static const ARMCPRegInfo ats1cp_reginfo[] = { 7002 { .name = "ATS1CPRP", 7003 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, 7004 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7005 .writefn = ats_write }, 7006 { .name = "ATS1CPWP", 7007 .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, 7008 .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, 7009 .writefn = ats_write }, 7010 REGINFO_SENTINEL 7011 }; 7012 #endif 7013 7014 /* 7015 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and 7016 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field 7017 * is non-zero, which is never for ARMv7, optionally in ARMv8 7018 * and mandatorily for ARMv8.2 and up. 7019 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's 7020 * implementation is RAZ/WI we can ignore this detail, as we 7021 * do for ACTLR. 7022 */ 7023 static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { 7024 { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, 7025 .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, 7026 .access = PL1_RW, .accessfn = access_tacr, 7027 .type = ARM_CP_CONST, .resetvalue = 0 }, 7028 { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 7029 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 7030 .access = PL2_RW, .type = ARM_CP_CONST, 7031 .resetvalue = 0 }, 7032 REGINFO_SENTINEL 7033 }; 7034 7035 void register_cp_regs_for_features(ARMCPU *cpu) 7036 { 7037 /* Register all the coprocessor registers based on feature bits */ 7038 CPUARMState *env = &cpu->env; 7039 if (arm_feature(env, ARM_FEATURE_M)) { 7040 /* M profile has no coprocessor registers */ 7041 return; 7042 } 7043 7044 define_arm_cp_regs(cpu, cp_reginfo); 7045 if (!arm_feature(env, ARM_FEATURE_V8)) { 7046 /* Must go early as it is full of wildcards that may be 7047 * overridden by later definitions. 7048 */ 7049 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 7050 } 7051 7052 if (arm_feature(env, ARM_FEATURE_V6)) { 7053 /* The ID registers all have impdef reset values */ 7054 ARMCPRegInfo v6_idregs[] = { 7055 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 7056 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 7057 .access = PL1_R, .type = ARM_CP_CONST, 7058 .accessfn = access_aa32_tid3, 7059 .resetvalue = cpu->id_pfr0 }, 7060 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 7061 * the value of the GIC field until after we define these regs. 
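 * Instead it is registered with a readfn (id_pfr1_read) which ORs in the GIC field at read time when a GICv3 is in use.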
7062 */ 7063 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 7064 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 7065 .access = PL1_R, .type = ARM_CP_NO_RAW, 7066 .accessfn = access_aa32_tid3, 7067 .readfn = id_pfr1_read, 7068 .writefn = arm_cp_write_ignore }, 7069 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 7070 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 7071 .access = PL1_R, .type = ARM_CP_CONST, 7072 .accessfn = access_aa32_tid3, 7073 .resetvalue = cpu->isar.id_dfr0 }, 7074 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 7075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 7076 .access = PL1_R, .type = ARM_CP_CONST, 7077 .accessfn = access_aa32_tid3, 7078 .resetvalue = cpu->id_afr0 }, 7079 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 7080 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 7081 .access = PL1_R, .type = ARM_CP_CONST, 7082 .accessfn = access_aa32_tid3, 7083 .resetvalue = cpu->isar.id_mmfr0 }, 7084 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 7085 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 7086 .access = PL1_R, .type = ARM_CP_CONST, 7087 .accessfn = access_aa32_tid3, 7088 .resetvalue = cpu->isar.id_mmfr1 }, 7089 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 7090 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 7091 .access = PL1_R, .type = ARM_CP_CONST, 7092 .accessfn = access_aa32_tid3, 7093 .resetvalue = cpu->isar.id_mmfr2 }, 7094 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 7095 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 7096 .access = PL1_R, .type = ARM_CP_CONST, 7097 .accessfn = access_aa32_tid3, 7098 .resetvalue = cpu->isar.id_mmfr3 }, 7099 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 7100 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 7101 .access = PL1_R, .type = ARM_CP_CONST, 7102 .accessfn = access_aa32_tid3, 7103 .resetvalue = cpu->isar.id_isar0 }, 7104 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 7105 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 7106 .access = PL1_R, .type = ARM_CP_CONST, 7107 .accessfn = access_aa32_tid3, 7108 .resetvalue = cpu->isar.id_isar1 }, 7109 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 7110 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 7111 .access = PL1_R, .type = ARM_CP_CONST, 7112 .accessfn = access_aa32_tid3, 7113 .resetvalue = cpu->isar.id_isar2 }, 7114 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 7115 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 7116 .access = PL1_R, .type = ARM_CP_CONST, 7117 .accessfn = access_aa32_tid3, 7118 .resetvalue = cpu->isar.id_isar3 }, 7119 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 7120 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 7121 .access = PL1_R, .type = ARM_CP_CONST, 7122 .accessfn = access_aa32_tid3, 7123 .resetvalue = cpu->isar.id_isar4 }, 7124 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 7125 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 7126 .access = PL1_R, .type = ARM_CP_CONST, 7127 .accessfn = access_aa32_tid3, 7128 .resetvalue = cpu->isar.id_isar5 }, 7129 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 7130 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 7131 .access = PL1_R, .type = ARM_CP_CONST, 7132 .accessfn = access_aa32_tid3, 7133 .resetvalue = cpu->isar.id_mmfr4 }, 7134 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 7135 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 7136 .access = PL1_R, .type = ARM_CP_CONST, 7137 .accessfn = access_aa32_tid3, 7138 .resetvalue = cpu->isar.id_isar6 }, 7139 REGINFO_SENTINEL 7140 }; 
7141 define_arm_cp_regs(cpu, v6_idregs); 7142 define_arm_cp_regs(cpu, v6_cp_reginfo); 7143 } else { 7144 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 7145 } 7146 if (arm_feature(env, ARM_FEATURE_V6K)) { 7147 define_arm_cp_regs(cpu, v6k_cp_reginfo); 7148 } 7149 if (arm_feature(env, ARM_FEATURE_V7MP) && 7150 !arm_feature(env, ARM_FEATURE_PMSA)) { 7151 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 7152 } 7153 if (arm_feature(env, ARM_FEATURE_V7VE)) { 7154 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 7155 } 7156 if (arm_feature(env, ARM_FEATURE_V7)) { 7157 ARMCPRegInfo clidr = { 7158 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 7159 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 7160 .access = PL1_R, .type = ARM_CP_CONST, 7161 .accessfn = access_aa64_tid2, 7162 .resetvalue = cpu->clidr 7163 }; 7164 define_one_arm_cp_reg(cpu, &clidr); 7165 define_arm_cp_regs(cpu, v7_cp_reginfo); 7166 define_debug_regs(cpu); 7167 define_pmu_regs(cpu); 7168 } else { 7169 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 7170 } 7171 if (arm_feature(env, ARM_FEATURE_V8)) { 7172 /* AArch64 ID registers, which all have impdef reset values. 7173 * Note that within the ID register ranges the unused slots 7174 * must all RAZ, not UNDEF; future architecture versions may 7175 * define new registers here. 7176 */ 7177 ARMCPRegInfo v8_idregs[] = { 7178 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 7179 * know the right value for the GIC field until after we 7180 * define these regs. 7181 */ 7182 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 7183 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 7184 .access = PL1_R, .type = ARM_CP_NO_RAW, 7185 .accessfn = access_aa64_tid3, 7186 .readfn = id_aa64pfr0_read, 7187 .writefn = arm_cp_write_ignore }, 7188 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 7189 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 7190 .access = PL1_R, .type = ARM_CP_CONST, 7191 .accessfn = access_aa64_tid3, 7192 .resetvalue = cpu->isar.id_aa64pfr1}, 7193 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7194 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 7195 .access = PL1_R, .type = ARM_CP_CONST, 7196 .accessfn = access_aa64_tid3, 7197 .resetvalue = 0 }, 7198 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7199 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 7200 .access = PL1_R, .type = ARM_CP_CONST, 7201 .accessfn = access_aa64_tid3, 7202 .resetvalue = 0 }, 7203 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 7204 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 7205 .access = PL1_R, .type = ARM_CP_CONST, 7206 .accessfn = access_aa64_tid3, 7207 /* At present, only SVEver == 0 is defined anyway. 
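 * so exposing ID_AA64ZFR0_EL1 as a constant zero remains an accurate description of our SVE support.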
*/ 7208 .resetvalue = 0 }, 7209 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7210 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 7211 .access = PL1_R, .type = ARM_CP_CONST, 7212 .accessfn = access_aa64_tid3, 7213 .resetvalue = 0 }, 7214 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7215 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 7216 .access = PL1_R, .type = ARM_CP_CONST, 7217 .accessfn = access_aa64_tid3, 7218 .resetvalue = 0 }, 7219 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7220 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 7221 .access = PL1_R, .type = ARM_CP_CONST, 7222 .accessfn = access_aa64_tid3, 7223 .resetvalue = 0 }, 7224 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 7225 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 7226 .access = PL1_R, .type = ARM_CP_CONST, 7227 .accessfn = access_aa64_tid3, 7228 .resetvalue = cpu->isar.id_aa64dfr0 }, 7229 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 7230 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 7231 .access = PL1_R, .type = ARM_CP_CONST, 7232 .accessfn = access_aa64_tid3, 7233 .resetvalue = cpu->isar.id_aa64dfr1 }, 7234 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7235 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 7236 .access = PL1_R, .type = ARM_CP_CONST, 7237 .accessfn = access_aa64_tid3, 7238 .resetvalue = 0 }, 7239 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7240 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 7241 .access = PL1_R, .type = ARM_CP_CONST, 7242 .accessfn = access_aa64_tid3, 7243 .resetvalue = 0 }, 7244 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 7245 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 7246 .access = PL1_R, .type = ARM_CP_CONST, 7247 .accessfn = access_aa64_tid3, 7248 .resetvalue = cpu->id_aa64afr0 }, 7249 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 7250 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 7251 .access = PL1_R, .type = ARM_CP_CONST, 7252 .accessfn = access_aa64_tid3, 7253 .resetvalue = cpu->id_aa64afr1 }, 7254 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7255 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 7256 .access = PL1_R, .type = ARM_CP_CONST, 7257 .accessfn = access_aa64_tid3, 7258 .resetvalue = 0 }, 7259 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7260 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 7261 .access = PL1_R, .type = ARM_CP_CONST, 7262 .accessfn = access_aa64_tid3, 7263 .resetvalue = 0 }, 7264 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 7265 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 7266 .access = PL1_R, .type = ARM_CP_CONST, 7267 .accessfn = access_aa64_tid3, 7268 .resetvalue = cpu->isar.id_aa64isar0 }, 7269 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 7270 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 7271 .access = PL1_R, .type = ARM_CP_CONST, 7272 .accessfn = access_aa64_tid3, 7273 .resetvalue = cpu->isar.id_aa64isar1 }, 7274 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7275 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 7276 .access = PL1_R, .type = ARM_CP_CONST, 7277 .accessfn = access_aa64_tid3, 7278 .resetvalue = 0 }, 7279 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7280 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 7281 .access = PL1_R, .type = ARM_CP_CONST, 7282 .accessfn = access_aa64_tid3, 7283 
.resetvalue = 0 }, 7284 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7285 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 7286 .access = PL1_R, .type = ARM_CP_CONST, 7287 .accessfn = access_aa64_tid3, 7288 .resetvalue = 0 }, 7289 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7290 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 7291 .access = PL1_R, .type = ARM_CP_CONST, 7292 .accessfn = access_aa64_tid3, 7293 .resetvalue = 0 }, 7294 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7295 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, 7296 .access = PL1_R, .type = ARM_CP_CONST, 7297 .accessfn = access_aa64_tid3, 7298 .resetvalue = 0 }, 7299 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7300 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 7301 .access = PL1_R, .type = ARM_CP_CONST, 7302 .accessfn = access_aa64_tid3, 7303 .resetvalue = 0 }, 7304 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 7305 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 7306 .access = PL1_R, .type = ARM_CP_CONST, 7307 .accessfn = access_aa64_tid3, 7308 .resetvalue = cpu->isar.id_aa64mmfr0 }, 7309 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 7310 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 7311 .access = PL1_R, .type = ARM_CP_CONST, 7312 .accessfn = access_aa64_tid3, 7313 .resetvalue = cpu->isar.id_aa64mmfr1 }, 7314 { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, 7315 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 7316 .access = PL1_R, .type = ARM_CP_CONST, 7317 .accessfn = access_aa64_tid3, 7318 .resetvalue = cpu->isar.id_aa64mmfr2 }, 7319 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7320 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 7321 .access = PL1_R, .type = ARM_CP_CONST, 7322 .accessfn = access_aa64_tid3, 7323 .resetvalue = 0 }, 7324 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7325 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 7326 .access = PL1_R, .type = ARM_CP_CONST, 7327 .accessfn = access_aa64_tid3, 7328 .resetvalue = 0 }, 7329 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7330 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 7331 .access = PL1_R, .type = ARM_CP_CONST, 7332 .accessfn = access_aa64_tid3, 7333 .resetvalue = 0 }, 7334 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7335 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 7336 .access = PL1_R, .type = ARM_CP_CONST, 7337 .accessfn = access_aa64_tid3, 7338 .resetvalue = 0 }, 7339 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7340 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 7341 .access = PL1_R, .type = ARM_CP_CONST, 7342 .accessfn = access_aa64_tid3, 7343 .resetvalue = 0 }, 7344 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 7345 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 7346 .access = PL1_R, .type = ARM_CP_CONST, 7347 .accessfn = access_aa64_tid3, 7348 .resetvalue = cpu->isar.mvfr0 }, 7349 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 7350 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 7351 .access = PL1_R, .type = ARM_CP_CONST, 7352 .accessfn = access_aa64_tid3, 7353 .resetvalue = cpu->isar.mvfr1 }, 7354 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 7355 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 7356 .access = PL1_R, .type = ARM_CP_CONST, 7357 .accessfn = access_aa64_tid3, 7358 .resetvalue = cpu->isar.mvfr2 }, 7359 { 
.name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7360 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 7361 .access = PL1_R, .type = ARM_CP_CONST, 7362 .accessfn = access_aa64_tid3, 7363 .resetvalue = 0 }, 7364 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7365 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 7366 .access = PL1_R, .type = ARM_CP_CONST, 7367 .accessfn = access_aa64_tid3, 7368 .resetvalue = 0 }, 7369 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7370 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 7371 .access = PL1_R, .type = ARM_CP_CONST, 7372 .accessfn = access_aa64_tid3, 7373 .resetvalue = 0 }, 7374 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7375 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 7376 .access = PL1_R, .type = ARM_CP_CONST, 7377 .accessfn = access_aa64_tid3, 7378 .resetvalue = 0 }, 7379 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 7380 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 7381 .access = PL1_R, .type = ARM_CP_CONST, 7382 .accessfn = access_aa64_tid3, 7383 .resetvalue = 0 }, 7384 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 7385 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 7386 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7387 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 7388 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 7389 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 7390 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7391 .resetvalue = cpu->pmceid0 }, 7392 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 7393 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 7394 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7395 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 7396 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 7397 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 7398 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 7399 .resetvalue = cpu->pmceid1 }, 7400 REGINFO_SENTINEL 7401 }; 7402 #ifdef CONFIG_USER_ONLY 7403 ARMCPRegUserSpaceInfo v8_user_idregs[] = { 7404 { .name = "ID_AA64PFR0_EL1", 7405 .exported_bits = 0x000f000f00ff0000, 7406 .fixed_bits = 0x0000000000000011 }, 7407 { .name = "ID_AA64PFR1_EL1", 7408 .exported_bits = 0x00000000000000f0 }, 7409 { .name = "ID_AA64PFR*_EL1_RESERVED", 7410 .is_glob = true }, 7411 { .name = "ID_AA64ZFR0_EL1" }, 7412 { .name = "ID_AA64MMFR0_EL1", 7413 .fixed_bits = 0x00000000ff000000 }, 7414 { .name = "ID_AA64MMFR1_EL1" }, 7415 { .name = "ID_AA64MMFR*_EL1_RESERVED", 7416 .is_glob = true }, 7417 { .name = "ID_AA64DFR0_EL1", 7418 .fixed_bits = 0x0000000000000006 }, 7419 { .name = "ID_AA64DFR1_EL1" }, 7420 { .name = "ID_AA64DFR*_EL1_RESERVED", 7421 .is_glob = true }, 7422 { .name = "ID_AA64AFR*", 7423 .is_glob = true }, 7424 { .name = "ID_AA64ISAR0_EL1", 7425 .exported_bits = 0x00fffffff0fffff0 }, 7426 { .name = "ID_AA64ISAR1_EL1", 7427 .exported_bits = 0x000000f0ffffffff }, 7428 { .name = "ID_AA64ISAR*_EL1_RESERVED", 7429 .is_glob = true }, 7430 REGUSERINFO_SENTINEL 7431 }; 7432 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 7433 #endif 7434 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 7435 if (!arm_feature(env, ARM_FEATURE_EL3) && 7436 !arm_feature(env, ARM_FEATURE_EL2)) { 7437 ARMCPRegInfo rvbar = { 7438 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 7439 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 7440 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 7441 }; 
7442 define_one_arm_cp_reg(cpu, &rvbar); 7443 } 7444 define_arm_cp_regs(cpu, v8_idregs); 7445 define_arm_cp_regs(cpu, v8_cp_reginfo); 7446 } 7447 if (arm_feature(env, ARM_FEATURE_EL2)) { 7448 uint64_t vmpidr_def = mpidr_read_val(env); 7449 ARMCPRegInfo vpidr_regs[] = { 7450 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 7451 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7452 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7453 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 7454 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 7455 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 7456 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7457 .access = PL2_RW, .resetvalue = cpu->midr, 7458 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7459 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 7460 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7461 .access = PL2_RW, .accessfn = access_el3_aa32ns, 7462 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 7463 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 7464 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 7465 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7466 .access = PL2_RW, 7467 .resetvalue = vmpidr_def, 7468 .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, 7469 REGINFO_SENTINEL 7470 }; 7471 define_arm_cp_regs(cpu, vpidr_regs); 7472 define_arm_cp_regs(cpu, el2_cp_reginfo); 7473 if (arm_feature(env, ARM_FEATURE_V8)) { 7474 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 7475 } 7476 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 7477 if (!arm_feature(env, ARM_FEATURE_EL3)) { 7478 ARMCPRegInfo rvbar = { 7479 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 7480 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 7481 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 7482 }; 7483 define_one_arm_cp_reg(cpu, &rvbar); 7484 } 7485 } else { 7486 /* If EL2 is missing but higher ELs are enabled, we need to 7487 * register the no_el2 reginfos. 7488 */ 7489 if (arm_feature(env, ARM_FEATURE_EL3)) { 7490 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 7491 * of MIDR_EL1 and MPIDR_EL1. 
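 * Here VPIDR_EL2 is a constant copy of MIDR, and VMPIDR_EL2 reads via mpidr_read() with writes ignored.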
7492 */ 7493 ARMCPRegInfo vpidr_regs[] = { 7494 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7495 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 7496 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 7497 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 7498 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 7499 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 7500 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 7501 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 7502 .type = ARM_CP_NO_RAW, 7503 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 7504 REGINFO_SENTINEL 7505 }; 7506 define_arm_cp_regs(cpu, vpidr_regs); 7507 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 7508 if (arm_feature(env, ARM_FEATURE_V8)) { 7509 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 7510 } 7511 } 7512 } 7513 if (arm_feature(env, ARM_FEATURE_EL3)) { 7514 define_arm_cp_regs(cpu, el3_cp_reginfo); 7515 ARMCPRegInfo el3_regs[] = { 7516 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 7517 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 7518 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 7519 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 7520 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 7521 .access = PL3_RW, 7522 .raw_writefn = raw_write, .writefn = sctlr_write, 7523 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 7524 .resetvalue = cpu->reset_sctlr }, 7525 REGINFO_SENTINEL 7526 }; 7527 7528 define_arm_cp_regs(cpu, el3_regs); 7529 } 7530 /* The behaviour of NSACR is sufficiently various that we don't 7531 * try to describe it in a single reginfo: 7532 * if EL3 is 64 bit, then trap to EL3 from S EL1, 7533 * reads as constant 0xc00 from NS EL1 and NS EL2 7534 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 7535 * if v7 without EL3, register doesn't exist 7536 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 7537 */ 7538 if (arm_feature(env, ARM_FEATURE_EL3)) { 7539 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7540 ARMCPRegInfo nsacr = { 7541 .name = "NSACR", .type = ARM_CP_CONST, 7542 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7543 .access = PL1_RW, .accessfn = nsacr_access, 7544 .resetvalue = 0xc00 7545 }; 7546 define_one_arm_cp_reg(cpu, &nsacr); 7547 } else { 7548 ARMCPRegInfo nsacr = { 7549 .name = "NSACR", 7550 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7551 .access = PL3_RW | PL1_R, 7552 .resetvalue = 0, 7553 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 7554 }; 7555 define_one_arm_cp_reg(cpu, &nsacr); 7556 } 7557 } else { 7558 if (arm_feature(env, ARM_FEATURE_V8)) { 7559 ARMCPRegInfo nsacr = { 7560 .name = "NSACR", .type = ARM_CP_CONST, 7561 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 7562 .access = PL1_R, 7563 .resetvalue = 0xc00 7564 }; 7565 define_one_arm_cp_reg(cpu, &nsacr); 7566 } 7567 } 7568 7569 if (arm_feature(env, ARM_FEATURE_PMSA)) { 7570 if (arm_feature(env, ARM_FEATURE_V6)) { 7571 /* PMSAv6 not implemented */ 7572 assert(arm_feature(env, ARM_FEATURE_V7)); 7573 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 7574 define_arm_cp_regs(cpu, pmsav7_cp_reginfo); 7575 } else { 7576 define_arm_cp_regs(cpu, pmsav5_cp_reginfo); 7577 } 7578 } else { 7579 define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); 7580 define_arm_cp_regs(cpu, vmsa_cp_reginfo); 7581 /* TTCBR2 is introduced with ARMv8.2-AA32HPD. 
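 * It holds the hierarchical permission disable (HPD) control bits for AArch32 stage 1 translation.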
*/ 7582 if (cpu_isar_feature(aa32_hpd, cpu)) { 7583 define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); 7584 } 7585 } 7586 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { 7587 define_arm_cp_regs(cpu, t2ee_cp_reginfo); 7588 } 7589 if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { 7590 define_arm_cp_regs(cpu, generic_timer_cp_reginfo); 7591 } 7592 if (arm_feature(env, ARM_FEATURE_VAPA)) { 7593 define_arm_cp_regs(cpu, vapa_cp_reginfo); 7594 } 7595 if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { 7596 define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); 7597 } 7598 if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { 7599 define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); 7600 } 7601 if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { 7602 define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); 7603 } 7604 if (arm_feature(env, ARM_FEATURE_OMAPCP)) { 7605 define_arm_cp_regs(cpu, omap_cp_reginfo); 7606 } 7607 if (arm_feature(env, ARM_FEATURE_STRONGARM)) { 7608 define_arm_cp_regs(cpu, strongarm_cp_reginfo); 7609 } 7610 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7611 define_arm_cp_regs(cpu, xscale_cp_reginfo); 7612 } 7613 if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { 7614 define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); 7615 } 7616 if (arm_feature(env, ARM_FEATURE_LPAE)) { 7617 define_arm_cp_regs(cpu, lpae_cp_reginfo); 7618 } 7619 if (cpu_isar_feature(aa32_jazelle, cpu)) { 7620 define_arm_cp_regs(cpu, jazelle_regs); 7621 } 7622 /* Slightly awkwardly, the OMAP and StrongARM cores need all of 7623 * cp15 crn=0 to be writes-ignored, whereas for other cores they should 7624 * be read-only (ie write causes UNDEF exception). 7625 */ 7626 { 7627 ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { 7628 /* Pre-v8 MIDR space. 7629 * Note that the MIDR isn't a simple constant register because 7630 * of the TI925 behaviour where writes to another register can 7631 * cause the MIDR value to change. 7632 * 7633 * Unimplemented registers in the c15 0 0 0 space default to 7634 * MIDR. Define MIDR first as this entire space, then CTR, TCMTR 7635 * and friends override accordingly. 7636 */ 7637 { .name = "MIDR", 7638 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, 7639 .access = PL1_R, .resetvalue = cpu->midr, 7640 .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, 7641 .readfn = midr_read, 7642 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7643 .type = ARM_CP_OVERRIDE }, 7644 /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. 
*/ 7645 { .name = "DUMMY", 7646 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 7647 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7648 { .name = "DUMMY", 7649 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 7650 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7651 { .name = "DUMMY", 7652 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 7653 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7654 { .name = "DUMMY", 7655 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 7656 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7657 { .name = "DUMMY", 7658 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 7659 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 7660 REGINFO_SENTINEL 7661 }; 7662 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 7663 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 7664 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 7665 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 7666 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 7667 .readfn = midr_read }, 7668 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 7669 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7670 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7671 .access = PL1_R, .resetvalue = cpu->midr }, 7672 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 7673 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 7674 .access = PL1_R, .resetvalue = cpu->midr }, 7675 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 7676 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 7677 .access = PL1_R, 7678 .accessfn = access_aa64_tid1, 7679 .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 7680 REGINFO_SENTINEL 7681 }; 7682 ARMCPRegInfo id_cp_reginfo[] = { 7683 /* These are common to v8 and pre-v8 */ 7684 { .name = "CTR", 7685 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 7686 .access = PL1_R, .accessfn = ctr_el0_access, 7687 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7688 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 7689 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 7690 .access = PL0_R, .accessfn = ctr_el0_access, 7691 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 7692 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 7693 { .name = "TCMTR", 7694 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 7695 .access = PL1_R, 7696 .accessfn = access_aa32_tid1, 7697 .type = ARM_CP_CONST, .resetvalue = 0 }, 7698 REGINFO_SENTINEL 7699 }; 7700 /* TLBTR is specific to VMSA */ 7701 ARMCPRegInfo id_tlbtr_reginfo = { 7702 .name = "TLBTR", 7703 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 7704 .access = PL1_R, 7705 .accessfn = access_aa32_tid1, 7706 .type = ARM_CP_CONST, .resetvalue = 0, 7707 }; 7708 /* MPUIR is specific to PMSA V6+ */ 7709 ARMCPRegInfo id_mpuir_reginfo = { 7710 .name = "MPUIR", 7711 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 7712 .access = PL1_R, .type = ARM_CP_CONST, 7713 .resetvalue = cpu->pmsav7_dregion << 8 7714 }; 7715 ARMCPRegInfo crn0_wi_reginfo = { 7716 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 7717 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 7718 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 7719 }; 7720 #ifdef CONFIG_USER_ONLY 7721 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 7722 { .name = "MIDR_EL1", 7723 .exported_bits = 0x00000000ffffffff }, 7724 { .name = "REVIDR_EL1" }, 7725 REGUSERINFO_SENTINEL 7726 }; 7727 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 7728 #endif 7729 
if (arm_feature(env, ARM_FEATURE_OMAPCP) || 7730 arm_feature(env, ARM_FEATURE_STRONGARM)) { 7731 ARMCPRegInfo *r; 7732 /* Register the blanket "writes ignored" value first to cover the 7733 * whole space. Then update the specific ID registers to allow write 7734 * access, so that they ignore writes rather than causing them to 7735 * UNDEF. 7736 */ 7737 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 7738 for (r = id_pre_v8_midr_cp_reginfo; 7739 r->type != ARM_CP_SENTINEL; r++) { 7740 r->access = PL1_RW; 7741 } 7742 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 7743 r->access = PL1_RW; 7744 } 7745 id_mpuir_reginfo.access = PL1_RW; 7746 id_tlbtr_reginfo.access = PL1_RW; 7747 } 7748 if (arm_feature(env, ARM_FEATURE_V8)) { 7749 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 7750 } else { 7751 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 7752 } 7753 define_arm_cp_regs(cpu, id_cp_reginfo); 7754 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 7755 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 7756 } else if (arm_feature(env, ARM_FEATURE_V7)) { 7757 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 7758 } 7759 } 7760 7761 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 7762 ARMCPRegInfo mpidr_cp_reginfo[] = { 7763 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 7764 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 7765 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 7766 REGINFO_SENTINEL 7767 }; 7768 #ifdef CONFIG_USER_ONLY 7769 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 7770 { .name = "MPIDR_EL1", 7771 .fixed_bits = 0x0000000080000000 }, 7772 REGUSERINFO_SENTINEL 7773 }; 7774 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 7775 #endif 7776 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 7777 } 7778 7779 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 7780 ARMCPRegInfo auxcr_reginfo[] = { 7781 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 7782 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 7783 .access = PL1_RW, .accessfn = access_tacr, 7784 .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, 7785 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 7786 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 7787 .access = PL2_RW, .type = ARM_CP_CONST, 7788 .resetvalue = 0 }, 7789 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 7790 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 7791 .access = PL3_RW, .type = ARM_CP_CONST, 7792 .resetvalue = 0 }, 7793 REGINFO_SENTINEL 7794 }; 7795 define_arm_cp_regs(cpu, auxcr_reginfo); 7796 if (cpu_isar_feature(aa32_ac2, cpu)) { 7797 define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); 7798 } 7799 } 7800 7801 if (arm_feature(env, ARM_FEATURE_CBAR)) { 7802 /* 7803 * CBAR is IMPDEF, but common on Arm Cortex-A implementations. 7804 * There are two flavours: 7805 * (1) older 32-bit only cores have a simple 32-bit CBAR 7806 * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a 7807 * 32-bit register visible to AArch32 at a different encoding 7808 * to the "flavour 1" register and with the bits rearranged to 7809 * be able to squash a 64-bit address into the 32-bit view. 7810 * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but 7811 * in future if we support AArch32-only configs of some of the 7812 * AArch64 cores we might need to add a specific feature flag 7813 * to indicate cores with "flavour 2" CBAR. 7814 */ 7815 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7816 /* 32 bit view is [31:18] 0...0 [43:32]. 
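 * For example, a reset_cbar of 0x00000ABC40000000 is presented to AArch32 as 0x40000ABC.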
*/ 7817 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 7818 | extract64(cpu->reset_cbar, 32, 12); 7819 ARMCPRegInfo cbar_reginfo[] = { 7820 { .name = "CBAR", 7821 .type = ARM_CP_CONST, 7822 .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, 7823 .access = PL1_R, .resetvalue = cbar32 }, 7824 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 7825 .type = ARM_CP_CONST, 7826 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 7827 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 7828 REGINFO_SENTINEL 7829 }; 7830 /* We don't implement a r/w 64 bit CBAR currently */ 7831 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 7832 define_arm_cp_regs(cpu, cbar_reginfo); 7833 } else { 7834 ARMCPRegInfo cbar = { 7835 .name = "CBAR", 7836 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 7837 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 7838 .fieldoffset = offsetof(CPUARMState, 7839 cp15.c15_config_base_address) 7840 }; 7841 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 7842 cbar.access = PL1_R; 7843 cbar.fieldoffset = 0; 7844 cbar.type = ARM_CP_CONST; 7845 } 7846 define_one_arm_cp_reg(cpu, &cbar); 7847 } 7848 } 7849 7850 if (arm_feature(env, ARM_FEATURE_VBAR)) { 7851 ARMCPRegInfo vbar_cp_reginfo[] = { 7852 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 7853 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 7854 .access = PL1_RW, .writefn = vbar_write, 7855 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 7856 offsetof(CPUARMState, cp15.vbar_ns) }, 7857 .resetvalue = 0 }, 7858 REGINFO_SENTINEL 7859 }; 7860 define_arm_cp_regs(cpu, vbar_cp_reginfo); 7861 } 7862 7863 /* Generic registers whose values depend on the implementation */ 7864 { 7865 ARMCPRegInfo sctlr = { 7866 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 7867 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 7868 .access = PL1_RW, .accessfn = access_tvm_trvm, 7869 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 7870 offsetof(CPUARMState, cp15.sctlr_ns) }, 7871 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 7872 .raw_writefn = raw_write, 7873 }; 7874 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 7875 /* Normally we would always end the TB on an SCTLR write, but Linux 7876 * arch/arm/mach-pxa/sleep.S expects two instructions following 7877 * an MMU enable to execute from cache. Imitate this behaviour. 
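 * We do this by suppressing the usual end-of-TB on SCTLR writes for XScale cores.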
7878 */ 7879 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 7880 } 7881 define_one_arm_cp_reg(cpu, &sctlr); 7882 } 7883 7884 if (cpu_isar_feature(aa64_lor, cpu)) { 7885 define_arm_cp_regs(cpu, lor_reginfo); 7886 } 7887 if (cpu_isar_feature(aa64_pan, cpu)) { 7888 define_one_arm_cp_reg(cpu, &pan_reginfo); 7889 } 7890 #ifndef CONFIG_USER_ONLY 7891 if (cpu_isar_feature(aa64_ats1e1, cpu)) { 7892 define_arm_cp_regs(cpu, ats1e1_reginfo); 7893 } 7894 if (cpu_isar_feature(aa32_ats1e1, cpu)) { 7895 define_arm_cp_regs(cpu, ats1cp_reginfo); 7896 } 7897 #endif 7898 if (cpu_isar_feature(aa64_uao, cpu)) { 7899 define_one_arm_cp_reg(cpu, &uao_reginfo); 7900 } 7901 7902 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7903 define_arm_cp_regs(cpu, vhe_reginfo); 7904 } 7905 7906 if (cpu_isar_feature(aa64_sve, cpu)) { 7907 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 7908 if (arm_feature(env, ARM_FEATURE_EL2)) { 7909 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 7910 } else { 7911 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 7912 } 7913 if (arm_feature(env, ARM_FEATURE_EL3)) { 7914 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 7915 } 7916 } 7917 7918 #ifdef TARGET_AARCH64 7919 if (cpu_isar_feature(aa64_pauth, cpu)) { 7920 define_arm_cp_regs(cpu, pauth_reginfo); 7921 } 7922 if (cpu_isar_feature(aa64_rndr, cpu)) { 7923 define_arm_cp_regs(cpu, rndr_reginfo); 7924 } 7925 #ifndef CONFIG_USER_ONLY 7926 /* Data Cache clean instructions up to PoP */ 7927 if (cpu_isar_feature(aa64_dcpop, cpu)) { 7928 define_one_arm_cp_reg(cpu, dcpop_reg); 7929 7930 if (cpu_isar_feature(aa64_dcpodp, cpu)) { 7931 define_one_arm_cp_reg(cpu, dcpodp_reg); 7932 } 7933 } 7934 #endif /*CONFIG_USER_ONLY*/ 7935 #endif 7936 7937 if (cpu_isar_feature(any_predinv, cpu)) { 7938 define_arm_cp_regs(cpu, predinv_reginfo); 7939 } 7940 7941 if (cpu_isar_feature(any_ccidx, cpu)) { 7942 define_arm_cp_regs(cpu, ccsidr2_reginfo); 7943 } 7944 7945 #ifndef CONFIG_USER_ONLY 7946 /* 7947 * Register redirections and aliases must be done last, 7948 * after the registers from the other extensions have been defined. 7949 */ 7950 if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { 7951 define_arm_vh_e2h_redirects_aliases(cpu); 7952 } 7953 #endif 7954 } 7955 7956 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 7957 { 7958 CPUState *cs = CPU(cpu); 7959 CPUARMState *env = &cpu->env; 7960 7961 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 7962 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 7963 aarch64_fpu_gdb_set_reg, 7964 34, "aarch64-fpu.xml", 0); 7965 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 7966 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7967 51, "arm-neon.xml", 0); 7968 } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { 7969 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7970 35, "arm-vfp3.xml", 0); 7971 } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { 7972 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 7973 19, "arm-vfp.xml", 0); 7974 } 7975 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 7976 arm_gen_dynamic_xml(cs), 7977 "system-registers.xml", 0); 7978 } 7979 7980 /* Sort alphabetically by type name, except for "any". 
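 * The "any" CPU type always sorts to the end of the list.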
*/ 7981 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 7982 { 7983 ObjectClass *class_a = (ObjectClass *)a; 7984 ObjectClass *class_b = (ObjectClass *)b; 7985 const char *name_a, *name_b; 7986 7987 name_a = object_class_get_name(class_a); 7988 name_b = object_class_get_name(class_b); 7989 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 7990 return 1; 7991 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 7992 return -1; 7993 } else { 7994 return strcmp(name_a, name_b); 7995 } 7996 } 7997 7998 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 7999 { 8000 ObjectClass *oc = data; 8001 const char *typename; 8002 char *name; 8003 8004 typename = object_class_get_name(oc); 8005 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8006 qemu_printf(" %s\n", name); 8007 g_free(name); 8008 } 8009 8010 void arm_cpu_list(void) 8011 { 8012 GSList *list; 8013 8014 list = object_class_get_list(TYPE_ARM_CPU, false); 8015 list = g_slist_sort(list, arm_cpu_list_compare); 8016 qemu_printf("Available CPUs:\n"); 8017 g_slist_foreach(list, arm_cpu_list_entry, NULL); 8018 g_slist_free(list); 8019 } 8020 8021 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 8022 { 8023 ObjectClass *oc = data; 8024 CpuDefinitionInfoList **cpu_list = user_data; 8025 CpuDefinitionInfoList *entry; 8026 CpuDefinitionInfo *info; 8027 const char *typename; 8028 8029 typename = object_class_get_name(oc); 8030 info = g_malloc0(sizeof(*info)); 8031 info->name = g_strndup(typename, 8032 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 8033 info->q_typename = g_strdup(typename); 8034 8035 entry = g_malloc0(sizeof(*entry)); 8036 entry->value = info; 8037 entry->next = *cpu_list; 8038 *cpu_list = entry; 8039 } 8040 8041 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 8042 { 8043 CpuDefinitionInfoList *cpu_list = NULL; 8044 GSList *list; 8045 8046 list = object_class_get_list(TYPE_ARM_CPU, false); 8047 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 8048 g_slist_free(list); 8049 8050 return cpu_list; 8051 } 8052 8053 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 8054 void *opaque, int state, int secstate, 8055 int crm, int opc1, int opc2, 8056 const char *name) 8057 { 8058 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 8059 * add a single reginfo struct to the hash table. 8060 */ 8061 uint32_t *key = g_new(uint32_t, 1); 8062 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 8063 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 8064 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 8065 8066 r2->name = g_strdup(name); 8067 /* Reset the secure state to the specific incoming state. This is 8068 * necessary as the register may have been defined with both states. 8069 */ 8070 r2->secure = secstate; 8071 8072 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 8073 /* Register is banked (using both entries in array). 8074 * Overwriting fieldoffset as the array is only used to define 8075 * banked registers but later only fieldoffset is used. 8076 */ 8077 r2->fieldoffset = r->bank_fieldoffsets[ns]; 8078 } 8079 8080 if (state == ARM_CP_STATE_AA32) { 8081 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 8082 /* If the register is banked then we don't need to migrate or 8083 * reset the 32-bit instance in certain cases: 8084 * 8085 * 1) If the register has both 32-bit and 64-bit instances then we 8086 * can count on the 64-bit instance taking care of the 8087 * non-secure bank. 
8088 * 2) If ARMv8 is enabled then we can count on a 64-bit version 8089 * taking care of the secure bank. This requires that separate 8090 * 32 and 64-bit definitions are provided. 8091 */ 8092 if ((r->state == ARM_CP_STATE_BOTH && ns) || 8093 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 8094 r2->type |= ARM_CP_ALIAS; 8095 } 8096 } else if ((secstate != r->secure) && !ns) { 8097 /* The register is not banked so we only want to allow migration of 8098 * the non-secure instance. 8099 */ 8100 r2->type |= ARM_CP_ALIAS; 8101 } 8102 8103 if (r->state == ARM_CP_STATE_BOTH) { 8104 /* We assume it is a cp15 register if the .cp field is left unset. 8105 */ 8106 if (r2->cp == 0) { 8107 r2->cp = 15; 8108 } 8109 8110 #ifdef HOST_WORDS_BIGENDIAN 8111 if (r2->fieldoffset) { 8112 r2->fieldoffset += sizeof(uint32_t); 8113 } 8114 #endif 8115 } 8116 } 8117 if (state == ARM_CP_STATE_AA64) { 8118 /* To allow abbreviation of ARMCPRegInfo 8119 * definitions, we treat cp == 0 as equivalent to 8120 * the value for "standard guest-visible sysreg". 8121 * STATE_BOTH definitions are also always "standard 8122 * sysreg" in their AArch64 view (the .cp value may 8123 * be non-zero for the benefit of the AArch32 view). 8124 */ 8125 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 8126 r2->cp = CP_REG_ARM64_SYSREG_CP; 8127 } 8128 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 8129 r2->opc0, opc1, opc2); 8130 } else { 8131 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 8132 } 8133 if (opaque) { 8134 r2->opaque = opaque; 8135 } 8136 /* reginfo passed to helpers is correct for the actual access, 8137 * and is never ARM_CP_STATE_BOTH: 8138 */ 8139 r2->state = state; 8140 /* Make sure reginfo passed to helpers for wildcarded regs 8141 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 8142 */ 8143 r2->crm = crm; 8144 r2->opc1 = opc1; 8145 r2->opc2 = opc2; 8146 /* By convention, for wildcarded registers only the first 8147 * entry is used for migration; the others are marked as 8148 * ALIAS so we don't try to transfer the register 8149 * multiple times. Special registers (ie NOP/WFI) are 8150 * never migratable and not even raw-accessible. 8151 */ 8152 if ((r->type & ARM_CP_SPECIAL)) { 8153 r2->type |= ARM_CP_NO_RAW; 8154 } 8155 if (((r->crm == CP_ANY) && crm != 0) || 8156 ((r->opc1 == CP_ANY) && opc1 != 0) || 8157 ((r->opc2 == CP_ANY) && opc2 != 0)) { 8158 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 8159 } 8160 8161 /* Check that raw accesses are either forbidden or handled. Note that 8162 * we can't assert this earlier because the setup of fieldoffset for 8163 * banked registers has to be done first. 8164 */ 8165 if (!(r2->type & ARM_CP_NO_RAW)) { 8166 assert(!raw_accessors_invalid(r2)); 8167 } 8168 8169 /* Overriding of an existing definition must be explicitly 8170 * requested. 8171 */ 8172 if (!(r->type & ARM_CP_OVERRIDE)) { 8173 ARMCPRegInfo *oldreg; 8174 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 8175 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 8176 fprintf(stderr, "Register redefined: cp=%d %d bit " 8177 "crn=%d crm=%d opc1=%d opc2=%d, " 8178 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 8179 r2->crn, r2->crm, r2->opc1, r2->opc2, 8180 oldreg->name, r2->name); 8181 g_assert_not_reached(); 8182 } 8183 } 8184 g_hash_table_insert(cpu->cp_regs, key, r2); 8185 } 8186 8187 8188 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 8189 const ARMCPRegInfo *r, void *opaque) 8190 { 8191 /* Define implementations of coprocessor registers. 
8192 * We store these in a hashtable because typically 8193 * there are less than 150 registers in a space which 8194 * is 16*16*16*8*8 = 262144 in size. 8195 * Wildcarding is supported for the crm, opc1 and opc2 fields. 8196 * If a register is defined twice then the second definition is 8197 * used, so this can be used to define some generic registers and 8198 * then override them with implementation specific variations. 8199 * At least one of the original and the second definition should 8200 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 8201 * against accidental use. 8202 * 8203 * The state field defines whether the register is to be 8204 * visible in the AArch32 or AArch64 execution state. If the 8205 * state is set to ARM_CP_STATE_BOTH then we synthesise a 8206 * reginfo structure for the AArch32 view, which sees the lower 8207 * 32 bits of the 64 bit register. 8208 * 8209 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 8210 * be wildcarded. AArch64 registers are always considered to be 64 8211 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 8212 * the register, if any. 8213 */ 8214 int crm, opc1, opc2, state; 8215 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 8216 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 8217 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 8218 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 8219 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 8220 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 8221 /* 64 bit registers have only CRm and Opc1 fields */ 8222 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 8223 /* op0 only exists in the AArch64 encodings */ 8224 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 8225 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 8226 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 8227 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 8228 * encodes a minimum access level for the register. We roll this 8229 * runtime check into our general permission check code, so check 8230 * here that the reginfo's specified permissions are strict enough 8231 * to encompass the generic architectural permission check. 8232 */ 8233 if (r->state != ARM_CP_STATE_AA32) { 8234 int mask = 0; 8235 switch (r->opc1) { 8236 case 0: 8237 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 8238 mask = PL0U_R | PL1_RW; 8239 break; 8240 case 1: case 2: 8241 /* min_EL EL1 */ 8242 mask = PL1_RW; 8243 break; 8244 case 3: 8245 /* min_EL EL0 */ 8246 mask = PL0_RW; 8247 break; 8248 case 4: 8249 case 5: 8250 /* min_EL EL2 */ 8251 mask = PL2_RW; 8252 break; 8253 case 6: 8254 /* min_EL EL3 */ 8255 mask = PL3_RW; 8256 break; 8257 case 7: 8258 /* min_EL EL1, secure mode only (we don't check the latter) */ 8259 mask = PL1_RW; 8260 break; 8261 default: 8262 /* broken reginfo with out-of-range opc1 */ 8263 assert(false); 8264 break; 8265 } 8266 /* assert our permissions are not too lax (stricter is fine) */ 8267 assert((r->access & ~mask) == 0); 8268 } 8269 8270 /* Check that the register definition has enough info to handle 8271 * reads and writes if they are permitted. 
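 * (Registers of type ARM_CP_SPECIAL or ARM_CP_CONST are exempt: they never
 * need a fieldoffset or read/write functions.)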
8272 */ 8273 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 8274 if (r->access & PL3_R) { 8275 assert((r->fieldoffset || 8276 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8277 r->readfn); 8278 } 8279 if (r->access & PL3_W) { 8280 assert((r->fieldoffset || 8281 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 8282 r->writefn); 8283 } 8284 } 8285 /* Bad type field probably means missing sentinel at end of reg list */ 8286 assert(cptype_valid(r->type)); 8287 for (crm = crmmin; crm <= crmmax; crm++) { 8288 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 8289 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 8290 for (state = ARM_CP_STATE_AA32; 8291 state <= ARM_CP_STATE_AA64; state++) { 8292 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 8293 continue; 8294 } 8295 if (state == ARM_CP_STATE_AA32) { 8296 /* Under AArch32 CP registers can be common 8297 * (same for secure and non-secure world) or banked. 8298 */ 8299 char *name; 8300 8301 switch (r->secure) { 8302 case ARM_CP_SECSTATE_S: 8303 case ARM_CP_SECSTATE_NS: 8304 add_cpreg_to_hashtable(cpu, r, opaque, state, 8305 r->secure, crm, opc1, opc2, 8306 r->name); 8307 break; 8308 default: 8309 name = g_strdup_printf("%s_S", r->name); 8310 add_cpreg_to_hashtable(cpu, r, opaque, state, 8311 ARM_CP_SECSTATE_S, 8312 crm, opc1, opc2, name); 8313 g_free(name); 8314 add_cpreg_to_hashtable(cpu, r, opaque, state, 8315 ARM_CP_SECSTATE_NS, 8316 crm, opc1, opc2, r->name); 8317 break; 8318 } 8319 } else { 8320 /* AArch64 registers get mapped to non-secure instance 8321 * of AArch32 */ 8322 add_cpreg_to_hashtable(cpu, r, opaque, state, 8323 ARM_CP_SECSTATE_NS, 8324 crm, opc1, opc2, r->name); 8325 } 8326 } 8327 } 8328 } 8329 } 8330 } 8331 8332 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 8333 const ARMCPRegInfo *regs, void *opaque) 8334 { 8335 /* Define a whole list of registers */ 8336 const ARMCPRegInfo *r; 8337 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8338 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 8339 } 8340 } 8341 8342 /* 8343 * Modify ARMCPRegInfo for access from userspace. 8344 * 8345 * This is a data driven modification directed by 8346 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 8347 * user-space cannot alter any values and dynamic values pertaining to 8348 * execution state are hidden from user space view anyway. 
8349 */ 8350 void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods) 8351 { 8352 const ARMCPRegUserSpaceInfo *m; 8353 ARMCPRegInfo *r; 8354 8355 for (m = mods; m->name; m++) { 8356 GPatternSpec *pat = NULL; 8357 if (m->is_glob) { 8358 pat = g_pattern_spec_new(m->name); 8359 } 8360 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 8361 if (pat && g_pattern_match_string(pat, r->name)) { 8362 r->type = ARM_CP_CONST; 8363 r->access = PL0U_R; 8364 r->resetvalue = 0; 8365 /* continue */ 8366 } else if (strcmp(r->name, m->name) == 0) { 8367 r->type = ARM_CP_CONST; 8368 r->access = PL0U_R; 8369 r->resetvalue &= m->exported_bits; 8370 r->resetvalue |= m->fixed_bits; 8371 break; 8372 } 8373 } 8374 if (pat) { 8375 g_pattern_spec_free(pat); 8376 } 8377 } 8378 } 8379 8380 const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) 8381 { 8382 return g_hash_table_lookup(cpregs, &encoded_cp); 8383 } 8384 8385 void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, 8386 uint64_t value) 8387 { 8388 /* Helper coprocessor write function for write-ignore registers */ 8389 } 8390 8391 uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) 8392 { 8393 /* Helper coprocessor write function for read-as-zero registers */ 8394 return 0; 8395 } 8396 8397 void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) 8398 { 8399 /* Helper coprocessor reset function for do-nothing-on-reset registers */ 8400 } 8401 8402 static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) 8403 { 8404 /* Return true if it is not valid for us to switch to 8405 * this CPU mode (ie all the UNPREDICTABLE cases in 8406 * the ARM ARM CPSRWriteByInstr pseudocode). 8407 */ 8408 8409 /* Changes to or from Hyp via MSR and CPS are illegal. */ 8410 if (write_type == CPSRWriteByInstr && 8411 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || 8412 mode == ARM_CPU_MODE_HYP)) { 8413 return 1; 8414 } 8415 8416 switch (mode) { 8417 case ARM_CPU_MODE_USR: 8418 return 0; 8419 case ARM_CPU_MODE_SYS: 8420 case ARM_CPU_MODE_SVC: 8421 case ARM_CPU_MODE_ABT: 8422 case ARM_CPU_MODE_UND: 8423 case ARM_CPU_MODE_IRQ: 8424 case ARM_CPU_MODE_FIQ: 8425 /* Note that we don't implement the IMPDEF NSACR.RFR which in v7 8426 * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) 8427 */ 8428 /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR 8429 * and CPS are treated as illegal mode changes. 
8430 */ 8431 if (write_type == CPSRWriteByInstr && 8432 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 8433 (arm_hcr_el2_eff(env) & HCR_TGE)) { 8434 return 1; 8435 } 8436 return 0; 8437 case ARM_CPU_MODE_HYP: 8438 return !arm_feature(env, ARM_FEATURE_EL2) 8439 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); 8440 case ARM_CPU_MODE_MON: 8441 return arm_current_el(env) < 3; 8442 default: 8443 return 1; 8444 } 8445 } 8446 8447 uint32_t cpsr_read(CPUARMState *env) 8448 { 8449 int ZF; 8450 ZF = (env->ZF == 0); 8451 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 8452 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 8453 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 8454 | ((env->condexec_bits & 0xfc) << 8) 8455 | (env->GE << 16) | (env->daif & CPSR_AIF); 8456 } 8457 8458 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 8459 CPSRWriteType write_type) 8460 { 8461 uint32_t changed_daif; 8462 8463 if (mask & CPSR_NZCV) { 8464 env->ZF = (~val) & CPSR_Z; 8465 env->NF = val; 8466 env->CF = (val >> 29) & 1; 8467 env->VF = (val << 3) & 0x80000000; 8468 } 8469 if (mask & CPSR_Q) 8470 env->QF = ((val & CPSR_Q) != 0); 8471 if (mask & CPSR_T) 8472 env->thumb = ((val & CPSR_T) != 0); 8473 if (mask & CPSR_IT_0_1) { 8474 env->condexec_bits &= ~3; 8475 env->condexec_bits |= (val >> 25) & 3; 8476 } 8477 if (mask & CPSR_IT_2_7) { 8478 env->condexec_bits &= 3; 8479 env->condexec_bits |= (val >> 8) & 0xfc; 8480 } 8481 if (mask & CPSR_GE) { 8482 env->GE = (val >> 16) & 0xf; 8483 } 8484 8485 /* In a V7 implementation that includes the security extensions but does 8486 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 8487 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 8488 * bits respectively. 8489 * 8490 * In a V8 implementation, it is permitted for privileged software to 8491 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 8492 */ 8493 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 8494 arm_feature(env, ARM_FEATURE_EL3) && 8495 !arm_feature(env, ARM_FEATURE_EL2) && 8496 !arm_is_secure(env)) { 8497 8498 changed_daif = (env->daif ^ val) & mask; 8499 8500 if (changed_daif & CPSR_A) { 8501 /* Check to see if we are allowed to change the masking of async 8502 * abort exceptions from a non-secure state. 8503 */ 8504 if (!(env->cp15.scr_el3 & SCR_AW)) { 8505 qemu_log_mask(LOG_GUEST_ERROR, 8506 "Ignoring attempt to switch CPSR_A flag from " 8507 "non-secure world with SCR.AW bit clear\n"); 8508 mask &= ~CPSR_A; 8509 } 8510 } 8511 8512 if (changed_daif & CPSR_F) { 8513 /* Check to see if we are allowed to change the masking of FIQ 8514 * exceptions from a non-secure state. 8515 */ 8516 if (!(env->cp15.scr_el3 & SCR_FW)) { 8517 qemu_log_mask(LOG_GUEST_ERROR, 8518 "Ignoring attempt to switch CPSR_F flag from " 8519 "non-secure world with SCR.FW bit clear\n"); 8520 mask &= ~CPSR_F; 8521 } 8522 8523 /* Check whether non-maskable FIQ (NMFI) support is enabled. 8524 * If this bit is set software is not allowed to mask 8525 * FIQs, but is allowed to set CPSR_F to 0. 
8526 */ 8527 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 8528 (val & CPSR_F)) { 8529 qemu_log_mask(LOG_GUEST_ERROR, 8530 "Ignoring attempt to enable CPSR_F flag " 8531 "(non-maskable FIQ [NMFI] support enabled)\n"); 8532 mask &= ~CPSR_F; 8533 } 8534 } 8535 } 8536 8537 env->daif &= ~(CPSR_AIF & mask); 8538 env->daif |= val & CPSR_AIF & mask; 8539 8540 if (write_type != CPSRWriteRaw && 8541 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 8542 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 8543 /* Note that we can only get here in USR mode if this is a 8544 * gdb stub write; for this case we follow the architectural 8545 * behaviour for guest writes in USR mode of ignoring an attempt 8546 * to switch mode. (Those are caught by translate.c for writes 8547 * triggered by guest instructions.) 8548 */ 8549 mask &= ~CPSR_M; 8550 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 8551 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 8552 * v7, and has defined behaviour in v8: 8553 * + leave CPSR.M untouched 8554 * + allow changes to the other CPSR fields 8555 * + set PSTATE.IL 8556 * For user changes via the GDB stub, we don't set PSTATE.IL, 8557 * as this would be unnecessarily harsh for a user error. 8558 */ 8559 mask &= ~CPSR_M; 8560 if (write_type != CPSRWriteByGDBStub && 8561 arm_feature(env, ARM_FEATURE_V8)) { 8562 mask |= CPSR_IL; 8563 val |= CPSR_IL; 8564 } 8565 qemu_log_mask(LOG_GUEST_ERROR, 8566 "Illegal AArch32 mode switch attempt from %s to %s\n", 8567 aarch32_mode_name(env->uncached_cpsr), 8568 aarch32_mode_name(val)); 8569 } else { 8570 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 8571 write_type == CPSRWriteExceptionReturn ? 8572 "Exception return from AArch32" : 8573 "AArch32 mode switch from", 8574 aarch32_mode_name(env->uncached_cpsr), 8575 aarch32_mode_name(val), env->regs[15]); 8576 switch_mode(env, val & CPSR_M); 8577 } 8578 } 8579 mask &= ~CACHED_CPSR_BITS; 8580 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 8581 } 8582 8583 /* Sign/zero extend */ 8584 uint32_t HELPER(sxtb16)(uint32_t x) 8585 { 8586 uint32_t res; 8587 res = (uint16_t)(int8_t)x; 8588 res |= (uint32_t)(int8_t)(x >> 16) << 16; 8589 return res; 8590 } 8591 8592 uint32_t HELPER(uxtb16)(uint32_t x) 8593 { 8594 uint32_t res; 8595 res = (uint16_t)(uint8_t)x; 8596 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 8597 return res; 8598 } 8599 8600 int32_t HELPER(sdiv)(int32_t num, int32_t den) 8601 { 8602 if (den == 0) 8603 return 0; 8604 if (num == INT_MIN && den == -1) 8605 return INT_MIN; 8606 return num / den; 8607 } 8608 8609 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 8610 { 8611 if (den == 0) 8612 return 0; 8613 return num / den; 8614 } 8615 8616 uint32_t HELPER(rbit)(uint32_t x) 8617 { 8618 return revbit32(x); 8619 } 8620 8621 #ifdef CONFIG_USER_ONLY 8622 8623 static void switch_mode(CPUARMState *env, int mode) 8624 { 8625 ARMCPU *cpu = env_archcpu(env); 8626 8627 if (mode != ARM_CPU_MODE_USR) { 8628 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 8629 } 8630 } 8631 8632 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8633 uint32_t cur_el, bool secure) 8634 { 8635 return 1; 8636 } 8637 8638 void aarch64_sync_64_to_32(CPUARMState *env) 8639 { 8640 g_assert_not_reached(); 8641 } 8642 8643 #else 8644 8645 static void switch_mode(CPUARMState *env, int mode) 8646 { 8647 int old_mode; 8648 int i; 8649 8650 old_mode = env->uncached_cpsr & CPSR_M; 8651 if (mode == old_mode) 8652 return; 8653 8654 if 
(old_mode == ARM_CPU_MODE_FIQ) { 8655 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8656 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 8657 } else if (mode == ARM_CPU_MODE_FIQ) { 8658 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 8659 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 8660 } 8661 8662 i = bank_number(old_mode); 8663 env->banked_r13[i] = env->regs[13]; 8664 env->banked_spsr[i] = env->spsr; 8665 8666 i = bank_number(mode); 8667 env->regs[13] = env->banked_r13[i]; 8668 env->spsr = env->banked_spsr[i]; 8669 8670 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 8671 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 8672 } 8673 8674 /* Physical Interrupt Target EL Lookup Table 8675 * 8676 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 8677 * 8678 * The below multi-dimensional table is used for looking up the target 8679 * exception level given numerous condition criteria. Specifically, the 8680 * target EL is based on SCR and HCR routing controls as well as the 8681 * currently executing EL and secure state. 8682 * 8683 * Dimensions: 8684 * target_el_table[2][2][2][2][2][4] 8685 * | | | | | +--- Current EL 8686 * | | | | +------ Non-secure(0)/Secure(1) 8687 * | | | +--------- HCR mask override 8688 * | | +------------ SCR exec state control 8689 * | +--------------- SCR mask override 8690 * +------------------ 32-bit(0)/64-bit(1) EL3 8691 * 8692 * The table values are as such: 8693 * 0-3 = EL0-EL3 8694 * -1 = Cannot occur 8695 * 8696 * The ARM ARM target EL table includes entries indicating that an "exception 8697 * is not taken". The two cases where this is applicable are: 8698 * 1) An exception is taken from EL3 but the SCR does not have the exception 8699 * routed to EL3. 8700 * 2) An exception is taken from EL2 but the HCR does not have the exception 8701 * routed to EL2. 8702 * In these two cases, the below table contains a target of EL1. This value is 8703 * returned as it is expected that the consumer of the table data will check 8704 * for "target EL >= current EL" to ensure the exception is not taken.
8705 * 8706 * SCR HCR 8707 * 64 EA AMO From 8708 * BIT IRQ IMO Non-secure Secure 8709 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 8710 */ 8711 static const int8_t target_el_table[2][2][2][2][2][4] = { 8712 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8713 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 8714 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 8715 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 8716 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8717 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 8718 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 8719 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 8720 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 8721 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 8722 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 8723 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 8724 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8725 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 8726 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 8727 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 8728 }; 8729 8730 /* 8731 * Determine the target EL for physical exceptions 8732 */ 8733 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 8734 uint32_t cur_el, bool secure) 8735 { 8736 CPUARMState *env = cs->env_ptr; 8737 bool rw; 8738 bool scr; 8739 bool hcr; 8740 int target_el; 8741 /* Is the highest EL AArch64? */ 8742 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 8743 uint64_t hcr_el2; 8744 8745 if (arm_feature(env, ARM_FEATURE_EL3)) { 8746 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 8747 } else { 8748 /* Either EL2 is the highest EL (and so the EL2 register width 8749 * is given by is64); or there is no EL2 or EL3, in which case 8750 * the value of 'rw' does not affect the table lookup anyway. 8751 */ 8752 rw = is64; 8753 } 8754 8755 hcr_el2 = arm_hcr_el2_eff(env); 8756 switch (excp_idx) { 8757 case EXCP_IRQ: 8758 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 8759 hcr = hcr_el2 & HCR_IMO; 8760 break; 8761 case EXCP_FIQ: 8762 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 8763 hcr = hcr_el2 & HCR_FMO; 8764 break; 8765 default: 8766 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 8767 hcr = hcr_el2 & HCR_AMO; 8768 break; 8769 }; 8770 8771 /* 8772 * For these purposes, TGE and AMO/IMO/FMO both force the 8773 * interrupt to EL2. Fold TGE into the bit extracted above. 
8774 */ 8775 hcr |= (hcr_el2 & HCR_TGE) != 0; 8776 8777 /* Perform a table-lookup for the target EL given the current state */ 8778 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 8779 8780 assert(target_el > 0); 8781 8782 return target_el; 8783 } 8784 8785 void arm_log_exception(int idx) 8786 { 8787 if (qemu_loglevel_mask(CPU_LOG_INT)) { 8788 const char *exc = NULL; 8789 static const char * const excnames[] = { 8790 [EXCP_UDEF] = "Undefined Instruction", 8791 [EXCP_SWI] = "SVC", 8792 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 8793 [EXCP_DATA_ABORT] = "Data Abort", 8794 [EXCP_IRQ] = "IRQ", 8795 [EXCP_FIQ] = "FIQ", 8796 [EXCP_BKPT] = "Breakpoint", 8797 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 8798 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 8799 [EXCP_HVC] = "Hypervisor Call", 8800 [EXCP_HYP_TRAP] = "Hypervisor Trap", 8801 [EXCP_SMC] = "Secure Monitor Call", 8802 [EXCP_VIRQ] = "Virtual IRQ", 8803 [EXCP_VFIQ] = "Virtual FIQ", 8804 [EXCP_SEMIHOST] = "Semihosting call", 8805 [EXCP_NOCP] = "v7M NOCP UsageFault", 8806 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 8807 [EXCP_STKOF] = "v8M STKOF UsageFault", 8808 [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", 8809 [EXCP_LSERR] = "v8M LSERR UsageFault", 8810 [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", 8811 }; 8812 8813 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 8814 exc = excnames[idx]; 8815 } 8816 if (!exc) { 8817 exc = "unknown"; 8818 } 8819 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 8820 } 8821 } 8822 8823 /* 8824 * Function used to synchronize QEMU's AArch64 register set with AArch32 8825 * register set. This is necessary when switching between AArch32 and AArch64 8826 * execution state. 8827 */ 8828 void aarch64_sync_32_to_64(CPUARMState *env) 8829 { 8830 int i; 8831 uint32_t mode = env->uncached_cpsr & CPSR_M; 8832 8833 /* We can blanket copy R[0:7] to X[0:7] */ 8834 for (i = 0; i < 8; i++) { 8835 env->xregs[i] = env->regs[i]; 8836 } 8837 8838 /* 8839 * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 8840 * Otherwise, they come from the banked user regs. 8841 */ 8842 if (mode == ARM_CPU_MODE_FIQ) { 8843 for (i = 8; i < 13; i++) { 8844 env->xregs[i] = env->usr_regs[i - 8]; 8845 } 8846 } else { 8847 for (i = 8; i < 13; i++) { 8848 env->xregs[i] = env->regs[i]; 8849 } 8850 } 8851 8852 /* 8853 * Registers x13-x23 are the various mode SP and FP registers. Registers 8854 * r13 and r14 are only copied if we are in that mode, otherwise we copy 8855 * from the mode banked register. 
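 * The resulting layout is: x13/x14 = SP_usr/LR_usr, x15 = SP_hyp,
 * x16/x17 = LR_irq/SP_irq, x18/x19 = LR_svc/SP_svc, x20/x21 = LR_abt/SP_abt
 * and x22/x23 = LR_und/SP_und.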
8856 */ 8857 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8858 env->xregs[13] = env->regs[13]; 8859 env->xregs[14] = env->regs[14]; 8860 } else { 8861 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 8862 /* HYP is an exception in that it is copied from r14 */ 8863 if (mode == ARM_CPU_MODE_HYP) { 8864 env->xregs[14] = env->regs[14]; 8865 } else { 8866 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 8867 } 8868 } 8869 8870 if (mode == ARM_CPU_MODE_HYP) { 8871 env->xregs[15] = env->regs[13]; 8872 } else { 8873 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 8874 } 8875 8876 if (mode == ARM_CPU_MODE_IRQ) { 8877 env->xregs[16] = env->regs[14]; 8878 env->xregs[17] = env->regs[13]; 8879 } else { 8880 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 8881 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 8882 } 8883 8884 if (mode == ARM_CPU_MODE_SVC) { 8885 env->xregs[18] = env->regs[14]; 8886 env->xregs[19] = env->regs[13]; 8887 } else { 8888 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 8889 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 8890 } 8891 8892 if (mode == ARM_CPU_MODE_ABT) { 8893 env->xregs[20] = env->regs[14]; 8894 env->xregs[21] = env->regs[13]; 8895 } else { 8896 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 8897 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 8898 } 8899 8900 if (mode == ARM_CPU_MODE_UND) { 8901 env->xregs[22] = env->regs[14]; 8902 env->xregs[23] = env->regs[13]; 8903 } else { 8904 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 8905 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 8906 } 8907 8908 /* 8909 * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 8910 * mode, then we can copy from r8-r14. Otherwise, we copy from the 8911 * FIQ bank for r8-r14. 8912 */ 8913 if (mode == ARM_CPU_MODE_FIQ) { 8914 for (i = 24; i < 31; i++) { 8915 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 8916 } 8917 } else { 8918 for (i = 24; i < 29; i++) { 8919 env->xregs[i] = env->fiq_regs[i - 24]; 8920 } 8921 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 8922 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 8923 } 8924 8925 env->pc = env->regs[15]; 8926 } 8927 8928 /* 8929 * Function used to synchronize QEMU's AArch32 register set with AArch64 8930 * register set. This is necessary when switching between AArch32 and AArch64 8931 * execution state. 8932 */ 8933 void aarch64_sync_64_to_32(CPUARMState *env) 8934 { 8935 int i; 8936 uint32_t mode = env->uncached_cpsr & CPSR_M; 8937 8938 /* We can blanket copy X[0:7] to R[0:7] */ 8939 for (i = 0; i < 8; i++) { 8940 env->regs[i] = env->xregs[i]; 8941 } 8942 8943 /* 8944 * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 8945 * Otherwise, we copy x8-x12 into the banked user regs. 8946 */ 8947 if (mode == ARM_CPU_MODE_FIQ) { 8948 for (i = 8; i < 13; i++) { 8949 env->usr_regs[i - 8] = env->xregs[i]; 8950 } 8951 } else { 8952 for (i = 8; i < 13; i++) { 8953 env->regs[i] = env->xregs[i]; 8954 } 8955 } 8956 8957 /* 8958 * Registers r13 & r14 depend on the current mode. 8959 * If we are in a given mode, we copy the corresponding x registers to r13 8960 * and r14. Otherwise, we copy the x register to the banked r13 and r14 8961 * for the mode. 
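 * (This is the mirror image of the copies done by aarch64_sync_32_to_64()
 * above.)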
8962 */ 8963 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8964 env->regs[13] = env->xregs[13]; 8965 env->regs[14] = env->xregs[14]; 8966 } else { 8967 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 8968 8969 /* 8970 * HYP is an exception in that it does not have its own banked r14 but 8971 * shares the USR r14 8972 */ 8973 if (mode == ARM_CPU_MODE_HYP) { 8974 env->regs[14] = env->xregs[14]; 8975 } else { 8976 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 8977 } 8978 } 8979 8980 if (mode == ARM_CPU_MODE_HYP) { 8981 env->regs[13] = env->xregs[15]; 8982 } else { 8983 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 8984 } 8985 8986 if (mode == ARM_CPU_MODE_IRQ) { 8987 env->regs[14] = env->xregs[16]; 8988 env->regs[13] = env->xregs[17]; 8989 } else { 8990 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 8991 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 8992 } 8993 8994 if (mode == ARM_CPU_MODE_SVC) { 8995 env->regs[14] = env->xregs[18]; 8996 env->regs[13] = env->xregs[19]; 8997 } else { 8998 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 8999 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 9000 } 9001 9002 if (mode == ARM_CPU_MODE_ABT) { 9003 env->regs[14] = env->xregs[20]; 9004 env->regs[13] = env->xregs[21]; 9005 } else { 9006 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 9007 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 9008 } 9009 9010 if (mode == ARM_CPU_MODE_UND) { 9011 env->regs[14] = env->xregs[22]; 9012 env->regs[13] = env->xregs[23]; 9013 } else { 9014 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 9015 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 9016 } 9017 9018 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9019 * mode, then we can copy to r8-r14. Otherwise, we copy to the 9020 * FIQ bank for r8-r14. 9021 */ 9022 if (mode == ARM_CPU_MODE_FIQ) { 9023 for (i = 24; i < 31; i++) { 9024 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 9025 } 9026 } else { 9027 for (i = 24; i < 29; i++) { 9028 env->fiq_regs[i - 24] = env->xregs[i]; 9029 } 9030 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 9031 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 9032 } 9033 9034 env->regs[15] = env->pc; 9035 } 9036 9037 static void take_aarch32_exception(CPUARMState *env, int new_mode, 9038 uint32_t mask, uint32_t offset, 9039 uint32_t newpc) 9040 { 9041 int new_el; 9042 9043 /* Change the CPU state so as to actually take the exception. */ 9044 switch_mode(env, new_mode); 9045 new_el = arm_current_el(env); 9046 9047 /* 9048 * For exceptions taken to AArch32 we must clear the SS bit in both 9049 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 9050 */ 9051 env->uncached_cpsr &= ~PSTATE_SS; 9052 env->spsr = cpsr_read(env); 9053 /* Clear IT bits. */ 9054 env->condexec_bits = 0; 9055 /* Switch to the new mode, and to the correct instruction set. 
*/ 9056 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 9057 /* Set new mode endianness */ 9058 env->uncached_cpsr &= ~CPSR_E; 9059 if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { 9060 env->uncached_cpsr |= CPSR_E; 9061 } 9062 /* J and IL must always be cleared for exception entry */ 9063 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 9064 env->daif |= mask; 9065 9066 if (new_mode == ARM_CPU_MODE_HYP) { 9067 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 9068 env->elr_el[2] = env->regs[15]; 9069 } else { 9070 /* CPSR.PAN is normally preserved preserved unless... */ 9071 if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { 9072 switch (new_el) { 9073 case 3: 9074 if (!arm_is_secure_below_el3(env)) { 9075 /* ... the target is EL3, from non-secure state. */ 9076 env->uncached_cpsr &= ~CPSR_PAN; 9077 break; 9078 } 9079 /* ... the target is EL3, from secure state ... */ 9080 /* fall through */ 9081 case 1: 9082 /* ... the target is EL1 and SCTLR.SPAN is 0. */ 9083 if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { 9084 env->uncached_cpsr |= CPSR_PAN; 9085 } 9086 break; 9087 } 9088 } 9089 /* 9090 * this is a lie, as there was no c1_sys on V4T/V5, but who cares 9091 * and we should just guard the thumb mode on V4 9092 */ 9093 if (arm_feature(env, ARM_FEATURE_V4T)) { 9094 env->thumb = 9095 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 9096 } 9097 env->regs[14] = env->regs[15] + offset; 9098 } 9099 env->regs[15] = newpc; 9100 arm_rebuild_hflags(env); 9101 } 9102 9103 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 9104 { 9105 /* 9106 * Handle exception entry to Hyp mode; this is sufficiently 9107 * different to entry to other AArch32 modes that we handle it 9108 * separately here. 9109 * 9110 * The vector table entry used is always the 0x14 Hyp mode entry point, 9111 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. 9112 * The offset applied to the preferred return address is always zero 9113 * (see DDI0487C.a section G1.12.3). 9114 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 9115 */ 9116 uint32_t addr, mask; 9117 ARMCPU *cpu = ARM_CPU(cs); 9118 CPUARMState *env = &cpu->env; 9119 9120 switch (cs->exception_index) { 9121 case EXCP_UDEF: 9122 addr = 0x04; 9123 break; 9124 case EXCP_SWI: 9125 addr = 0x14; 9126 break; 9127 case EXCP_BKPT: 9128 /* Fall through to prefetch abort. */ 9129 case EXCP_PREFETCH_ABORT: 9130 env->cp15.ifar_s = env->exception.vaddress; 9131 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 9132 (uint32_t)env->exception.vaddress); 9133 addr = 0x0c; 9134 break; 9135 case EXCP_DATA_ABORT: 9136 env->cp15.dfar_s = env->exception.vaddress; 9137 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 9138 (uint32_t)env->exception.vaddress); 9139 addr = 0x10; 9140 break; 9141 case EXCP_IRQ: 9142 addr = 0x18; 9143 break; 9144 case EXCP_FIQ: 9145 addr = 0x1c; 9146 break; 9147 case EXCP_HVC: 9148 addr = 0x08; 9149 break; 9150 case EXCP_HYP_TRAP: 9151 addr = 0x14; 9152 break; 9153 default: 9154 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9155 } 9156 9157 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 9158 if (!arm_feature(env, ARM_FEATURE_V8)) { 9159 /* 9160 * QEMU syndrome values are v8-style. v7 has the IL bit 9161 * UNK/SBZP for "field not valid" cases, where v8 uses RES1. 9162 * If this is a v7 CPU, squash the IL bit in those cases. 
9163 */ 9164 if (cs->exception_index == EXCP_PREFETCH_ABORT || 9165 (cs->exception_index == EXCP_DATA_ABORT && 9166 !(env->exception.syndrome & ARM_EL_ISV)) || 9167 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 9168 env->exception.syndrome &= ~ARM_EL_IL; 9169 } 9170 } 9171 env->cp15.esr_el[2] = env->exception.syndrome; 9172 } 9173 9174 if (arm_current_el(env) != 2 && addr < 0x14) { 9175 addr = 0x14; 9176 } 9177 9178 mask = 0; 9179 if (!(env->cp15.scr_el3 & SCR_EA)) { 9180 mask |= CPSR_A; 9181 } 9182 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 9183 mask |= CPSR_I; 9184 } 9185 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 9186 mask |= CPSR_F; 9187 } 9188 9189 addr += env->cp15.hvbar; 9190 9191 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 9192 } 9193 9194 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 9195 { 9196 ARMCPU *cpu = ARM_CPU(cs); 9197 CPUARMState *env = &cpu->env; 9198 uint32_t addr; 9199 uint32_t mask; 9200 int new_mode; 9201 uint32_t offset; 9202 uint32_t moe; 9203 9204 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 9205 switch (syn_get_ec(env->exception.syndrome)) { 9206 case EC_BREAKPOINT: 9207 case EC_BREAKPOINT_SAME_EL: 9208 moe = 1; 9209 break; 9210 case EC_WATCHPOINT: 9211 case EC_WATCHPOINT_SAME_EL: 9212 moe = 10; 9213 break; 9214 case EC_AA32_BKPT: 9215 moe = 3; 9216 break; 9217 case EC_VECTORCATCH: 9218 moe = 5; 9219 break; 9220 default: 9221 moe = 0; 9222 break; 9223 } 9224 9225 if (moe) { 9226 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 9227 } 9228 9229 if (env->exception.target_el == 2) { 9230 arm_cpu_do_interrupt_aarch32_hyp(cs); 9231 return; 9232 } 9233 9234 switch (cs->exception_index) { 9235 case EXCP_UDEF: 9236 new_mode = ARM_CPU_MODE_UND; 9237 addr = 0x04; 9238 mask = CPSR_I; 9239 if (env->thumb) 9240 offset = 2; 9241 else 9242 offset = 4; 9243 break; 9244 case EXCP_SWI: 9245 new_mode = ARM_CPU_MODE_SVC; 9246 addr = 0x08; 9247 mask = CPSR_I; 9248 /* The PC already points to the next instruction. */ 9249 offset = 0; 9250 break; 9251 case EXCP_BKPT: 9252 /* Fall through to prefetch abort. */ 9253 case EXCP_PREFETCH_ABORT: 9254 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 9255 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 9256 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 9257 env->exception.fsr, (uint32_t)env->exception.vaddress); 9258 new_mode = ARM_CPU_MODE_ABT; 9259 addr = 0x0c; 9260 mask = CPSR_A | CPSR_I; 9261 offset = 4; 9262 break; 9263 case EXCP_DATA_ABORT: 9264 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 9265 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 9266 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 9267 env->exception.fsr, 9268 (uint32_t)env->exception.vaddress); 9269 new_mode = ARM_CPU_MODE_ABT; 9270 addr = 0x10; 9271 mask = CPSR_A | CPSR_I; 9272 offset = 8; 9273 break; 9274 case EXCP_IRQ: 9275 new_mode = ARM_CPU_MODE_IRQ; 9276 addr = 0x18; 9277 /* Disable IRQ and imprecise data aborts. */ 9278 mask = CPSR_A | CPSR_I; 9279 offset = 4; 9280 if (env->cp15.scr_el3 & SCR_IRQ) { 9281 /* IRQ routed to monitor mode */ 9282 new_mode = ARM_CPU_MODE_MON; 9283 mask |= CPSR_F; 9284 } 9285 break; 9286 case EXCP_FIQ: 9287 new_mode = ARM_CPU_MODE_FIQ; 9288 addr = 0x1c; 9289 /* Disable FIQ, IRQ and imprecise data aborts. 
*/ 9290 mask = CPSR_A | CPSR_I | CPSR_F; 9291 if (env->cp15.scr_el3 & SCR_FIQ) { 9292 /* FIQ routed to monitor mode */ 9293 new_mode = ARM_CPU_MODE_MON; 9294 } 9295 offset = 4; 9296 break; 9297 case EXCP_VIRQ: 9298 new_mode = ARM_CPU_MODE_IRQ; 9299 addr = 0x18; 9300 /* Disable IRQ and imprecise data aborts. */ 9301 mask = CPSR_A | CPSR_I; 9302 offset = 4; 9303 break; 9304 case EXCP_VFIQ: 9305 new_mode = ARM_CPU_MODE_FIQ; 9306 addr = 0x1c; 9307 /* Disable FIQ, IRQ and imprecise data aborts. */ 9308 mask = CPSR_A | CPSR_I | CPSR_F; 9309 offset = 4; 9310 break; 9311 case EXCP_SMC: 9312 new_mode = ARM_CPU_MODE_MON; 9313 addr = 0x08; 9314 mask = CPSR_A | CPSR_I | CPSR_F; 9315 offset = 0; 9316 break; 9317 default: 9318 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9319 return; /* Never happens. Keep compiler happy. */ 9320 } 9321 9322 if (new_mode == ARM_CPU_MODE_MON) { 9323 addr += env->cp15.mvbar; 9324 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 9325 /* High vectors. When enabled, base address cannot be remapped. */ 9326 addr += 0xffff0000; 9327 } else { 9328 /* ARM v7 architectures provide a vector base address register to remap 9329 * the interrupt vector table. 9330 * This register is only followed in non-monitor mode, and is banked. 9331 * Note: only bits 31:5 are valid. 9332 */ 9333 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 9334 } 9335 9336 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 9337 env->cp15.scr_el3 &= ~SCR_NS; 9338 } 9339 9340 take_aarch32_exception(env, new_mode, mask, offset, addr); 9341 } 9342 9343 /* Handle exception entry to a target EL which is using AArch64 */ 9344 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 9345 { 9346 ARMCPU *cpu = ARM_CPU(cs); 9347 CPUARMState *env = &cpu->env; 9348 unsigned int new_el = env->exception.target_el; 9349 target_ulong addr = env->cp15.vbar_el[new_el]; 9350 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 9351 unsigned int old_mode; 9352 unsigned int cur_el = arm_current_el(env); 9353 9354 /* 9355 * Note that new_el can never be 0. If cur_el is 0, then 9356 * el0_a64 is is_a64(), else el0_a64 is ignored. 
9357 */ 9358 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 9359 9360 if (cur_el < new_el) { 9361 /* Entry vector offset depends on whether the implemented EL 9362 * immediately lower than the target level is using AArch32 or AArch64 9363 */ 9364 bool is_aa64; 9365 uint64_t hcr; 9366 9367 switch (new_el) { 9368 case 3: 9369 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 9370 break; 9371 case 2: 9372 hcr = arm_hcr_el2_eff(env); 9373 if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 9374 is_aa64 = (hcr & HCR_RW) != 0; 9375 break; 9376 } 9377 /* fall through */ 9378 case 1: 9379 is_aa64 = is_a64(env); 9380 break; 9381 default: 9382 g_assert_not_reached(); 9383 } 9384 9385 if (is_aa64) { 9386 addr += 0x400; 9387 } else { 9388 addr += 0x600; 9389 } 9390 } else if (pstate_read(env) & PSTATE_SP) { 9391 addr += 0x200; 9392 } 9393 9394 switch (cs->exception_index) { 9395 case EXCP_PREFETCH_ABORT: 9396 case EXCP_DATA_ABORT: 9397 env->cp15.far_el[new_el] = env->exception.vaddress; 9398 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 9399 env->cp15.far_el[new_el]); 9400 /* fall through */ 9401 case EXCP_BKPT: 9402 case EXCP_UDEF: 9403 case EXCP_SWI: 9404 case EXCP_HVC: 9405 case EXCP_HYP_TRAP: 9406 case EXCP_SMC: 9407 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { 9408 /* 9409 * QEMU internal FP/SIMD syndromes from AArch32 include the 9410 * TA and coproc fields which are only exposed if the exception 9411 * is taken to AArch32 Hyp mode. Mask them out to get a valid 9412 * AArch64 format syndrome. 9413 */ 9414 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 9415 } 9416 env->cp15.esr_el[new_el] = env->exception.syndrome; 9417 break; 9418 case EXCP_IRQ: 9419 case EXCP_VIRQ: 9420 addr += 0x80; 9421 break; 9422 case EXCP_FIQ: 9423 case EXCP_VFIQ: 9424 addr += 0x100; 9425 break; 9426 default: 9427 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9428 } 9429 9430 if (is_a64(env)) { 9431 old_mode = pstate_read(env); 9432 aarch64_save_sp(env, arm_current_el(env)); 9433 env->elr_el[new_el] = env->pc; 9434 } else { 9435 old_mode = cpsr_read(env); 9436 env->elr_el[new_el] = env->regs[15]; 9437 9438 aarch64_sync_32_to_64(env); 9439 9440 env->condexec_bits = 0; 9441 } 9442 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; 9443 9444 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 9445 env->elr_el[new_el]); 9446 9447 if (cpu_isar_feature(aa64_pan, cpu)) { 9448 /* The value of PSTATE.PAN is normally preserved, except when ... */ 9449 new_mode |= old_mode & PSTATE_PAN; 9450 switch (new_el) { 9451 case 2: 9452 /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ 9453 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) 9454 != (HCR_E2H | HCR_TGE)) { 9455 break; 9456 } 9457 /* fall through */ 9458 case 1: 9459 /* ... the target is EL1 ... */ 9460 /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ 9461 if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { 9462 new_mode |= PSTATE_PAN; 9463 } 9464 break; 9465 } 9466 } 9467 9468 pstate_write(env, PSTATE_DAIF | new_mode); 9469 env->aarch64 = 1; 9470 aarch64_restore_sp(env, new_el); 9471 helper_rebuild_hflags_a64(env, new_el); 9472 9473 env->pc = addr; 9474 9475 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 9476 new_el, env->pc, pstate_read(env)); 9477 } 9478 9479 /* 9480 * Do semihosting call and set the appropriate return value. All the 9481 * permission and validity checks have been done at translate time. 
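 * do_arm_semihosting() returns the call's result, which is written back to
 * r0/x0 before the PC is advanced past the trapping instruction.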
9482 * 9483 * We only see semihosting exceptions in TCG only as they are not 9484 * trapped to the hypervisor in KVM. 9485 */ 9486 #ifdef CONFIG_TCG 9487 static void handle_semihosting(CPUState *cs) 9488 { 9489 ARMCPU *cpu = ARM_CPU(cs); 9490 CPUARMState *env = &cpu->env; 9491 9492 if (is_a64(env)) { 9493 qemu_log_mask(CPU_LOG_INT, 9494 "...handling as semihosting call 0x%" PRIx64 "\n", 9495 env->xregs[0]); 9496 env->xregs[0] = do_arm_semihosting(env); 9497 env->pc += 4; 9498 } else { 9499 qemu_log_mask(CPU_LOG_INT, 9500 "...handling as semihosting call 0x%x\n", 9501 env->regs[0]); 9502 env->regs[0] = do_arm_semihosting(env); 9503 env->regs[15] += env->thumb ? 2 : 4; 9504 } 9505 } 9506 #endif 9507 9508 /* Handle a CPU exception for A and R profile CPUs. 9509 * Do any appropriate logging, handle PSCI calls, and then hand off 9510 * to the AArch64-entry or AArch32-entry function depending on the 9511 * target exception level's register width. 9512 */ 9513 void arm_cpu_do_interrupt(CPUState *cs) 9514 { 9515 ARMCPU *cpu = ARM_CPU(cs); 9516 CPUARMState *env = &cpu->env; 9517 unsigned int new_el = env->exception.target_el; 9518 9519 assert(!arm_feature(env, ARM_FEATURE_M)); 9520 9521 arm_log_exception(cs->exception_index); 9522 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 9523 new_el); 9524 if (qemu_loglevel_mask(CPU_LOG_INT) 9525 && !excp_is_internal(cs->exception_index)) { 9526 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 9527 syn_get_ec(env->exception.syndrome), 9528 env->exception.syndrome); 9529 } 9530 9531 if (arm_is_psci_call(cpu, cs->exception_index)) { 9532 arm_handle_psci_call(cpu); 9533 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 9534 return; 9535 } 9536 9537 /* 9538 * Semihosting semantics depend on the register width of the code 9539 * that caused the exception, not the target exception level, so 9540 * must be handled here. 9541 */ 9542 #ifdef CONFIG_TCG 9543 if (cs->exception_index == EXCP_SEMIHOST) { 9544 handle_semihosting(cs); 9545 return; 9546 } 9547 #endif 9548 9549 /* Hooks may change global state so BQL should be held, also the 9550 * BQL needs to be held for any modification of 9551 * cs->interrupt_request. 9552 */ 9553 g_assert(qemu_mutex_iothread_locked()); 9554 9555 arm_call_pre_el_change_hook(cpu); 9556 9557 assert(!excp_is_internal(cs->exception_index)); 9558 if (arm_el_is_aa64(env, new_el)) { 9559 arm_cpu_do_interrupt_aarch64(cs); 9560 } else { 9561 arm_cpu_do_interrupt_aarch32(cs); 9562 } 9563 9564 arm_call_el_change_hook(cpu); 9565 9566 if (!kvm_enabled()) { 9567 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 9568 } 9569 } 9570 #endif /* !CONFIG_USER_ONLY */ 9571 9572 /* Return the exception level which controls this address translation regime */ 9573 static uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 9574 { 9575 switch (mmu_idx) { 9576 case ARMMMUIdx_E20_0: 9577 case ARMMMUIdx_E20_2: 9578 case ARMMMUIdx_E20_2_PAN: 9579 case ARMMMUIdx_Stage2: 9580 case ARMMMUIdx_E2: 9581 return 2; 9582 case ARMMMUIdx_SE3: 9583 return 3; 9584 case ARMMMUIdx_SE10_0: 9585 return arm_el_is_aa64(env, 3) ? 
1 : 3; 9586 case ARMMMUIdx_SE10_1: 9587 case ARMMMUIdx_SE10_1_PAN: 9588 case ARMMMUIdx_Stage1_E0: 9589 case ARMMMUIdx_Stage1_E1: 9590 case ARMMMUIdx_Stage1_E1_PAN: 9591 case ARMMMUIdx_E10_0: 9592 case ARMMMUIdx_E10_1: 9593 case ARMMMUIdx_E10_1_PAN: 9594 case ARMMMUIdx_MPrivNegPri: 9595 case ARMMMUIdx_MUserNegPri: 9596 case ARMMMUIdx_MPriv: 9597 case ARMMMUIdx_MUser: 9598 case ARMMMUIdx_MSPrivNegPri: 9599 case ARMMMUIdx_MSUserNegPri: 9600 case ARMMMUIdx_MSPriv: 9601 case ARMMMUIdx_MSUser: 9602 return 1; 9603 default: 9604 g_assert_not_reached(); 9605 } 9606 } 9607 9608 uint64_t arm_sctlr(CPUARMState *env, int el) 9609 { 9610 /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ 9611 if (el == 0) { 9612 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); 9613 el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1); 9614 } 9615 return env->cp15.sctlr_el[el]; 9616 } 9617 9618 /* Return the SCTLR value which controls this address translation regime */ 9619 static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 9620 { 9621 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 9622 } 9623 9624 #ifndef CONFIG_USER_ONLY 9625 9626 /* Return true if the specified stage of address translation is disabled */ 9627 static inline bool regime_translation_disabled(CPUARMState *env, 9628 ARMMMUIdx mmu_idx) 9629 { 9630 if (arm_feature(env, ARM_FEATURE_M)) { 9631 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 9632 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 9633 case R_V7M_MPU_CTRL_ENABLE_MASK: 9634 /* Enabled, but not for HardFault and NMI */ 9635 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 9636 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 9637 /* Enabled for all cases */ 9638 return false; 9639 case 0: 9640 default: 9641 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 9642 * we warned about that in armv7m_nvic.c when the guest set it. 
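 * Here we simply treat the MPU as disabled for that combination too.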
9643 */ 9644 return true; 9645 } 9646 } 9647 9648 if (mmu_idx == ARMMMUIdx_Stage2) { 9649 /* HCR.DC means HCR.VM behaves as 1 */ 9650 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; 9651 } 9652 9653 if (env->cp15.hcr_el2 & HCR_TGE) { 9654 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 9655 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 9656 return true; 9657 } 9658 } 9659 9660 if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { 9661 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 9662 return true; 9663 } 9664 9665 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 9666 } 9667 9668 static inline bool regime_translation_big_endian(CPUARMState *env, 9669 ARMMMUIdx mmu_idx) 9670 { 9671 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 9672 } 9673 9674 /* Return the TTBR associated with this translation regime */ 9675 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 9676 int ttbrn) 9677 { 9678 if (mmu_idx == ARMMMUIdx_Stage2) { 9679 return env->cp15.vttbr_el2; 9680 } 9681 if (ttbrn == 0) { 9682 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 9683 } else { 9684 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 9685 } 9686 } 9687 9688 #endif /* !CONFIG_USER_ONLY */ 9689 9690 /* Return the TCR controlling this translation regime */ 9691 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 9692 { 9693 if (mmu_idx == ARMMMUIdx_Stage2) { 9694 return &env->cp15.vtcr_el2; 9695 } 9696 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 9697 } 9698 9699 /* Convert a possible stage1+2 MMU index into the appropriate 9700 * stage 1 MMU index 9701 */ 9702 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 9703 { 9704 switch (mmu_idx) { 9705 case ARMMMUIdx_E10_0: 9706 return ARMMMUIdx_Stage1_E0; 9707 case ARMMMUIdx_E10_1: 9708 return ARMMMUIdx_Stage1_E1; 9709 case ARMMMUIdx_E10_1_PAN: 9710 return ARMMMUIdx_Stage1_E1_PAN; 9711 default: 9712 return mmu_idx; 9713 } 9714 } 9715 9716 /* Return true if the translation regime is using LPAE format page tables */ 9717 static inline bool regime_using_lpae_format(CPUARMState *env, 9718 ARMMMUIdx mmu_idx) 9719 { 9720 int el = regime_el(env, mmu_idx); 9721 if (el == 2 || arm_el_is_aa64(env, el)) { 9722 return true; 9723 } 9724 if (arm_feature(env, ARM_FEATURE_LPAE) 9725 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 9726 return true; 9727 } 9728 return false; 9729 } 9730 9731 /* Returns true if the stage 1 translation regime is using LPAE format page 9732 * tables. Used when raising alignment exceptions, whose FSR changes depending 9733 * on whether the long or short descriptor format is in use. 
*/ 9734 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 9735 { 9736 mmu_idx = stage_1_mmu_idx(mmu_idx); 9737 9738 return regime_using_lpae_format(env, mmu_idx); 9739 } 9740 9741 #ifndef CONFIG_USER_ONLY 9742 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 9743 { 9744 switch (mmu_idx) { 9745 case ARMMMUIdx_SE10_0: 9746 case ARMMMUIdx_E20_0: 9747 case ARMMMUIdx_Stage1_E0: 9748 case ARMMMUIdx_MUser: 9749 case ARMMMUIdx_MSUser: 9750 case ARMMMUIdx_MUserNegPri: 9751 case ARMMMUIdx_MSUserNegPri: 9752 return true; 9753 default: 9754 return false; 9755 case ARMMMUIdx_E10_0: 9756 case ARMMMUIdx_E10_1: 9757 case ARMMMUIdx_E10_1_PAN: 9758 g_assert_not_reached(); 9759 } 9760 } 9761 9762 /* Translate section/page access permissions to page 9763 * R/W protection flags 9764 * 9765 * @env: CPUARMState 9766 * @mmu_idx: MMU index indicating required translation regime 9767 * @ap: The 3-bit access permissions (AP[2:0]) 9768 * @domain_prot: The 2-bit domain access permissions 9769 */ 9770 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 9771 int ap, int domain_prot) 9772 { 9773 bool is_user = regime_is_user(env, mmu_idx); 9774 9775 if (domain_prot == 3) { 9776 return PAGE_READ | PAGE_WRITE; 9777 } 9778 9779 switch (ap) { 9780 case 0: 9781 if (arm_feature(env, ARM_FEATURE_V7)) { 9782 return 0; 9783 } 9784 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 9785 case SCTLR_S: 9786 return is_user ? 0 : PAGE_READ; 9787 case SCTLR_R: 9788 return PAGE_READ; 9789 default: 9790 return 0; 9791 } 9792 case 1: 9793 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9794 case 2: 9795 if (is_user) { 9796 return PAGE_READ; 9797 } else { 9798 return PAGE_READ | PAGE_WRITE; 9799 } 9800 case 3: 9801 return PAGE_READ | PAGE_WRITE; 9802 case 4: /* Reserved. */ 9803 return 0; 9804 case 5: 9805 return is_user ? 0 : PAGE_READ; 9806 case 6: 9807 return PAGE_READ; 9808 case 7: 9809 if (!arm_feature(env, ARM_FEATURE_V6K)) { 9810 return 0; 9811 } 9812 return PAGE_READ; 9813 default: 9814 g_assert_not_reached(); 9815 } 9816 } 9817 9818 /* Translate section/page access permissions to page 9819 * R/W protection flags. 9820 * 9821 * @ap: The 2-bit simple AP (AP[2:1]) 9822 * @is_user: TRUE if accessing from PL0 9823 */ 9824 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 9825 { 9826 switch (ap) { 9827 case 0: 9828 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9829 case 1: 9830 return PAGE_READ | PAGE_WRITE; 9831 case 2: 9832 return is_user ? 
0 : PAGE_READ; 9833 case 3: 9834 return PAGE_READ; 9835 default: 9836 g_assert_not_reached(); 9837 } 9838 } 9839 9840 static inline int 9841 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 9842 { 9843 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 9844 } 9845 9846 /* Translate S2 section/page access permissions to protection flags 9847 * 9848 * @env: CPUARMState 9849 * @s2ap: The 2-bit stage2 access permissions (S2AP) 9850 * @xn: XN (execute-never) bit 9851 */ 9852 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 9853 { 9854 int prot = 0; 9855 9856 if (s2ap & 1) { 9857 prot |= PAGE_READ; 9858 } 9859 if (s2ap & 2) { 9860 prot |= PAGE_WRITE; 9861 } 9862 if (!xn) { 9863 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 9864 prot |= PAGE_EXEC; 9865 } 9866 } 9867 return prot; 9868 } 9869 9870 /* Translate section/page access permissions to protection flags 9871 * 9872 * @env: CPUARMState 9873 * @mmu_idx: MMU index indicating required translation regime 9874 * @is_aa64: TRUE if AArch64 9875 * @ap: The 2-bit simple AP (AP[2:1]) 9876 * @ns: NS (non-secure) bit 9877 * @xn: XN (execute-never) bit 9878 * @pxn: PXN (privileged execute-never) bit 9879 */ 9880 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 9881 int ap, int ns, int xn, int pxn) 9882 { 9883 bool is_user = regime_is_user(env, mmu_idx); 9884 int prot_rw, user_rw; 9885 bool have_wxn; 9886 int wxn = 0; 9887 9888 assert(mmu_idx != ARMMMUIdx_Stage2); 9889 9890 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 9891 if (is_user) { 9892 prot_rw = user_rw; 9893 } else { 9894 if (user_rw && regime_is_pan(env, mmu_idx)) { 9895 return 0; 9896 } 9897 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 9898 } 9899 9900 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 9901 return prot_rw; 9902 } 9903 9904 /* TODO have_wxn should be replaced with 9905 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 9906 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 9907 * compatible processors have EL2, which is required for [U]WXN. 
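 * As a worked example of the logic below: with SCTLR.WXN set, a mapping
 * whose AP bits grant write permission is returned without PAGE_EXEC
 * (write implies execute-never), and SCTLR.UWXN likewise strips PL1
 * execute permission from regions that are writable from PL0.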
9908 */ 9909 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 9910 9911 if (have_wxn) { 9912 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 9913 } 9914 9915 if (is_aa64) { 9916 if (regime_has_2_ranges(mmu_idx) && !is_user) { 9917 xn = pxn || (user_rw & PAGE_WRITE); 9918 } 9919 } else if (arm_feature(env, ARM_FEATURE_V7)) { 9920 switch (regime_el(env, mmu_idx)) { 9921 case 1: 9922 case 3: 9923 if (is_user) { 9924 xn = xn || !(user_rw & PAGE_READ); 9925 } else { 9926 int uwxn = 0; 9927 if (have_wxn) { 9928 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 9929 } 9930 xn = xn || !(prot_rw & PAGE_READ) || pxn || 9931 (uwxn && (user_rw & PAGE_WRITE)); 9932 } 9933 break; 9934 case 2: 9935 break; 9936 } 9937 } else { 9938 xn = wxn = 0; 9939 } 9940 9941 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 9942 return prot_rw; 9943 } 9944 return prot_rw | PAGE_EXEC; 9945 } 9946 9947 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 9948 uint32_t *table, uint32_t address) 9949 { 9950 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 9951 TCR *tcr = regime_tcr(env, mmu_idx); 9952 9953 if (address & tcr->mask) { 9954 if (tcr->raw_tcr & TTBCR_PD1) { 9955 /* Translation table walk disabled for TTBR1 */ 9956 return false; 9957 } 9958 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 9959 } else { 9960 if (tcr->raw_tcr & TTBCR_PD0) { 9961 /* Translation table walk disabled for TTBR0 */ 9962 return false; 9963 } 9964 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 9965 } 9966 *table |= (address >> 18) & 0x3ffc; 9967 return true; 9968 } 9969 9970 /* Translate a S1 pagetable walk through S2 if needed. */ 9971 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 9972 hwaddr addr, MemTxAttrs txattrs, 9973 ARMMMUFaultInfo *fi) 9974 { 9975 if (arm_mmu_idx_is_stage1_of_2(mmu_idx) && 9976 !regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 9977 target_ulong s2size; 9978 hwaddr s2pa; 9979 int s2prot; 9980 int ret; 9981 ARMCacheAttrs cacheattrs = {}; 9982 ARMCacheAttrs *pcacheattrs = NULL; 9983 9984 if (env->cp15.hcr_el2 & HCR_PTW) { 9985 /* 9986 * PTW means we must fault if this S1 walk touches S2 Device 9987 * memory; otherwise we don't care about the attributes and can 9988 * save the S2 translation the effort of computing them. 9989 */ 9990 pcacheattrs = &cacheattrs; 9991 } 9992 9993 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_Stage2, &s2pa, 9994 &txattrs, &s2prot, &s2size, fi, pcacheattrs); 9995 if (ret) { 9996 assert(fi->type != ARMFault_None); 9997 fi->s2addr = addr; 9998 fi->stage2 = true; 9999 fi->s1ptw = true; 10000 return ~0; 10001 } 10002 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { 10003 /* Access was to Device memory: generate Permission fault */ 10004 fi->type = ARMFault_Permission; 10005 fi->s2addr = addr; 10006 fi->stage2 = true; 10007 fi->s1ptw = true; 10008 return ~0; 10009 } 10010 addr = s2pa; 10011 } 10012 return addr; 10013 } 10014 10015 /* All loads done in the course of a page table walk go through here. 
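 * They translate the walk address through stage 2 first where needed (via
 * S1_ptw_translate()), pick descriptor endianness from SCTLR.EE, and report
 * a failed load as a synchronous external abort on walk. A typical call
 * site below is:
 *     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
 *                        mmu_idx, fi);
 *     if (fi->type != ARMFault_None) {
 *         goto do_fault;
 *     }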
*/ 10016 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10017 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10018 { 10019 ARMCPU *cpu = ARM_CPU(cs); 10020 CPUARMState *env = &cpu->env; 10021 MemTxAttrs attrs = {}; 10022 MemTxResult result = MEMTX_OK; 10023 AddressSpace *as; 10024 uint32_t data; 10025 10026 attrs.secure = is_secure; 10027 as = arm_addressspace(cs, attrs); 10028 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 10029 if (fi->s1ptw) { 10030 return 0; 10031 } 10032 if (regime_translation_big_endian(env, mmu_idx)) { 10033 data = address_space_ldl_be(as, addr, attrs, &result); 10034 } else { 10035 data = address_space_ldl_le(as, addr, attrs, &result); 10036 } 10037 if (result == MEMTX_OK) { 10038 return data; 10039 } 10040 fi->type = ARMFault_SyncExternalOnWalk; 10041 fi->ea = arm_extabort_type(result); 10042 return 0; 10043 } 10044 10045 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10046 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10047 { 10048 ARMCPU *cpu = ARM_CPU(cs); 10049 CPUARMState *env = &cpu->env; 10050 MemTxAttrs attrs = {}; 10051 MemTxResult result = MEMTX_OK; 10052 AddressSpace *as; 10053 uint64_t data; 10054 10055 attrs.secure = is_secure; 10056 as = arm_addressspace(cs, attrs); 10057 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 10058 if (fi->s1ptw) { 10059 return 0; 10060 } 10061 if (regime_translation_big_endian(env, mmu_idx)) { 10062 data = address_space_ldq_be(as, addr, attrs, &result); 10063 } else { 10064 data = address_space_ldq_le(as, addr, attrs, &result); 10065 } 10066 if (result == MEMTX_OK) { 10067 return data; 10068 } 10069 fi->type = ARMFault_SyncExternalOnWalk; 10070 fi->ea = arm_extabort_type(result); 10071 return 0; 10072 } 10073 10074 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 10075 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10076 hwaddr *phys_ptr, int *prot, 10077 target_ulong *page_size, 10078 ARMMMUFaultInfo *fi) 10079 { 10080 CPUState *cs = env_cpu(env); 10081 int level = 1; 10082 uint32_t table; 10083 uint32_t desc; 10084 int type; 10085 int ap; 10086 int domain = 0; 10087 int domain_prot; 10088 hwaddr phys_addr; 10089 uint32_t dacr; 10090 10091 /* Pagetable walk. */ 10092 /* Lookup l1 descriptor. */ 10093 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10094 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10095 fi->type = ARMFault_Translation; 10096 goto do_fault; 10097 } 10098 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10099 mmu_idx, fi); 10100 if (fi->type != ARMFault_None) { 10101 goto do_fault; 10102 } 10103 type = (desc & 3); 10104 domain = (desc >> 5) & 0x0f; 10105 if (regime_el(env, mmu_idx) == 1) { 10106 dacr = env->cp15.dacr_ns; 10107 } else { 10108 dacr = env->cp15.dacr_s; 10109 } 10110 domain_prot = (dacr >> (domain * 2)) & 3; 10111 if (type == 0) { 10112 /* Section translation fault. */ 10113 fi->type = ARMFault_Translation; 10114 goto do_fault; 10115 } 10116 if (type != 2) { 10117 level = 2; 10118 } 10119 if (domain_prot == 0 || domain_prot == 2) { 10120 fi->type = ARMFault_Domain; 10121 goto do_fault; 10122 } 10123 if (type == 2) { 10124 /* 1Mb section. */ 10125 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10126 ap = (desc >> 10) & 3; 10127 *page_size = 1024 * 1024; 10128 } else { 10129 /* Lookup l2 entry. */ 10130 if (type == 1) { 10131 /* Coarse pagetable. */ 10132 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10133 } else { 10134 /* Fine pagetable. 
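 * (Illustration: the fine table is the legacy 1KB-granularity second level.
 * VA[19:10] indexes 1024 entries here, versus VA[19:12] and 256 entries for
 * the coarse table above, which is what lets it describe 1KB tiny pages.)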
*/ 10135 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 10136 } 10137 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10138 mmu_idx, fi); 10139 if (fi->type != ARMFault_None) { 10140 goto do_fault; 10141 } 10142 switch (desc & 3) { 10143 case 0: /* Page translation fault. */ 10144 fi->type = ARMFault_Translation; 10145 goto do_fault; 10146 case 1: /* 64k page. */ 10147 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10148 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 10149 *page_size = 0x10000; 10150 break; 10151 case 2: /* 4k page. */ 10152 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10153 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 10154 *page_size = 0x1000; 10155 break; 10156 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 10157 if (type == 1) { 10158 /* ARMv6/XScale extended small page format */ 10159 if (arm_feature(env, ARM_FEATURE_XSCALE) 10160 || arm_feature(env, ARM_FEATURE_V6)) { 10161 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10162 *page_size = 0x1000; 10163 } else { 10164 /* UNPREDICTABLE in ARMv5; we choose to take a 10165 * page translation fault. 10166 */ 10167 fi->type = ARMFault_Translation; 10168 goto do_fault; 10169 } 10170 } else { 10171 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 10172 *page_size = 0x400; 10173 } 10174 ap = (desc >> 4) & 3; 10175 break; 10176 default: 10177 /* Never happens, but compiler isn't smart enough to tell. */ 10178 abort(); 10179 } 10180 } 10181 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10182 *prot |= *prot ? PAGE_EXEC : 0; 10183 if (!(*prot & (1 << access_type))) { 10184 /* Access permission fault. */ 10185 fi->type = ARMFault_Permission; 10186 goto do_fault; 10187 } 10188 *phys_ptr = phys_addr; 10189 return false; 10190 do_fault: 10191 fi->domain = domain; 10192 fi->level = level; 10193 return true; 10194 } 10195 10196 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 10197 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10198 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10199 target_ulong *page_size, ARMMMUFaultInfo *fi) 10200 { 10201 CPUState *cs = env_cpu(env); 10202 int level = 1; 10203 uint32_t table; 10204 uint32_t desc; 10205 uint32_t xn; 10206 uint32_t pxn = 0; 10207 int type; 10208 int ap; 10209 int domain = 0; 10210 int domain_prot; 10211 hwaddr phys_addr; 10212 uint32_t dacr; 10213 bool ns; 10214 10215 /* Pagetable walk. */ 10216 /* Lookup l1 descriptor. */ 10217 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10218 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10219 fi->type = ARMFault_Translation; 10220 goto do_fault; 10221 } 10222 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10223 mmu_idx, fi); 10224 if (fi->type != ARMFault_None) { 10225 goto do_fault; 10226 } 10227 type = (desc & 3); 10228 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 10229 /* Section translation fault, or attempt to use the encoding 10230 * which is Reserved on implementations without PXN. 10231 */ 10232 fi->type = ARMFault_Translation; 10233 goto do_fault; 10234 } 10235 if ((type == 1) || !(desc & (1 << 18))) { 10236 /* Page or Section. 
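 * (Supersection descriptors, which have bit 18 set, carry no domain field,
 * so for them 'domain' stays 0 and it is the D0 field of the DACR that is
 * checked below.)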
*/ 10237 domain = (desc >> 5) & 0x0f; 10238 } 10239 if (regime_el(env, mmu_idx) == 1) { 10240 dacr = env->cp15.dacr_ns; 10241 } else { 10242 dacr = env->cp15.dacr_s; 10243 } 10244 if (type == 1) { 10245 level = 2; 10246 } 10247 domain_prot = (dacr >> (domain * 2)) & 3; 10248 if (domain_prot == 0 || domain_prot == 2) { 10249 /* Section or Page domain fault */ 10250 fi->type = ARMFault_Domain; 10251 goto do_fault; 10252 } 10253 if (type != 1) { 10254 if (desc & (1 << 18)) { 10255 /* Supersection. */ 10256 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 10257 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 10258 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 10259 *page_size = 0x1000000; 10260 } else { 10261 /* Section. */ 10262 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10263 *page_size = 0x100000; 10264 } 10265 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 10266 xn = desc & (1 << 4); 10267 pxn = desc & 1; 10268 ns = extract32(desc, 19, 1); 10269 } else { 10270 if (arm_feature(env, ARM_FEATURE_PXN)) { 10271 pxn = (desc >> 2) & 1; 10272 } 10273 ns = extract32(desc, 3, 1); 10274 /* Lookup l2 entry. */ 10275 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10276 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10277 mmu_idx, fi); 10278 if (fi->type != ARMFault_None) { 10279 goto do_fault; 10280 } 10281 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 10282 switch (desc & 3) { 10283 case 0: /* Page translation fault. */ 10284 fi->type = ARMFault_Translation; 10285 goto do_fault; 10286 case 1: /* 64k page. */ 10287 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10288 xn = desc & (1 << 15); 10289 *page_size = 0x10000; 10290 break; 10291 case 2: case 3: /* 4k page. */ 10292 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10293 xn = desc & 1; 10294 *page_size = 0x1000; 10295 break; 10296 default: 10297 /* Never happens, but compiler isn't smart enough to tell. */ 10298 abort(); 10299 } 10300 } 10301 if (domain_prot == 3) { 10302 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10303 } else { 10304 if (pxn && !regime_is_user(env, mmu_idx)) { 10305 xn = 1; 10306 } 10307 if (xn && access_type == MMU_INST_FETCH) { 10308 fi->type = ARMFault_Permission; 10309 goto do_fault; 10310 } 10311 10312 if (arm_feature(env, ARM_FEATURE_V6K) && 10313 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 10314 /* The simplified model uses AP[0] as an access control bit. */ 10315 if ((ap & 1) == 0) { 10316 /* Access flag fault. */ 10317 fi->type = ARMFault_AccessFlag; 10318 goto do_fault; 10319 } 10320 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 10321 } else { 10322 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10323 } 10324 if (*prot && !xn) { 10325 *prot |= PAGE_EXEC; 10326 } 10327 if (!(*prot & (1 << access_type))) { 10328 /* Access permission fault. */ 10329 fi->type = ARMFault_Permission; 10330 goto do_fault; 10331 } 10332 } 10333 if (ns) { 10334 /* The NS bit will (as required by the architecture) have no effect if 10335 * the CPU doesn't support TZ or this is a non-secure translation 10336 * regime, because the attribute will already be non-secure. 
10337 */ 10338 attrs->secure = false; 10339 } 10340 *phys_ptr = phys_addr; 10341 return false; 10342 do_fault: 10343 fi->domain = domain; 10344 fi->level = level; 10345 return true; 10346 } 10347 10348 /* 10349 * check_s2_mmu_setup 10350 * @cpu: ARMCPU 10351 * @is_aa64: True if the translation regime is in AArch64 state 10352 * @startlevel: Suggested starting level 10353 * @inputsize: Bitsize of IPAs 10354 * @stride: Page-table stride (See the ARM ARM) 10355 * 10356 * Returns true if the suggested S2 translation parameters are OK and 10357 * false otherwise. 10358 */ 10359 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 10360 int inputsize, int stride) 10361 { 10362 const int grainsize = stride + 3; 10363 int startsizecheck; 10364 10365 /* Negative levels are never allowed. */ 10366 if (level < 0) { 10367 return false; 10368 } 10369 10370 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 10371 if (startsizecheck < 1 || startsizecheck > stride + 4) { 10372 return false; 10373 } 10374 10375 if (is_aa64) { 10376 CPUARMState *env = &cpu->env; 10377 unsigned int pamax = arm_pamax(cpu); 10378 10379 switch (stride) { 10380 case 13: /* 64KB Pages. */ 10381 if (level == 0 || (level == 1 && pamax <= 42)) { 10382 return false; 10383 } 10384 break; 10385 case 11: /* 16KB Pages. */ 10386 if (level == 0 || (level == 1 && pamax <= 40)) { 10387 return false; 10388 } 10389 break; 10390 case 9: /* 4KB Pages. */ 10391 if (level == 0 && pamax <= 42) { 10392 return false; 10393 } 10394 break; 10395 default: 10396 g_assert_not_reached(); 10397 } 10398 10399 /* Inputsize checks. */ 10400 if (inputsize > pamax && 10401 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 10402 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 10403 return false; 10404 } 10405 } else { 10406 /* AArch32 only supports 4KB pages. Assert on that. */ 10407 assert(stride == 9); 10408 10409 if (level == 0) { 10410 return false; 10411 } 10412 } 10413 return true; 10414 } 10415 10416 /* Translate from the 4-bit stage 2 representation of 10417 * memory attributes (without cache-allocation hints) to 10418 * the 8-bit representation of the stage 1 MAIR registers 10419 * (which includes allocation hints). 10420 * 10421 * ref: shared/translation/attrs/S2AttrDecode() 10422 * .../S2ConvertAttrsHints() 10423 */ 10424 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 10425 { 10426 uint8_t hiattr = extract32(s2attrs, 2, 2); 10427 uint8_t loattr = extract32(s2attrs, 0, 2); 10428 uint8_t hihint = 0, lohint = 0; 10429 10430 if (hiattr != 0) { /* normal memory */ 10431 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 10432 hiattr = loattr = 1; /* non-cacheable */ 10433 } else { 10434 if (hiattr != 1) { /* Write-through or write-back */ 10435 hihint = 3; /* RW allocate */ 10436 } 10437 if (loattr != 1) { /* Write-through or write-back */ 10438 lohint = 3; /* RW allocate */ 10439 } 10440 } 10441 } 10442 10443 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 10444 } 10445 #endif /* !CONFIG_USER_ONLY */ 10446 10447 static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) 10448 { 10449 if (regime_has_2_ranges(mmu_idx)) { 10450 return extract64(tcr, 37, 2); 10451 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10452 return 0; /* VTCR_EL2 */ 10453 } else { 10454 /* Replicate the single TBI bit so we always have 2 bits. 
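 * As a worked example: for TCR_EL2 and TCR_EL3 the single TBI bit is
 * TCR[20], so a set bit yields 0b11 and a clear bit 0b00, matching the
 * TBI1:TBI0 layout returned for the two-range regimes above.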
*/ 10455 return extract32(tcr, 20, 1) * 3; 10456 } 10457 } 10458 10459 static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) 10460 { 10461 if (regime_has_2_ranges(mmu_idx)) { 10462 return extract64(tcr, 51, 2); 10463 } else if (mmu_idx == ARMMMUIdx_Stage2) { 10464 return 0; /* VTCR_EL2 */ 10465 } else { 10466 /* Replicate the single TBID bit so we always have 2 bits. */ 10467 return extract32(tcr, 29, 1) * 3; 10468 } 10469 } 10470 10471 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10472 ARMMMUIdx mmu_idx, bool data) 10473 { 10474 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10475 bool epd, hpd, using16k, using64k; 10476 int select, tsz, tbi; 10477 10478 if (!regime_has_2_ranges(mmu_idx)) { 10479 select = 0; 10480 tsz = extract32(tcr, 0, 6); 10481 using64k = extract32(tcr, 14, 1); 10482 using16k = extract32(tcr, 15, 1); 10483 if (mmu_idx == ARMMMUIdx_Stage2) { 10484 /* VTCR_EL2 */ 10485 hpd = false; 10486 } else { 10487 hpd = extract32(tcr, 24, 1); 10488 } 10489 epd = false; 10490 } else { 10491 /* 10492 * Bit 55 is always between the two regions, and is canonical for 10493 * determining if address tagging is enabled. 10494 */ 10495 select = extract64(va, 55, 1); 10496 if (!select) { 10497 tsz = extract32(tcr, 0, 6); 10498 epd = extract32(tcr, 7, 1); 10499 using64k = extract32(tcr, 14, 1); 10500 using16k = extract32(tcr, 15, 1); 10501 hpd = extract64(tcr, 41, 1); 10502 } else { 10503 int tg = extract32(tcr, 30, 2); 10504 using16k = tg == 1; 10505 using64k = tg == 3; 10506 tsz = extract32(tcr, 16, 6); 10507 epd = extract32(tcr, 23, 1); 10508 hpd = extract64(tcr, 42, 1); 10509 } 10510 } 10511 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ 10512 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ 10513 10514 /* Present TBI as a composite with TBID. */ 10515 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 10516 if (!data) { 10517 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 10518 } 10519 tbi = (tbi >> select) & 1; 10520 10521 return (ARMVAParameters) { 10522 .tsz = tsz, 10523 .select = select, 10524 .tbi = tbi, 10525 .epd = epd, 10526 .hpd = hpd, 10527 .using16k = using16k, 10528 .using64k = using64k, 10529 }; 10530 } 10531 10532 #ifndef CONFIG_USER_ONLY 10533 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 10534 ARMMMUIdx mmu_idx) 10535 { 10536 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10537 uint32_t el = regime_el(env, mmu_idx); 10538 int select, tsz; 10539 bool epd, hpd; 10540 10541 if (mmu_idx == ARMMMUIdx_Stage2) { 10542 /* VTCR */ 10543 bool sext = extract32(tcr, 4, 1); 10544 bool sign = extract32(tcr, 3, 1); 10545 10546 /* 10547 * If the sign-extend bit is not the same as t0sz[3], the result 10548 * is unpredictable. Flag this as a guest error. 10549 */ 10550 if (sign != sext) { 10551 qemu_log_mask(LOG_GUEST_ERROR, 10552 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 10553 } 10554 tsz = sextract32(tcr, 0, 4) + 8; 10555 select = 0; 10556 hpd = false; 10557 epd = false; 10558 } else if (el == 2) { 10559 /* HTCR */ 10560 tsz = extract32(tcr, 0, 3); 10561 select = 0; 10562 hpd = extract64(tcr, 24, 1); 10563 epd = false; 10564 } else { 10565 int t0sz = extract32(tcr, 0, 3); 10566 int t1sz = extract32(tcr, 16, 3); 10567 10568 if (t1sz == 0) { 10569 select = va > (0xffffffffu >> t0sz); 10570 } else { 10571 /* Note that we will detect errors later. 
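 * As an illustration: with T0SZ = 0 and T1SZ = 1 the boundary is at
 * 0x80000000, so va >= 0x80000000 selects TTBR1 (select = 1) and anything
 * below it selects TTBR0 (select = 0).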
*/ 10572 select = va >= ~(0xffffffffu >> t1sz); 10573 } 10574 if (!select) { 10575 tsz = t0sz; 10576 epd = extract32(tcr, 7, 1); 10577 hpd = extract64(tcr, 41, 1); 10578 } else { 10579 tsz = t1sz; 10580 epd = extract32(tcr, 23, 1); 10581 hpd = extract64(tcr, 42, 1); 10582 } 10583 /* For aarch32, hpd0 is not enabled without t2e as well. */ 10584 hpd &= extract32(tcr, 6, 1); 10585 } 10586 10587 return (ARMVAParameters) { 10588 .tsz = tsz, 10589 .select = select, 10590 .epd = epd, 10591 .hpd = hpd, 10592 }; 10593 } 10594 10595 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 10596 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10597 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 10598 target_ulong *page_size_ptr, 10599 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10600 { 10601 ARMCPU *cpu = env_archcpu(env); 10602 CPUState *cs = CPU(cpu); 10603 /* Read an LPAE long-descriptor translation table. */ 10604 ARMFaultType fault_type = ARMFault_Translation; 10605 uint32_t level; 10606 ARMVAParameters param; 10607 uint64_t ttbr; 10608 hwaddr descaddr, indexmask, indexmask_grainsize; 10609 uint32_t tableattrs; 10610 target_ulong page_size; 10611 uint32_t attrs; 10612 int32_t stride; 10613 int addrsize, inputsize; 10614 TCR *tcr = regime_tcr(env, mmu_idx); 10615 int ap, ns, xn, pxn; 10616 uint32_t el = regime_el(env, mmu_idx); 10617 uint64_t descaddrmask; 10618 bool aarch64 = arm_el_is_aa64(env, el); 10619 bool guarded = false; 10620 10621 /* TODO: 10622 * This code does not handle the different format TCR for VTCR_EL2. 10623 * This code also does not support shareability levels. 10624 * Attribute and permission bit handling should also be checked when adding 10625 * support for those page table walks. 10626 */ 10627 if (aarch64) { 10628 param = aa64_va_parameters(env, address, mmu_idx, 10629 access_type != MMU_INST_FETCH); 10630 level = 0; 10631 addrsize = 64 - 8 * param.tbi; 10632 inputsize = 64 - param.tsz; 10633 } else { 10634 param = aa32_va_parameters(env, address, mmu_idx); 10635 level = 1; 10636 addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); 10637 inputsize = addrsize - param.tsz; 10638 } 10639 10640 /* 10641 * We determined the region when collecting the parameters, but we 10642 * have not yet validated that the address is valid for the region. 10643 * Extract the top bits and verify that they all match select. 10644 * 10645 * For aa32, if inputsize == addrsize, then we have selected the 10646 * region by exclusion in aa32_va_parameters and there is no more 10647 * validation to do here. 10648 */ 10649 if (inputsize < addrsize) { 10650 target_ulong top_bits = sextract64(address, inputsize, 10651 addrsize - inputsize); 10652 if (-top_bits != param.select) { 10653 /* The gap between the two regions is a Translation fault */ 10654 fault_type = ARMFault_Translation; 10655 goto do_fault; 10656 } 10657 } 10658 10659 if (param.using64k) { 10660 stride = 13; 10661 } else if (param.using16k) { 10662 stride = 11; 10663 } else { 10664 stride = 9; 10665 } 10666 10667 /* Note that QEMU ignores shareability and cacheability attributes, 10668 * so we don't need to do anything with the SH, ORGN, IRGN fields 10669 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 10670 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 10671 * implement any ASID-like capability so we can ignore it (instead 10672 * we will always flush the TLB any time the ASID is changed). 
10673 */ 10674 ttbr = regime_ttbr(env, mmu_idx, param.select); 10675 10676 /* Here we should have set up all the parameters for the translation: 10677 * inputsize, ttbr, epd, stride, tbi 10678 */ 10679 10680 if (param.epd) { 10681 /* Translation table walk disabled => Translation fault on TLB miss 10682 * Note: This is always 0 on 64-bit EL2 and EL3. 10683 */ 10684 goto do_fault; 10685 } 10686 10687 if (mmu_idx != ARMMMUIdx_Stage2) { 10688 /* The starting level depends on the virtual address size (which can 10689 * be up to 48 bits) and the translation granule size. It indicates 10690 * the number of strides (stride bits at a time) needed to 10691 * consume the bits of the input address. In the pseudocode this is: 10692 * level = 4 - RoundUp((inputsize - grainsize) / stride) 10693 * where their 'inputsize' is our 'inputsize', 'grainsize' is 10694 * our 'stride + 3' and 'stride' is our 'stride'. 10695 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 10696 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 10697 * = 4 - (inputsize - 4) / stride; 10698 */ 10699 level = 4 - (inputsize - 4) / stride; 10700 } else { 10701 /* For stage 2 translations the starting level is specified by the 10702 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 10703 */ 10704 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 10705 uint32_t startlevel; 10706 bool ok; 10707 10708 if (!aarch64 || stride == 9) { 10709 /* AArch32 or 4KB pages */ 10710 startlevel = 2 - sl0; 10711 } else { 10712 /* 16KB or 64KB pages */ 10713 startlevel = 3 - sl0; 10714 } 10715 10716 /* Check that the starting level is valid. */ 10717 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 10718 inputsize, stride); 10719 if (!ok) { 10720 fault_type = ARMFault_Translation; 10721 goto do_fault; 10722 } 10723 level = startlevel; 10724 } 10725 10726 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 10727 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 10728 10729 /* Now we can extract the actual base address from the TTBR */ 10730 descaddr = extract64(ttbr, 0, 48); 10731 /* 10732 * We rely on this masking to clear the RES0 bits at the bottom of the TTBR 10733 * and also to mask out CnP (bit 0) which could validly be non-zero. 10734 */ 10735 descaddr &= ~indexmask; 10736 10737 /* The address field in the descriptor goes up to bit 39 for ARMv7 10738 * but up to bit 47 for ARMv8, but we use the descaddrmask 10739 * up to bit 39 for AArch32, because we don't need other bits in that case 10740 * to construct next descriptor address (anyway they should be all zeroes). 10741 */ 10742 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 10743 ~indexmask_grainsize; 10744 10745 /* Secure accesses start with the page table in secure memory and 10746 * can be downgraded to non-secure at any step. Non-secure accesses 10747 * remain non-secure. We implement this by just ORing in the NSTable/NS 10748 * bits at each step. 10749 */ 10750 tableattrs = regime_is_secure(env, mmu_idx) ? 
0 : (1 << 4); 10751 for (;;) { 10752 uint64_t descriptor; 10753 bool nstable; 10754 10755 descaddr |= (address >> (stride * (4 - level))) & indexmask; 10756 descaddr &= ~7ULL; 10757 nstable = extract32(tableattrs, 4, 1); 10758 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 10759 if (fi->type != ARMFault_None) { 10760 goto do_fault; 10761 } 10762 10763 if (!(descriptor & 1) || 10764 (!(descriptor & 2) && (level == 3))) { 10765 /* Invalid, or the Reserved level 3 encoding */ 10766 goto do_fault; 10767 } 10768 descaddr = descriptor & descaddrmask; 10769 10770 if ((descriptor & 2) && (level < 3)) { 10771 /* Table entry. The top five bits are attributes which may 10772 * propagate down through lower levels of the table (and 10773 * which are all arranged so that 0 means "no effect", so 10774 * we can gather them up by ORing in the bits at each level). 10775 */ 10776 tableattrs |= extract64(descriptor, 59, 5); 10777 level++; 10778 indexmask = indexmask_grainsize; 10779 continue; 10780 } 10781 /* Block entry at level 1 or 2, or page entry at level 3. 10782 * These are basically the same thing, although the number 10783 * of bits we pull in from the vaddr varies. 10784 */ 10785 page_size = (1ULL << ((stride * (4 - level)) + 3)); 10786 descaddr |= (address & (page_size - 1)); 10787 /* Extract attributes from the descriptor */ 10788 attrs = extract64(descriptor, 2, 10) 10789 | (extract64(descriptor, 52, 12) << 10); 10790 10791 if (mmu_idx == ARMMMUIdx_Stage2) { 10792 /* Stage 2 table descriptors do not include any attribute fields */ 10793 break; 10794 } 10795 /* Merge in attributes from table descriptors */ 10796 attrs |= nstable << 3; /* NS */ 10797 guarded = extract64(descriptor, 50, 1); /* GP */ 10798 if (param.hpd) { 10799 /* HPD disables all the table attributes except NSTable. */ 10800 break; 10801 } 10802 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 10803 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 10804 * means "force PL1 access only", which means forcing AP[1] to 0. 10805 */ 10806 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 10807 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 10808 break; 10809 } 10810 /* Here descaddr is the final physical address, and attributes 10811 * are all in attrs. 10812 */ 10813 fault_type = ARMFault_AccessFlag; 10814 if ((attrs & (1 << 8)) == 0) { 10815 /* Access flag */ 10816 goto do_fault; 10817 } 10818 10819 ap = extract32(attrs, 4, 2); 10820 xn = extract32(attrs, 12, 1); 10821 10822 if (mmu_idx == ARMMMUIdx_Stage2) { 10823 ns = true; 10824 *prot = get_S2prot(env, ap, xn); 10825 } else { 10826 ns = extract32(attrs, 3, 1); 10827 pxn = extract32(attrs, 11, 1); 10828 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 10829 } 10830 10831 fault_type = ARMFault_Permission; 10832 if (!(*prot & (1 << access_type))) { 10833 goto do_fault; 10834 } 10835 10836 if (ns) { 10837 /* The NS bit will (as required by the architecture) have no effect if 10838 * the CPU doesn't support TZ or this is a non-secure translation 10839 * regime, because the attribute will already be non-secure. 10840 */ 10841 txattrs->secure = false; 10842 } 10843 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. 
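 * ('guarded' was gathered from descriptor bit 50 above; caching GP in a TLB
 * attribute bit lets the translator apply the BTI checks without having to
 * walk the page tables again for each translation block.)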
*/ 10844 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 10845 txattrs->target_tlb_bit0 = true; 10846 } 10847 10848 if (cacheattrs != NULL) { 10849 if (mmu_idx == ARMMMUIdx_Stage2) { 10850 cacheattrs->attrs = convert_stage2_attrs(env, 10851 extract32(attrs, 0, 4)); 10852 } else { 10853 /* Index into MAIR registers for cache attributes */ 10854 uint8_t attrindx = extract32(attrs, 0, 3); 10855 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 10856 assert(attrindx <= 7); 10857 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 10858 } 10859 cacheattrs->shareability = extract32(attrs, 6, 2); 10860 } 10861 10862 *phys_ptr = descaddr; 10863 *page_size_ptr = page_size; 10864 return false; 10865 10866 do_fault: 10867 fi->type = fault_type; 10868 fi->level = level; 10869 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */ 10870 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2); 10871 return true; 10872 } 10873 10874 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 10875 ARMMMUIdx mmu_idx, 10876 int32_t address, int *prot) 10877 { 10878 if (!arm_feature(env, ARM_FEATURE_M)) { 10879 *prot = PAGE_READ | PAGE_WRITE; 10880 switch (address) { 10881 case 0xF0000000 ... 0xFFFFFFFF: 10882 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 10883 /* hivecs execing is ok */ 10884 *prot |= PAGE_EXEC; 10885 } 10886 break; 10887 case 0x00000000 ... 0x7FFFFFFF: 10888 *prot |= PAGE_EXEC; 10889 break; 10890 } 10891 } else { 10892 /* Default system address map for M profile cores. 10893 * The architecture specifies which regions are execute-never; 10894 * at the MPU level no other checks are defined. 10895 */ 10896 switch (address) { 10897 case 0x00000000 ... 0x1fffffff: /* ROM */ 10898 case 0x20000000 ... 0x3fffffff: /* SRAM */ 10899 case 0x60000000 ... 0x7fffffff: /* RAM */ 10900 case 0x80000000 ... 0x9fffffff: /* RAM */ 10901 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10902 break; 10903 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 10904 case 0xa0000000 ... 0xbfffffff: /* Device */ 10905 case 0xc0000000 ... 0xdfffffff: /* Device */ 10906 case 0xe0000000 ... 0xffffffff: /* System */ 10907 *prot = PAGE_READ | PAGE_WRITE; 10908 break; 10909 default: 10910 g_assert_not_reached(); 10911 } 10912 } 10913 } 10914 10915 static bool pmsav7_use_background_region(ARMCPU *cpu, 10916 ARMMMUIdx mmu_idx, bool is_user) 10917 { 10918 /* Return true if we should use the default memory map as a 10919 * "background" region if there are no hits against any MPU regions. 
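 * As a concrete example of the logic below: on v7-M this is the
 * MPU_CTRL.PRIVDEFENA bit of the relevant security state, and unprivileged
 * accesses never get a background region (the is_user case returns false).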
10920 */ 10921 CPUARMState *env = &cpu->env; 10922 10923 if (is_user) { 10924 return false; 10925 } 10926 10927 if (arm_feature(env, ARM_FEATURE_M)) { 10928 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 10929 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 10930 } else { 10931 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 10932 } 10933 } 10934 10935 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 10936 { 10937 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 10938 return arm_feature(env, ARM_FEATURE_M) && 10939 extract32(address, 20, 12) == 0xe00; 10940 } 10941 10942 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 10943 { 10944 /* True if address is in the M profile system region 10945 * 0xe0000000 - 0xffffffff 10946 */ 10947 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 10948 } 10949 10950 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 10951 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10952 hwaddr *phys_ptr, int *prot, 10953 target_ulong *page_size, 10954 ARMMMUFaultInfo *fi) 10955 { 10956 ARMCPU *cpu = env_archcpu(env); 10957 int n; 10958 bool is_user = regime_is_user(env, mmu_idx); 10959 10960 *phys_ptr = address; 10961 *page_size = TARGET_PAGE_SIZE; 10962 *prot = 0; 10963 10964 if (regime_translation_disabled(env, mmu_idx) || 10965 m_is_ppb_region(env, address)) { 10966 /* MPU disabled or M profile PPB access: use default memory map. 10967 * The other case which uses the default memory map in the 10968 * v7M ARM ARM pseudocode is exception vector reads from the vector 10969 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 10970 * which always does a direct read using address_space_ldl(), rather 10971 * than going via this function, so we don't need to check that here. 10972 */ 10973 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 10974 } else { /* MPU enabled */ 10975 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 10976 /* region search */ 10977 uint32_t base = env->pmsav7.drbar[n]; 10978 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 10979 uint32_t rmask; 10980 bool srdis = false; 10981 10982 if (!(env->pmsav7.drsr[n] & 0x1)) { 10983 continue; 10984 } 10985 10986 if (!rsize) { 10987 qemu_log_mask(LOG_GUEST_ERROR, 10988 "DRSR[%d]: Rsize field cannot be 0\n", n); 10989 continue; 10990 } 10991 rsize++; 10992 rmask = (1ull << rsize) - 1; 10993 10994 if (base & rmask) { 10995 qemu_log_mask(LOG_GUEST_ERROR, 10996 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 10997 "to DRSR region size, mask = 0x%" PRIx32 "\n", 10998 n, base, rmask); 10999 continue; 11000 } 11001 11002 if (address < base || address > base + rmask) { 11003 /* 11004 * Address not in this region. We must check whether the 11005 * region covers addresses in the same page as our address. 11006 * In that case we must not report a size that covers the 11007 * whole page for a subsequent hit against a different MPU 11008 * region or the background region, because it would result in 11009 * incorrect TLB hits for subsequent accesses to addresses that 11010 * are in this MPU region. 
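 * As an illustration: a 32-byte MPU region sitting elsewhere in the same
 * TARGET_PAGE as the probed address forces *page_size down to 1, so the TLB
 * entry inserted for this access cannot wrongly satisfy later accesses that
 * should hit (or miss) that other region.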
11011 */ 11012 if (ranges_overlap(base, rmask, 11013 address & TARGET_PAGE_MASK, 11014 TARGET_PAGE_SIZE)) { 11015 *page_size = 1; 11016 } 11017 continue; 11018 } 11019 11020 /* Region matched */ 11021 11022 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 11023 int i, snd; 11024 uint32_t srdis_mask; 11025 11026 rsize -= 3; /* sub region size (power of 2) */ 11027 snd = ((address - base) >> rsize) & 0x7; 11028 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 11029 11030 srdis_mask = srdis ? 0x3 : 0x0; 11031 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 11032 /* This will check in groups of 2, 4 and then 8, whether 11033 * the subregion bits are consistent. rsize is incremented 11034 * back up to give the region size, considering consistent 11035 * adjacent subregions as one region. Stop testing if rsize 11036 * is already big enough for an entire QEMU page. 11037 */ 11038 int snd_rounded = snd & ~(i - 1); 11039 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 11040 snd_rounded + 8, i); 11041 if (srdis_mask ^ srdis_multi) { 11042 break; 11043 } 11044 srdis_mask = (srdis_mask << i) | srdis_mask; 11045 rsize++; 11046 } 11047 } 11048 if (srdis) { 11049 continue; 11050 } 11051 if (rsize < TARGET_PAGE_BITS) { 11052 *page_size = 1 << rsize; 11053 } 11054 break; 11055 } 11056 11057 if (n == -1) { /* no hits */ 11058 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11059 /* background fault */ 11060 fi->type = ARMFault_Background; 11061 return true; 11062 } 11063 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11064 } else { /* a MPU hit! */ 11065 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 11066 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 11067 11068 if (m_is_system_region(env, address)) { 11069 /* System space is always execute never */ 11070 xn = 1; 11071 } 11072 11073 if (is_user) { /* User mode AP bit decoding */ 11074 switch (ap) { 11075 case 0: 11076 case 1: 11077 case 5: 11078 break; /* no access */ 11079 case 3: 11080 *prot |= PAGE_WRITE; 11081 /* fall through */ 11082 case 2: 11083 case 6: 11084 *prot |= PAGE_READ | PAGE_EXEC; 11085 break; 11086 case 7: 11087 /* for v7M, same as 6; for R profile a reserved value */ 11088 if (arm_feature(env, ARM_FEATURE_M)) { 11089 *prot |= PAGE_READ | PAGE_EXEC; 11090 break; 11091 } 11092 /* fall through */ 11093 default: 11094 qemu_log_mask(LOG_GUEST_ERROR, 11095 "DRACR[%d]: Bad value for AP bits: 0x%" 11096 PRIx32 "\n", n, ap); 11097 } 11098 } else { /* Priv. 
mode AP bits decoding */ 11099 switch (ap) { 11100 case 0: 11101 break; /* no access */ 11102 case 1: 11103 case 2: 11104 case 3: 11105 *prot |= PAGE_WRITE; 11106 /* fall through */ 11107 case 5: 11108 case 6: 11109 *prot |= PAGE_READ | PAGE_EXEC; 11110 break; 11111 case 7: 11112 /* for v7M, same as 6; for R profile a reserved value */ 11113 if (arm_feature(env, ARM_FEATURE_M)) { 11114 *prot |= PAGE_READ | PAGE_EXEC; 11115 break; 11116 } 11117 /* fall through */ 11118 default: 11119 qemu_log_mask(LOG_GUEST_ERROR, 11120 "DRACR[%d]: Bad value for AP bits: 0x%" 11121 PRIx32 "\n", n, ap); 11122 } 11123 } 11124 11125 /* execute never */ 11126 if (xn) { 11127 *prot &= ~PAGE_EXEC; 11128 } 11129 } 11130 } 11131 11132 fi->type = ARMFault_Permission; 11133 fi->level = 1; 11134 return !(*prot & (1 << access_type)); 11135 } 11136 11137 static bool v8m_is_sau_exempt(CPUARMState *env, 11138 uint32_t address, MMUAccessType access_type) 11139 { 11140 /* The architecture specifies that certain address ranges are 11141 * exempt from v8M SAU/IDAU checks. 11142 */ 11143 return 11144 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 11145 (address >= 0xe0000000 && address <= 0xe0002fff) || 11146 (address >= 0xe000e000 && address <= 0xe000efff) || 11147 (address >= 0xe002e000 && address <= 0xe002efff) || 11148 (address >= 0xe0040000 && address <= 0xe0041fff) || 11149 (address >= 0xe00ff000 && address <= 0xe00fffff); 11150 } 11151 11152 void v8m_security_lookup(CPUARMState *env, uint32_t address, 11153 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11154 V8M_SAttributes *sattrs) 11155 { 11156 /* Look up the security attributes for this address. Compare the 11157 * pseudocode SecurityCheck() function. 11158 * We assume the caller has zero-initialized *sattrs. 11159 */ 11160 ARMCPU *cpu = env_archcpu(env); 11161 int r; 11162 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 11163 int idau_region = IREGION_NOTVALID; 11164 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11165 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11166 11167 if (cpu->idau) { 11168 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 11169 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 11170 11171 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 11172 &idau_nsc); 11173 } 11174 11175 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 11176 /* 0xf0000000..0xffffffff is always S for insn fetches */ 11177 return; 11178 } 11179 11180 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 11181 sattrs->ns = !regime_is_secure(env, mmu_idx); 11182 return; 11183 } 11184 11185 if (idau_region != IREGION_NOTVALID) { 11186 sattrs->irvalid = true; 11187 sattrs->iregion = idau_region; 11188 } 11189 11190 switch (env->sau.ctrl & 3) { 11191 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 11192 break; 11193 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 11194 sattrs->ns = true; 11195 break; 11196 default: /* SAU.ENABLE == 1 */ 11197 for (r = 0; r < cpu->sau_sregion; r++) { 11198 if (env->sau.rlar[r] & 1) { 11199 uint32_t base = env->sau.rbar[r] & ~0x1f; 11200 uint32_t limit = env->sau.rlar[r] | 0x1f; 11201 11202 if (base <= address && limit >= address) { 11203 if (base > addr_page_base || limit < addr_page_limit) { 11204 sattrs->subpage = true; 11205 } 11206 if (sattrs->srvalid) { 11207 /* If we hit in more than one region then we must report 11208 * as Secure, not NS-Callable, with no valid region 11209 * number info. 
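 * For example, if one SAU region marks the address Non-secure and an
 * overlapping region marks it NS-Callable, the code below reports it as
 * plain Secure and leaves srvalid false.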
11210 */ 11211 sattrs->ns = false; 11212 sattrs->nsc = false; 11213 sattrs->sregion = 0; 11214 sattrs->srvalid = false; 11215 break; 11216 } else { 11217 if (env->sau.rlar[r] & 2) { 11218 sattrs->nsc = true; 11219 } else { 11220 sattrs->ns = true; 11221 } 11222 sattrs->srvalid = true; 11223 sattrs->sregion = r; 11224 } 11225 } else { 11226 /* 11227 * Address not in this region. We must check whether the 11228 * region covers addresses in the same page as our address. 11229 * In that case we must not report a size that covers the 11230 * whole page for a subsequent hit against a different MPU 11231 * region or the background region, because it would result 11232 * in incorrect TLB hits for subsequent accesses to 11233 * addresses that are in this MPU region. 11234 */ 11235 if (limit >= base && 11236 ranges_overlap(base, limit - base + 1, 11237 addr_page_base, 11238 TARGET_PAGE_SIZE)) { 11239 sattrs->subpage = true; 11240 } 11241 } 11242 } 11243 } 11244 break; 11245 } 11246 11247 /* 11248 * The IDAU will override the SAU lookup results if it specifies 11249 * higher security than the SAU does. 11250 */ 11251 if (!idau_ns) { 11252 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 11253 sattrs->ns = false; 11254 sattrs->nsc = idau_nsc; 11255 } 11256 } 11257 } 11258 11259 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 11260 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11261 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11262 int *prot, bool *is_subpage, 11263 ARMMMUFaultInfo *fi, uint32_t *mregion) 11264 { 11265 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 11266 * that a full phys-to-virt translation does). 11267 * mregion is (if not NULL) set to the region number which matched, 11268 * or -1 if no region number is returned (MPU off, address did not 11269 * hit a region, address hit in multiple regions). 11270 * We set is_subpage to true if the region hit doesn't cover the 11271 * entire TARGET_PAGE the address is within. 11272 */ 11273 ARMCPU *cpu = env_archcpu(env); 11274 bool is_user = regime_is_user(env, mmu_idx); 11275 uint32_t secure = regime_is_secure(env, mmu_idx); 11276 int n; 11277 int matchregion = -1; 11278 bool hit = false; 11279 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11280 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11281 11282 *is_subpage = false; 11283 *phys_ptr = address; 11284 *prot = 0; 11285 if (mregion) { 11286 *mregion = -1; 11287 } 11288 11289 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 11290 * was an exception vector read from the vector table (which is always 11291 * done using the default system address map), because those accesses 11292 * are done in arm_v7m_load_vector(), which always does a direct 11293 * read using address_space_ldl(), rather than going via this function. 11294 */ 11295 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 11296 hit = true; 11297 } else if (m_is_ppb_region(env, address)) { 11298 hit = true; 11299 } else { 11300 if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11301 hit = true; 11302 } 11303 11304 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11305 /* region search */ 11306 /* Note that the base address is bits [31:5] from the register 11307 * with bits [4:0] all zeroes, but the limit address is bits 11308 * [31:5] from the register with bits [4:0] all ones. 
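 * As a worked example: an RBAR address field of 0x20000000 with an RLAR
 * address field of 0x200000e0 gives base = 0x20000000 and limit =
 * 0x200000ff, i.e. an inclusive 256-byte region.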
11309 */ 11310 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 11311 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 11312 11313 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 11314 /* Region disabled */ 11315 continue; 11316 } 11317 11318 if (address < base || address > limit) { 11319 /* 11320 * Address not in this region. We must check whether the 11321 * region covers addresses in the same page as our address. 11322 * In that case we must not report a size that covers the 11323 * whole page for a subsequent hit against a different MPU 11324 * region or the background region, because it would result in 11325 * incorrect TLB hits for subsequent accesses to addresses that 11326 * are in this MPU region. 11327 */ 11328 if (limit >= base && 11329 ranges_overlap(base, limit - base + 1, 11330 addr_page_base, 11331 TARGET_PAGE_SIZE)) { 11332 *is_subpage = true; 11333 } 11334 continue; 11335 } 11336 11337 if (base > addr_page_base || limit < addr_page_limit) { 11338 *is_subpage = true; 11339 } 11340 11341 if (matchregion != -1) { 11342 /* Multiple regions match -- always a failure (unlike 11343 * PMSAv7 where highest-numbered-region wins) 11344 */ 11345 fi->type = ARMFault_Permission; 11346 fi->level = 1; 11347 return true; 11348 } 11349 11350 matchregion = n; 11351 hit = true; 11352 } 11353 } 11354 11355 if (!hit) { 11356 /* background fault */ 11357 fi->type = ARMFault_Background; 11358 return true; 11359 } 11360 11361 if (matchregion == -1) { 11362 /* hit using the background region */ 11363 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11364 } else { 11365 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 11366 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 11367 11368 if (m_is_system_region(env, address)) { 11369 /* System space is always execute never */ 11370 xn = 1; 11371 } 11372 11373 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 11374 if (*prot && !xn) { 11375 *prot |= PAGE_EXEC; 11376 } 11377 /* We don't need to look the attribute up in the MAIR0/MAIR1 11378 * registers because that only tells us about cacheability. 11379 */ 11380 if (mregion) { 11381 *mregion = matchregion; 11382 } 11383 } 11384 11385 fi->type = ARMFault_Permission; 11386 fi->level = 1; 11387 return !(*prot & (1 << access_type)); 11388 } 11389 11390 11391 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 11392 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11393 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11394 int *prot, target_ulong *page_size, 11395 ARMMMUFaultInfo *fi) 11396 { 11397 uint32_t secure = regime_is_secure(env, mmu_idx); 11398 V8M_SAttributes sattrs = {}; 11399 bool ret; 11400 bool mpu_is_subpage; 11401 11402 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 11403 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 11404 if (access_type == MMU_INST_FETCH) { 11405 /* Instruction fetches always use the MMU bank and the 11406 * transaction attribute determined by the fetch address, 11407 * regardless of CPU state. This is painful for QEMU 11408 * to handle, because it would mean we need to encode 11409 * into the mmu_idx not just the (user, negpri) information 11410 * for the current security state but also that for the 11411 * other security state, which would balloon the number 11412 * of mmu_idx values needed alarmingly. 
11413 * Fortunately we can avoid this because it's not actually 11414 * possible to arbitrarily execute code from memory with 11415 * the wrong security attribute: it will always generate 11416 * an exception of some kind or another, apart from the 11417 * special case of an NS CPU executing an SG instruction 11418 * in S&NSC memory. So we always just fail the translation 11419 * here and sort things out in the exception handler 11420 * (including possibly emulating an SG instruction). 11421 */ 11422 if (sattrs.ns != !secure) { 11423 if (sattrs.nsc) { 11424 fi->type = ARMFault_QEMU_NSCExec; 11425 } else { 11426 fi->type = ARMFault_QEMU_SFault; 11427 } 11428 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11429 *phys_ptr = address; 11430 *prot = 0; 11431 return true; 11432 } 11433 } else { 11434 /* For data accesses we always use the MMU bank indicated 11435 * by the current CPU state, but the security attributes 11436 * might downgrade a secure access to nonsecure. 11437 */ 11438 if (sattrs.ns) { 11439 txattrs->secure = false; 11440 } else if (!secure) { 11441 /* NS access to S memory must fault. 11442 * Architecturally we should first check whether the 11443 * MPU information for this address indicates that we 11444 * are doing an unaligned access to Device memory, which 11445 * should generate a UsageFault instead. QEMU does not 11446 * currently check for that kind of unaligned access though. 11447 * If we added it we would need to do so as a special case 11448 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 11449 */ 11450 fi->type = ARMFault_QEMU_SFault; 11451 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11452 *phys_ptr = address; 11453 *prot = 0; 11454 return true; 11455 } 11456 } 11457 } 11458 11459 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 11460 txattrs, prot, &mpu_is_subpage, fi, NULL); 11461 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 11462 return ret; 11463 } 11464 11465 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 11466 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11467 hwaddr *phys_ptr, int *prot, 11468 ARMMMUFaultInfo *fi) 11469 { 11470 int n; 11471 uint32_t mask; 11472 uint32_t base; 11473 bool is_user = regime_is_user(env, mmu_idx); 11474 11475 if (regime_translation_disabled(env, mmu_idx)) { 11476 /* MPU disabled. */ 11477 *phys_ptr = address; 11478 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11479 return false; 11480 } 11481 11482 *phys_ptr = address; 11483 for (n = 7; n >= 0; n--) { 11484 base = env->cp15.c6_region[n]; 11485 if ((base & 1) == 0) { 11486 continue; 11487 } 11488 mask = 1 << ((base >> 1) & 0x1f); 11489 /* Keep this shift separate from the above to avoid an 11490 (undefined) << 32. 
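 * For the largest encoding ((base >> 1) & 0x1f == 31) this computes
 * mask = 1 << 31 and then (mask << 1) - 1 == 0xffffffff via well-defined
 * unsigned wrap-around, whereas a single (1 << 32) shift would be undefined
 * behaviour on a 32-bit type.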
*/ 11491 mask = (mask << 1) - 1; 11492 if (((base ^ address) & ~mask) == 0) { 11493 break; 11494 } 11495 } 11496 if (n < 0) { 11497 fi->type = ARMFault_Background; 11498 return true; 11499 } 11500 11501 if (access_type == MMU_INST_FETCH) { 11502 mask = env->cp15.pmsav5_insn_ap; 11503 } else { 11504 mask = env->cp15.pmsav5_data_ap; 11505 } 11506 mask = (mask >> (n * 4)) & 0xf; 11507 switch (mask) { 11508 case 0: 11509 fi->type = ARMFault_Permission; 11510 fi->level = 1; 11511 return true; 11512 case 1: 11513 if (is_user) { 11514 fi->type = ARMFault_Permission; 11515 fi->level = 1; 11516 return true; 11517 } 11518 *prot = PAGE_READ | PAGE_WRITE; 11519 break; 11520 case 2: 11521 *prot = PAGE_READ; 11522 if (!is_user) { 11523 *prot |= PAGE_WRITE; 11524 } 11525 break; 11526 case 3: 11527 *prot = PAGE_READ | PAGE_WRITE; 11528 break; 11529 case 5: 11530 if (is_user) { 11531 fi->type = ARMFault_Permission; 11532 fi->level = 1; 11533 return true; 11534 } 11535 *prot = PAGE_READ; 11536 break; 11537 case 6: 11538 *prot = PAGE_READ; 11539 break; 11540 default: 11541 /* Bad permission. */ 11542 fi->type = ARMFault_Permission; 11543 fi->level = 1; 11544 return true; 11545 } 11546 *prot |= PAGE_EXEC; 11547 return false; 11548 } 11549 11550 /* Combine either inner or outer cacheability attributes for normal 11551 * memory, according to table D4-42 and pseudocode procedure 11552 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 11553 * 11554 * NB: only stage 1 includes allocation hints (RW bits), leading to 11555 * some asymmetry. 11556 */ 11557 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 11558 { 11559 if (s1 == 4 || s2 == 4) { 11560 /* non-cacheable has precedence */ 11561 return 4; 11562 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 11563 /* stage 1 write-through takes precedence */ 11564 return s1; 11565 } else if (extract32(s2, 2, 2) == 2) { 11566 /* stage 2 write-through takes precedence, but the allocation hint 11567 * is still taken from stage 1 11568 */ 11569 return (2 << 2) | extract32(s1, 0, 2); 11570 } else { /* write-back */ 11571 return s1; 11572 } 11573 } 11574 11575 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 11576 * and CombineS1S2Desc() 11577 * 11578 * @s1: Attributes from stage 1 walk 11579 * @s2: Attributes from stage 2 walk 11580 */ 11581 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 11582 { 11583 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 11584 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 11585 ARMCacheAttrs ret; 11586 11587 /* Combine shareability attributes (table D4-43) */ 11588 if (s1.shareability == 2 || s2.shareability == 2) { 11589 /* if either are outer-shareable, the result is outer-shareable */ 11590 ret.shareability = 2; 11591 } else if (s1.shareability == 3 || s2.shareability == 3) { 11592 /* if either are inner-shareable, the result is inner-shareable */ 11593 ret.shareability = 3; 11594 } else { 11595 /* both non-shareable */ 11596 ret.shareability = 0; 11597 } 11598 11599 /* Combine memory type and cacheability attributes */ 11600 if (s1hi == 0 || s2hi == 0) { 11601 /* Device has precedence over normal */ 11602 if (s1lo == 0 || s2lo == 0) { 11603 /* nGnRnE has precedence over anything */ 11604 ret.attrs = 0; 11605 } else if (s1lo == 4 || s2lo == 4) { 11606 /* non-Reordering has precedence over Reordering */ 11607 ret.attrs = 4; /* nGnRE */ 11608 } else if (s1lo == 8 || s2lo == 8) { 11609 /* 
non-Gathering has precedence over Gathering */ 11610 ret.attrs = 8; /* nGRE */ 11611 } else { 11612 ret.attrs = 0xc; /* GRE */ 11613 } 11614 11615 /* Any location for which the resultant memory type is any 11616 * type of Device memory is always treated as Outer Shareable. 11617 */ 11618 ret.shareability = 2; 11619 } else { /* Normal memory */ 11620 /* Outer/inner cacheability combine independently */ 11621 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 11622 | combine_cacheattr_nibble(s1lo, s2lo); 11623 11624 if (ret.attrs == 0x44) { 11625 /* Any location for which the resultant memory type is Normal 11626 * Inner Non-cacheable, Outer Non-cacheable is always treated 11627 * as Outer Shareable. 11628 */ 11629 ret.shareability = 2; 11630 } 11631 } 11632 11633 return ret; 11634 } 11635 11636 11637 /* get_phys_addr - get the physical address for this virtual address 11638 * 11639 * Find the physical address corresponding to the given virtual address, 11640 * by doing a translation table walk on MMU based systems or using the 11641 * MPU state on MPU based systems. 11642 * 11643 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs, 11644 * prot and page_size may not be filled in, and the populated fsr value provides 11645 * information on why the translation aborted, in the format of a 11646 * DFSR/IFSR fault register, with the following caveats: 11647 * * we honour the short vs long DFSR format differences. 11648 * * the WnR bit is never set (the caller must do this). 11649 * * for PSMAv5 based systems we don't bother to return a full FSR format 11650 * value. 11651 * 11652 * @env: CPUARMState 11653 * @address: virtual address to get physical address for 11654 * @access_type: 0 for read, 1 for write, 2 for execute 11655 * @mmu_idx: MMU index indicating required translation regime 11656 * @phys_ptr: set to the physical address corresponding to the virtual address 11657 * @attrs: set to the memory transaction attributes to use 11658 * @prot: set to the permissions for the page containing phys_ptr 11659 * @page_size: set to the size of the page containing phys_ptr 11660 * @fi: set to fault info if the translation fails 11661 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11662 */ 11663 bool get_phys_addr(CPUARMState *env, target_ulong address, 11664 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11665 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11666 target_ulong *page_size, 11667 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11668 { 11669 if (mmu_idx == ARMMMUIdx_E10_0 || 11670 mmu_idx == ARMMMUIdx_E10_1 || 11671 mmu_idx == ARMMMUIdx_E10_1_PAN) { 11672 /* Call ourselves recursively to do the stage 1 and then stage 2 11673 * translations. 11674 */ 11675 if (arm_feature(env, ARM_FEATURE_EL2)) { 11676 hwaddr ipa; 11677 int s2_prot; 11678 int ret; 11679 ARMCacheAttrs cacheattrs2 = {}; 11680 11681 ret = get_phys_addr(env, address, access_type, 11682 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 11683 prot, page_size, fi, cacheattrs); 11684 11685 /* If S1 fails or S2 is disabled, return early. */ 11686 if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) { 11687 *phys_ptr = ipa; 11688 return ret; 11689 } 11690 11691 /* S1 is done. Now do S2 translation. */ 11692 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2, 11693 phys_ptr, attrs, &s2_prot, 11694 page_size, fi, 11695 cacheattrs != NULL ? &cacheattrs2 : NULL); 11696 fi->s2addr = ipa; 11697 /* Combine the S1 and S2 perms. 
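 * For example, a page that stage 1 maps as RWX but stage 2 maps read-only
 * ends up as PAGE_READ only.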
*/ 11698 *prot &= s2_prot; 11699 11700 /* Combine the S1 and S2 cache attributes, if needed */ 11701 if (!ret && cacheattrs != NULL) { 11702 if (env->cp15.hcr_el2 & HCR_DC) { 11703 /* 11704 * HCR.DC forces the first stage attributes to 11705 * Normal Non-Shareable, 11706 * Inner Write-Back Read-Allocate Write-Allocate, 11707 * Outer Write-Back Read-Allocate Write-Allocate. 11708 */ 11709 cacheattrs->attrs = 0xff; 11710 cacheattrs->shareability = 0; 11711 } 11712 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11713 } 11714 11715 return ret; 11716 } else { 11717 /* 11718 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 11719 */ 11720 mmu_idx = stage_1_mmu_idx(mmu_idx); 11721 } 11722 } 11723 11724 /* The page table entries may downgrade secure to non-secure, but 11725 * cannot upgrade an non-secure translation regime's attributes 11726 * to secure. 11727 */ 11728 attrs->secure = regime_is_secure(env, mmu_idx); 11729 attrs->user = regime_is_user(env, mmu_idx); 11730 11731 /* Fast Context Switch Extension. This doesn't exist at all in v8. 11732 * In v7 and earlier it affects all stage 1 translations. 11733 */ 11734 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 11735 && !arm_feature(env, ARM_FEATURE_V8)) { 11736 if (regime_el(env, mmu_idx) == 3) { 11737 address += env->cp15.fcseidr_s; 11738 } else { 11739 address += env->cp15.fcseidr_ns; 11740 } 11741 } 11742 11743 if (arm_feature(env, ARM_FEATURE_PMSA)) { 11744 bool ret; 11745 *page_size = TARGET_PAGE_SIZE; 11746 11747 if (arm_feature(env, ARM_FEATURE_V8)) { 11748 /* PMSAv8 */ 11749 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 11750 phys_ptr, attrs, prot, page_size, fi); 11751 } else if (arm_feature(env, ARM_FEATURE_V7)) { 11752 /* PMSAv7 */ 11753 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 11754 phys_ptr, prot, page_size, fi); 11755 } else { 11756 /* Pre-v7 MPU */ 11757 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 11758 phys_ptr, prot, fi); 11759 } 11760 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 11761 " mmu_idx %u -> %s (prot %c%c%c)\n", 11762 access_type == MMU_DATA_LOAD ? "reading" : 11763 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 11764 (uint32_t)address, mmu_idx, 11765 ret ? "Miss" : "Hit", 11766 *prot & PAGE_READ ? 'r' : '-', 11767 *prot & PAGE_WRITE ? 'w' : '-', 11768 *prot & PAGE_EXEC ? 'x' : '-'); 11769 11770 return ret; 11771 } 11772 11773 /* Definitely a real MMU, not an MPU */ 11774 11775 if (regime_translation_disabled(env, mmu_idx)) { 11776 /* 11777 * MMU disabled. S1 addresses within aa64 translation regimes are 11778 * still checked for bounds -- see AArch64.TranslateAddressS1Off. 11779 */ 11780 if (mmu_idx != ARMMMUIdx_Stage2) { 11781 int r_el = regime_el(env, mmu_idx); 11782 if (arm_el_is_aa64(env, r_el)) { 11783 int pamax = arm_pamax(env_archcpu(env)); 11784 uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr; 11785 int addrtop, tbi; 11786 11787 tbi = aa64_va_parameter_tbi(tcr, mmu_idx); 11788 if (access_type == MMU_INST_FETCH) { 11789 tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); 11790 } 11791 tbi = (tbi >> extract64(address, 55, 1)) & 1; 11792 addrtop = (tbi ? 
55 : 63); 11793 11794 if (extract64(address, pamax, addrtop - pamax + 1) != 0) { 11795 fi->type = ARMFault_AddressSize; 11796 fi->level = 0; 11797 fi->stage2 = false; 11798 return 1; 11799 } 11800 11801 /* 11802 * When TBI is disabled, we've just validated that all of the 11803 * bits above PAMax are zero, so logically we only need to 11804 * clear the top byte for TBI. But it's clearer to follow 11805 * the pseudocode set of addrdesc.paddress. 11806 */ 11807 address = extract64(address, 0, 52); 11808 } 11809 } 11810 *phys_ptr = address; 11811 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11812 *page_size = TARGET_PAGE_SIZE; 11813 return 0; 11814 } 11815 11816 if (regime_using_lpae_format(env, mmu_idx)) { 11817 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 11818 phys_ptr, attrs, prot, page_size, 11819 fi, cacheattrs); 11820 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 11821 return get_phys_addr_v6(env, address, access_type, mmu_idx, 11822 phys_ptr, attrs, prot, page_size, fi); 11823 } else { 11824 return get_phys_addr_v5(env, address, access_type, mmu_idx, 11825 phys_ptr, prot, page_size, fi); 11826 } 11827 } 11828 11829 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 11830 MemTxAttrs *attrs) 11831 { 11832 ARMCPU *cpu = ARM_CPU(cs); 11833 CPUARMState *env = &cpu->env; 11834 hwaddr phys_addr; 11835 target_ulong page_size; 11836 int prot; 11837 bool ret; 11838 ARMMMUFaultInfo fi = {}; 11839 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 11840 11841 *attrs = (MemTxAttrs) {}; 11842 11843 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 11844 attrs, &prot, &page_size, &fi, NULL); 11845 11846 if (ret) { 11847 return -1; 11848 } 11849 return phys_addr; 11850 } 11851 11852 #endif 11853 11854 /* Note that signed overflow is undefined in C. The following routines are 11855 careful to use unsigned types where modulo arithmetic is required. 11856 Failure to do so _will_ break on newer gcc. */ 11857 11858 /* Signed saturating arithmetic. */ 11859 11860 /* Perform 16-bit signed saturating addition. */ 11861 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 11862 { 11863 uint16_t res; 11864 11865 res = a + b; 11866 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 11867 if (a & 0x8000) 11868 res = 0x8000; 11869 else 11870 res = 0x7fff; 11871 } 11872 return res; 11873 } 11874 11875 /* Perform 8-bit signed saturating addition. */ 11876 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 11877 { 11878 uint8_t res; 11879 11880 res = a + b; 11881 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 11882 if (a & 0x80) 11883 res = 0x80; 11884 else 11885 res = 0x7f; 11886 } 11887 return res; 11888 } 11889 11890 /* Perform 16-bit signed saturating subtraction. */ 11891 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 11892 { 11893 uint16_t res; 11894 11895 res = a - b; 11896 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 11897 if (a & 0x8000) 11898 res = 0x8000; 11899 else 11900 res = 0x7fff; 11901 } 11902 return res; 11903 } 11904 11905 /* Perform 8-bit signed saturating subtraction. 
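 * For example, QSUB8 on lane values 0x80 - 0x01 saturates to 0x80
 * (INT8_MIN) rather than wrapping around to 0x7f.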
*/ 11906 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 11907 { 11908 uint8_t res; 11909 11910 res = a - b; 11911 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 11912 if (a & 0x80) 11913 res = 0x80; 11914 else 11915 res = 0x7f; 11916 } 11917 return res; 11918 } 11919 11920 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 11921 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 11922 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 11923 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 11924 #define PFX q 11925 11926 #include "op_addsub.h" 11927 11928 /* Unsigned saturating arithmetic. */ 11929 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 11930 { 11931 uint16_t res; 11932 res = a + b; 11933 if (res < a) 11934 res = 0xffff; 11935 return res; 11936 } 11937 11938 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 11939 { 11940 if (a > b) 11941 return a - b; 11942 else 11943 return 0; 11944 } 11945 11946 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 11947 { 11948 uint8_t res; 11949 res = a + b; 11950 if (res < a) 11951 res = 0xff; 11952 return res; 11953 } 11954 11955 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 11956 { 11957 if (a > b) 11958 return a - b; 11959 else 11960 return 0; 11961 } 11962 11963 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 11964 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 11965 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 11966 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 11967 #define PFX uq 11968 11969 #include "op_addsub.h" 11970 11971 /* Signed modulo arithmetic. */ 11972 #define SARITH16(a, b, n, op) do { \ 11973 int32_t sum; \ 11974 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 11975 RESULT(sum, n, 16); \ 11976 if (sum >= 0) \ 11977 ge |= 3 << (n * 2); \ 11978 } while(0) 11979 11980 #define SARITH8(a, b, n, op) do { \ 11981 int32_t sum; \ 11982 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 11983 RESULT(sum, n, 8); \ 11984 if (sum >= 0) \ 11985 ge |= 1 << n; \ 11986 } while(0) 11987 11988 11989 #define ADD16(a, b, n) SARITH16(a, b, n, +) 11990 #define SUB16(a, b, n) SARITH16(a, b, n, -) 11991 #define ADD8(a, b, n) SARITH8(a, b, n, +) 11992 #define SUB8(a, b, n) SARITH8(a, b, n, -) 11993 #define PFX s 11994 #define ARITH_GE 11995 11996 #include "op_addsub.h" 11997 11998 /* Unsigned modulo arithmetic. */ 11999 #define ADD16(a, b, n) do { \ 12000 uint32_t sum; \ 12001 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 12002 RESULT(sum, n, 16); \ 12003 if ((sum >> 16) == 1) \ 12004 ge |= 3 << (n * 2); \ 12005 } while(0) 12006 12007 #define ADD8(a, b, n) do { \ 12008 uint32_t sum; \ 12009 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 12010 RESULT(sum, n, 8); \ 12011 if ((sum >> 8) == 1) \ 12012 ge |= 1 << n; \ 12013 } while(0) 12014 12015 #define SUB16(a, b, n) do { \ 12016 uint32_t sum; \ 12017 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 12018 RESULT(sum, n, 16); \ 12019 if ((sum >> 16) == 0) \ 12020 ge |= 3 << (n * 2); \ 12021 } while(0) 12022 12023 #define SUB8(a, b, n) do { \ 12024 uint32_t sum; \ 12025 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 12026 RESULT(sum, n, 8); \ 12027 if ((sum >> 8) == 0) \ 12028 ge |= 1 << n; \ 12029 } while(0) 12030 12031 #define PFX u 12032 #define ARITH_GE 12033 12034 #include "op_addsub.h" 12035 12036 /* Halved signed arithmetic. 
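 * For example, SHADD16 on lane values 0x7fff + 0x0001 yields
 * (0x7fff + 1) >> 1 = 0x4000; the sum is formed at 32 bits before
 * halving, so no saturation is required.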
*/ 12037 #define ADD16(a, b, n) \ 12038 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 12039 #define SUB16(a, b, n) \ 12040 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 12041 #define ADD8(a, b, n) \ 12042 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 12043 #define SUB8(a, b, n) \ 12044 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 12045 #define PFX sh 12046 12047 #include "op_addsub.h" 12048 12049 /* Halved unsigned arithmetic. */ 12050 #define ADD16(a, b, n) \ 12051 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12052 #define SUB16(a, b, n) \ 12053 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12054 #define ADD8(a, b, n) \ 12055 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 12056 #define SUB8(a, b, n) \ 12057 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 12058 #define PFX uh 12059 12060 #include "op_addsub.h" 12061 12062 static inline uint8_t do_usad(uint8_t a, uint8_t b) 12063 { 12064 if (a > b) 12065 return a - b; 12066 else 12067 return b - a; 12068 } 12069 12070 /* Unsigned sum of absolute byte differences. */ 12071 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 12072 { 12073 uint32_t sum; 12074 sum = do_usad(a, b); 12075 sum += do_usad(a >> 8, b >> 8); 12076 sum += do_usad(a >> 16, b >>16); 12077 sum += do_usad(a >> 24, b >> 24); 12078 return sum; 12079 } 12080 12081 /* For ARMv6 SEL instruction. */ 12082 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 12083 { 12084 uint32_t mask; 12085 12086 mask = 0; 12087 if (flags & 1) 12088 mask |= 0xff; 12089 if (flags & 2) 12090 mask |= 0xff00; 12091 if (flags & 4) 12092 mask |= 0xff0000; 12093 if (flags & 8) 12094 mask |= 0xff000000; 12095 return (a & mask) | (b & ~mask); 12096 } 12097 12098 /* CRC helpers. 12099 * The upper bytes of val (above the number specified by 'bytes') must have 12100 * been zeroed out by the caller. 12101 */ 12102 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 12103 { 12104 uint8_t buf[4]; 12105 12106 stl_le_p(buf, val); 12107 12108 /* zlib crc32 converts the accumulator and output to one's complement. */ 12109 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 12110 } 12111 12112 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 12113 { 12114 uint8_t buf[4]; 12115 12116 stl_le_p(buf, val); 12117 12118 /* Linux crc32c converts the output to one's complement. */ 12119 return crc32c(acc, buf, bytes) ^ 0xffffffff; 12120 } 12121 12122 /* Return the exception level to which FP-disabled exceptions should 12123 * be taken, or 0 if FP is enabled. 
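 * For example, with CPACR_EL1.FPEN == 0 an FP access from EL0 or EL1
 * reports 1 (or 3 when Secure and EL3 is AArch32), per the CPACR
 * handling below.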
12124 */ 12125 int fp_exception_el(CPUARMState *env, int cur_el) 12126 { 12127 #ifndef CONFIG_USER_ONLY 12128 /* CPACR and the CPTR registers don't exist before v6, so FP is 12129 * always accessible 12130 */ 12131 if (!arm_feature(env, ARM_FEATURE_V6)) { 12132 return 0; 12133 } 12134 12135 if (arm_feature(env, ARM_FEATURE_M)) { 12136 /* CPACR can cause a NOCP UsageFault taken to current security state */ 12137 if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { 12138 return 1; 12139 } 12140 12141 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { 12142 if (!extract32(env->v7m.nsacr, 10, 1)) { 12143 /* FP insns cause a NOCP UsageFault taken to Secure */ 12144 return 3; 12145 } 12146 } 12147 12148 return 0; 12149 } 12150 12151 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 12152 * 0, 2 : trap EL0 and EL1/PL1 accesses 12153 * 1 : trap only EL0 accesses 12154 * 3 : trap no accesses 12155 * This register is ignored if E2H+TGE are both set. 12156 */ 12157 if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 12158 int fpen = extract32(env->cp15.cpacr_el1, 20, 2); 12159 12160 switch (fpen) { 12161 case 0: 12162 case 2: 12163 if (cur_el == 0 || cur_el == 1) { 12164 /* Trap to PL1, which might be EL1 or EL3 */ 12165 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 12166 return 3; 12167 } 12168 return 1; 12169 } 12170 if (cur_el == 3 && !is_a64(env)) { 12171 /* Secure PL1 running at EL3 */ 12172 return 3; 12173 } 12174 break; 12175 case 1: 12176 if (cur_el == 0) { 12177 return 1; 12178 } 12179 break; 12180 case 3: 12181 break; 12182 } 12183 } 12184 12185 /* 12186 * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode 12187 * to control non-secure access to the FPU. It doesn't have any 12188 * effect if EL3 is AArch64 or if EL3 doesn't exist at all. 12189 */ 12190 if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && 12191 cur_el <= 2 && !arm_is_secure_below_el3(env))) { 12192 if (!extract32(env->cp15.nsacr, 10, 1)) { 12193 /* FP insns act as UNDEF */ 12194 return cur_el == 2 ? 2 : 1; 12195 } 12196 } 12197 12198 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 12199 * check because zero bits in the registers mean "don't trap". 
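 * For example, CPTR_EL2.TFP (bit 10) traps only when set to 1, and on a
 * CPU without EL2 the field simply reads as zero, so the check below is
 * harmless there.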
12200 */ 12201 12202 /* CPTR_EL2 : present in v7VE or v8 */ 12203 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 12204 && !arm_is_secure_below_el3(env)) { 12205 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 12206 return 2; 12207 } 12208 12209 /* CPTR_EL3 : present in v8 */ 12210 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 12211 /* Trap all FP ops to EL3 */ 12212 return 3; 12213 } 12214 #endif 12215 return 0; 12216 } 12217 12218 /* Return the exception level we're running at if this is our mmu_idx */ 12219 int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) 12220 { 12221 if (mmu_idx & ARM_MMU_IDX_M) { 12222 return mmu_idx & ARM_MMU_IDX_M_PRIV; 12223 } 12224 12225 switch (mmu_idx) { 12226 case ARMMMUIdx_E10_0: 12227 case ARMMMUIdx_E20_0: 12228 case ARMMMUIdx_SE10_0: 12229 return 0; 12230 case ARMMMUIdx_E10_1: 12231 case ARMMMUIdx_E10_1_PAN: 12232 case ARMMMUIdx_SE10_1: 12233 case ARMMMUIdx_SE10_1_PAN: 12234 return 1; 12235 case ARMMMUIdx_E2: 12236 case ARMMMUIdx_E20_2: 12237 case ARMMMUIdx_E20_2_PAN: 12238 return 2; 12239 case ARMMMUIdx_SE3: 12240 return 3; 12241 default: 12242 g_assert_not_reached(); 12243 } 12244 } 12245 12246 #ifndef CONFIG_TCG 12247 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 12248 { 12249 g_assert_not_reached(); 12250 } 12251 #endif 12252 12253 ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) 12254 { 12255 if (arm_feature(env, ARM_FEATURE_M)) { 12256 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 12257 } 12258 12259 /* See ARM pseudo-function ELIsInHost. */ 12260 switch (el) { 12261 case 0: 12262 if (arm_is_secure_below_el3(env)) { 12263 return ARMMMUIdx_SE10_0; 12264 } 12265 if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE) 12266 && arm_el_is_aa64(env, 2)) { 12267 return ARMMMUIdx_E20_0; 12268 } 12269 return ARMMMUIdx_E10_0; 12270 case 1: 12271 if (arm_is_secure_below_el3(env)) { 12272 if (env->pstate & PSTATE_PAN) { 12273 return ARMMMUIdx_SE10_1_PAN; 12274 } 12275 return ARMMMUIdx_SE10_1; 12276 } 12277 if (env->pstate & PSTATE_PAN) { 12278 return ARMMMUIdx_E10_1_PAN; 12279 } 12280 return ARMMMUIdx_E10_1; 12281 case 2: 12282 /* TODO: ARMv8.4-SecEL2 */ 12283 /* Note that TGE does not apply at EL2. 
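 * Here HCR_EL2.E2H (with an AArch64 EL2) selects the E20_2 regimes;
 * otherwise the plain E2 regime is used.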
*/ 12284 if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) { 12285 if (env->pstate & PSTATE_PAN) { 12286 return ARMMMUIdx_E20_2_PAN; 12287 } 12288 return ARMMMUIdx_E20_2; 12289 } 12290 return ARMMMUIdx_E2; 12291 case 3: 12292 return ARMMMUIdx_SE3; 12293 default: 12294 g_assert_not_reached(); 12295 } 12296 } 12297 12298 ARMMMUIdx arm_mmu_idx(CPUARMState *env) 12299 { 12300 return arm_mmu_idx_el(env, arm_current_el(env)); 12301 } 12302 12303 #ifndef CONFIG_USER_ONLY 12304 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 12305 { 12306 return stage_1_mmu_idx(arm_mmu_idx(env)); 12307 } 12308 #endif 12309 12310 static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el, 12311 ARMMMUIdx mmu_idx, uint32_t flags) 12312 { 12313 flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el); 12314 flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, 12315 arm_to_core_mmu_idx(mmu_idx)); 12316 12317 if (arm_singlestep_active(env)) { 12318 flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1); 12319 } 12320 return flags; 12321 } 12322 12323 static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el, 12324 ARMMMUIdx mmu_idx, uint32_t flags) 12325 { 12326 bool sctlr_b = arm_sctlr_b(env); 12327 12328 if (sctlr_b) { 12329 flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1); 12330 } 12331 if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) { 12332 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 12333 } 12334 flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env)); 12335 12336 return rebuild_hflags_common(env, fp_el, mmu_idx, flags); 12337 } 12338 12339 static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el, 12340 ARMMMUIdx mmu_idx) 12341 { 12342 uint32_t flags = 0; 12343 12344 if (arm_v7m_is_handler_mode(env)) { 12345 flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1); 12346 } 12347 12348 /* 12349 * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN 12350 * is suppressing them because the requested execution priority 12351 * is less than 0. 12352 */ 12353 if (arm_feature(env, ARM_FEATURE_V8) && 12354 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && 12355 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { 12356 flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1); 12357 } 12358 12359 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 12360 } 12361 12362 static uint32_t rebuild_hflags_aprofile(CPUARMState *env) 12363 { 12364 int flags = 0; 12365 12366 flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL, 12367 arm_debug_target_el(env)); 12368 return flags; 12369 } 12370 12371 static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el, 12372 ARMMMUIdx mmu_idx) 12373 { 12374 uint32_t flags = rebuild_hflags_aprofile(env); 12375 12376 if (arm_el_is_aa64(env, 1)) { 12377 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 12378 } 12379 12380 if (arm_current_el(env) < 2 && env->cp15.hstr_el2 && 12381 (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { 12382 flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1); 12383 } 12384 12385 return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags); 12386 } 12387 12388 static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, 12389 ARMMMUIdx mmu_idx) 12390 { 12391 uint32_t flags = rebuild_hflags_aprofile(env); 12392 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); 12393 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 12394 uint64_t sctlr; 12395 int tbii, tbid; 12396 12397 flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1); 12398 12399 /* Get control bits for tagged addresses. 
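 * TBI enables top-byte-ignore for a VA range; TCR.TBIDn limits it to
 * data accesses, so the instruction-fetch view (TBII) masks those
 * regions out while the data-side view keeps TBI as-is.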
*/ 12400 tbid = aa64_va_parameter_tbi(tcr, mmu_idx); 12401 tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); 12402 12403 flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii); 12404 flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid); 12405 12406 if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 12407 int sve_el = sve_exception_el(env, el); 12408 uint32_t zcr_len; 12409 12410 /* 12411 * If SVE is disabled, but FP is enabled, 12412 * then the effective len is 0. 12413 */ 12414 if (sve_el != 0 && fp_el == 0) { 12415 zcr_len = 0; 12416 } else { 12417 zcr_len = sve_zcr_len_for_el(env, el); 12418 } 12419 flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el); 12420 flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len); 12421 } 12422 12423 sctlr = regime_sctlr(env, stage1); 12424 12425 if (arm_cpu_data_is_big_endian_a64(el, sctlr)) { 12426 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 12427 } 12428 12429 if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) { 12430 /* 12431 * In order to save space in flags, we record only whether 12432 * pauth is "inactive", meaning all insns are implemented as 12433 * a nop, or "active" when some action must be performed. 12434 * The decision of which action to take is left to a helper. 12435 */ 12436 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { 12437 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1); 12438 } 12439 } 12440 12441 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 12442 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ 12443 if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) { 12444 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1); 12445 } 12446 } 12447 12448 /* Compute the condition for using AccType_UNPRIV for LDTR et al. */ 12449 if (!(env->pstate & PSTATE_UAO)) { 12450 switch (mmu_idx) { 12451 case ARMMMUIdx_E10_1: 12452 case ARMMMUIdx_E10_1_PAN: 12453 case ARMMMUIdx_SE10_1: 12454 case ARMMMUIdx_SE10_1_PAN: 12455 /* TODO: ARMv8.3-NV */ 12456 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); 12457 break; 12458 case ARMMMUIdx_E20_2: 12459 case ARMMMUIdx_E20_2_PAN: 12460 /* TODO: ARMv8.4-SecEL2 */ 12461 /* 12462 * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is 12463 * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR. 12464 */ 12465 if (env->cp15.hcr_el2 & HCR_TGE) { 12466 flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1); 12467 } 12468 break; 12469 default: 12470 break; 12471 } 12472 } 12473 12474 return rebuild_hflags_common(env, fp_el, mmu_idx, flags); 12475 } 12476 12477 static uint32_t rebuild_hflags_internal(CPUARMState *env) 12478 { 12479 int el = arm_current_el(env); 12480 int fp_el = fp_exception_el(env, el); 12481 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12482 12483 if (is_a64(env)) { 12484 return rebuild_hflags_a64(env, el, fp_el, mmu_idx); 12485 } else if (arm_feature(env, ARM_FEATURE_M)) { 12486 return rebuild_hflags_m32(env, fp_el, mmu_idx); 12487 } else { 12488 return rebuild_hflags_a32(env, fp_el, mmu_idx); 12489 } 12490 } 12491 12492 void arm_rebuild_hflags(CPUARMState *env) 12493 { 12494 env->hflags = rebuild_hflags_internal(env); 12495 } 12496 12497 /* 12498 * If we have triggered a EL state change we can't rely on the 12499 * translator having passed it to us, we need to recompute. 
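 * (Hence the "_newel" helpers below look up the current EL themselves
 * rather than taking it as an argument.)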
12500 */ 12501 void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env) 12502 { 12503 int el = arm_current_el(env); 12504 int fp_el = fp_exception_el(env, el); 12505 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12506 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); 12507 } 12508 12509 void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el) 12510 { 12511 int fp_el = fp_exception_el(env, el); 12512 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12513 12514 env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx); 12515 } 12516 12517 /* 12518 * If we have triggered a EL state change we can't rely on the 12519 * translator having passed it to us, we need to recompute. 12520 */ 12521 void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env) 12522 { 12523 int el = arm_current_el(env); 12524 int fp_el = fp_exception_el(env, el); 12525 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12526 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 12527 } 12528 12529 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el) 12530 { 12531 int fp_el = fp_exception_el(env, el); 12532 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12533 12534 env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx); 12535 } 12536 12537 void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el) 12538 { 12539 int fp_el = fp_exception_el(env, el); 12540 ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el); 12541 12542 env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx); 12543 } 12544 12545 static inline void assert_hflags_rebuild_correctly(CPUARMState *env) 12546 { 12547 #ifdef CONFIG_DEBUG_TCG 12548 uint32_t env_flags_current = env->hflags; 12549 uint32_t env_flags_rebuilt = rebuild_hflags_internal(env); 12550 12551 if (unlikely(env_flags_current != env_flags_rebuilt)) { 12552 fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n", 12553 env_flags_current, env_flags_rebuilt); 12554 abort(); 12555 } 12556 #endif 12557 } 12558 12559 void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 12560 target_ulong *cs_base, uint32_t *pflags) 12561 { 12562 uint32_t flags = env->hflags; 12563 uint32_t pstate_for_ss; 12564 12565 *cs_base = 0; 12566 assert_hflags_rebuild_correctly(env); 12567 12568 if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) { 12569 *pc = env->pc; 12570 if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { 12571 flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype); 12572 } 12573 pstate_for_ss = env->pstate; 12574 } else { 12575 *pc = env->regs[15]; 12576 12577 if (arm_feature(env, ARM_FEATURE_M)) { 12578 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 12579 FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) 12580 != env->v7m.secure) { 12581 flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1); 12582 } 12583 12584 if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && 12585 (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || 12586 (env->v7m.secure && 12587 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { 12588 /* 12589 * ASPEN is set, but FPCA/SFPA indicate that there is no 12590 * active FP context; we must create a new FP context before 12591 * executing any FP insn. 12592 */ 12593 flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1); 12594 } 12595 12596 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 12597 if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { 12598 flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1); 12599 } 12600 } else { 12601 /* 12602 * Note that XSCALE_CPAR shares bits with VECSTRIDE. 
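 * (only one of the two encodings is written below, selected on
 * ARM_FEATURE_XSCALE).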
12603 * Note that VECLEN+VECSTRIDE are RES0 for M-profile. 12604 */ 12605 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 12606 flags = FIELD_DP32(flags, TBFLAG_A32, 12607 XSCALE_CPAR, env->cp15.c15_cpar); 12608 } else { 12609 flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, 12610 env->vfp.vec_len); 12611 flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, 12612 env->vfp.vec_stride); 12613 } 12614 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { 12615 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 12616 } 12617 } 12618 12619 flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb); 12620 flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits); 12621 pstate_for_ss = env->uncached_cpsr; 12622 } 12623 12624 /* 12625 * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 12626 * states defined in the ARM ARM for software singlestep: 12627 * SS_ACTIVE PSTATE.SS State 12628 * 0 x Inactive (the TB flag for SS is always 0) 12629 * 1 0 Active-pending 12630 * 1 1 Active-not-pending 12631 * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB. 12632 */ 12633 if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) && 12634 (pstate_for_ss & PSTATE_SS)) { 12635 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1); 12636 } 12637 12638 *pflags = flags; 12639 } 12640 12641 #ifdef TARGET_AARCH64 12642 /* 12643 * The manual says that when SVE is enabled and VQ is widened the 12644 * implementation is allowed to zero the previously inaccessible 12645 * portion of the registers. The corollary to that is that when 12646 * SVE is enabled and VQ is narrowed we are also allowed to zero 12647 * the now inaccessible portion of the registers. 12648 * 12649 * The intent of this is that no predicate bit beyond VQ is ever set. 12650 * Which means that some operations on predicate registers themselves 12651 * may operate on full uint64_t or even unrolled across the maximum 12652 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 12653 * may well be cheaper than conditionals to restrict the operation 12654 * to the relevant portion of a uint16_t[16]. 12655 */ 12656 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 12657 { 12658 int i, j; 12659 uint64_t pmask; 12660 12661 assert(vq >= 1 && vq <= ARM_MAX_VQ); 12662 assert(vq <= env_archcpu(env)->sve_max_vq); 12663 12664 /* Zap the high bits of the zregs. */ 12665 for (i = 0; i < 32; i++) { 12666 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); 12667 } 12668 12669 /* Zap the high bits of the pregs and ffr. */ 12670 pmask = 0; 12671 if (vq & 3) { 12672 pmask = ~(-1ULL << (16 * (vq & 3))); 12673 } 12674 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { 12675 for (i = 0; i < 17; ++i) { 12676 env->vfp.pregs[i].p[j] &= pmask; 12677 } 12678 pmask = 0; 12679 } 12680 } 12681 12682 /* 12683 * Notice a change in SVE vector size when changing EL. 12684 */ 12685 void aarch64_sve_change_el(CPUARMState *env, int old_el, 12686 int new_el, bool el0_a64) 12687 { 12688 ARMCPU *cpu = env_archcpu(env); 12689 int old_len, new_len; 12690 bool old_a64, new_a64; 12691 12692 /* Nothing to do if no SVE. */ 12693 if (!cpu_isar_feature(aa64_sve, cpu)) { 12694 return; 12695 } 12696 12697 /* Nothing to do if FP is disabled in either EL. 
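 * (A non-zero fp_exception_el() means FP and SVE accesses trap at that
 * EL.)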
*/ 12698 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { 12699 return; 12700 } 12701 12702 /* 12703 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped 12704 * at ELx, or not available because the EL is in AArch32 state, then 12705 * for all purposes other than a direct read, the ZCR_ELx.LEN field 12706 * has an effective value of 0". 12707 * 12708 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). 12709 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition 12710 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that 12711 * we already have the correct register contents when encountering the 12712 * vq0->vq0 transition between EL0->EL1. 12713 */ 12714 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; 12715 old_len = (old_a64 && !sve_exception_el(env, old_el) 12716 ? sve_zcr_len_for_el(env, old_el) : 0); 12717 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; 12718 new_len = (new_a64 && !sve_exception_el(env, new_el) 12719 ? sve_zcr_len_for_el(env, new_el) : 0); 12720 12721 /* When changing vector length, clear inaccessible state. */ 12722 if (new_len < old_len) { 12723 aarch64_sve_narrow_vq(env, new_len + 1); 12724 } 12725 } 12726 #endif 12727
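
/*
 * Illustrative sketch, not part of the build: how the CRC helper above
 * relates to a conventional zlib-style CRC-32.  HELPER(crc32) (i.e.
 * helper_crc32()) strips zlib's initial and final inversions, so a
 * caller wanting the standard CRC-32 of a byte buffer supplies the
 * 0xffffffff start value and the final inversion itself, much as guest
 * software does around the CRC32B instruction.  Here buf/len stand for
 * an arbitrary byte buffer and its length:
 *
 *     uint32_t acc = 0xffffffff;
 *     for (size_t i = 0; i < len; i++) {
 *         acc = helper_crc32(acc, buf[i], 1);   // one byte per call
 *     }
 *     acc ^= 0xffffffff;   // acc now equals crc32(0, buf, len) from <zlib.h>
 */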