#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "fpu/softfloat.h"
#include "qemu/range.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size,
                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr,
                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);

/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                V8M_SAttributes *sattrs);
#endif

static void switch_mode(CPUARMState *env, int mode);

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
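/*
 * The GDB register numbering assumed by the VFP helpers above and below is:
 * D0..D15 (or D0..D31 with VFPv3), then, if NEON is present, 16 aliases for
 * the Q registers, followed by FPSID, FPSCR and FPEXC.  For example, with
 * VFPv3+NEON, regs 0..31 are D0..D31, regs 32..47 alias Q0..Q15, and regs
 * 48..50 are FPSID, FPSCR and FPEXC.
 */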
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            stq_le_p(buf, q[0]);
            stq_le_p(buf + 8, q[1]);
            return 16;
        }
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        {
            uint64_t *q = aa64_vfp_qreg(env, reg);
            q[0] = ldq_le_p(buf);
            q[1] = ldq_le_p(buf + 8);
            return 16;
        }
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    const ARMCPRegInfo *ri;
    uint32_t key;

    key = cpu->dyn_xml.cpregs_keys[reg];
    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    if (ri) {
        if (cpreg_field_is_64bit(ri)) {
            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
        } else {
            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
        }
    }
    return 0;
}

static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    return 0;
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
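/*
 * The cpreg_indexes[]/cpreg_values[] arrays built above are the (index, value)
 * pairs that write_cpustate_to_list() and write_list_to_cpustate() keep in
 * sync with cpu->env; they are what gets migrated, and with KVM the same list
 * is used to sync register state with the kernel (see the kvm_sync handling
 * above).
 */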
/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);
    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
        (env->cp15.mdcr_el2 & MDCR_TDE) ||
        (arm_hcr_el2_eff(env) & HCR_TGE);

    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

/*
 * Non-IS variants of TLB operations are upgraded to
 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 * force broadcast of these operations.
 */
static bool tlb_force_broadcast(CPUARMState *env)
{
    return (env->cp15.hcr_el2 & HCR_FB) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiall_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimva_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbiasid_is_write(env, NULL, value);
        return;
    }

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (tlb_force_broadcast(env)) {
        tlbimvaa_is_write(env, NULL, value);
        return;
    }

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3). This allows
     * the secure register to be properly reset and migrated. In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
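/*
 * On pre-v8 CPUs each CPACR cpN access field is two bits wide, so e.g.
 * writing 0x00f00000 (the cp10/cp11 fields [23:20] all ones) grants full
 * access to the VFP/NEON coprocessors, while a value of 0 written back
 * through the mask above leaves them disabled.
 */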
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features.
     */
    cpacr_write(env, ri, 0);
}

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}

typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;

static bool event_always_supported(CPUARMState *env)
{
    return true;
}

static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself
     */
    return 0;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1;
}
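/*
 * The cycle counter model below derives cycle counts from the virtual clock
 * using the fixed ARM_CPU_FREQ defined at the top of this file.  With the
 * current value of 1 GHz, one emulated cycle corresponds to one nanosecond
 * of QEMU_CLOCK_VIRTUAL time in both directions of the conversion
 * (cycles_get_count() and cycles_ns_per()).
 */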
/*
 * Return the underlying cycle count for the PMU cycle counters. If we're in
 * usermode, return the host tick count instead.
 */
static uint64_t cycles_get_count(CPUARMState *env)
{
#ifndef CONFIG_USER_ONLY
    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                    ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
#else
    return cpu_get_host_ticks();
#endif
}

#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static bool instructions_supported(CPUARMState *env)
{
    return use_icount == 1 /* Precise instruction counting */;
}

static uint64_t instructions_get_count(CPUARMState *env)
{
    return (uint64_t)cpu_get_icount_raw();
}

static int64_t instructions_ns_per(uint64_t icount)
{
    return cpu_icount_to_ns((int64_t)icount);
}
#endif

static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    }
#endif
};

/*
 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
 * events (i.e. the statistical profiling extension), this implementation
 * should first be updated to something sparse instead of the current
 * supported_event_map[] array.
 */
#define MAX_EVENT_ID 0x11
#define UNSUPPORTED_EVENT UINT16_MAX
static uint16_t supported_event_map[MAX_EVENT_ID + 1];

/*
 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
 * of ARM event numbers to indices in our pm_events array.
 *
 * Note: Events in the 0x40XX range are not currently supported.
 */
void pmu_init(ARMCPU *cpu)
{
    unsigned int i;

    /*
     * Empty supported_event_map and cpu->pmceid[01] before adding supported
     * events to them
     */
    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
        supported_event_map[i] = UNSUPPORTED_EVENT;
    }
    cpu->pmceid0 = 0;
    cpu->pmceid1 = 0;

    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
        const pm_event *cnt = &pm_events[i];
        assert(cnt->number <= MAX_EVENT_ID);
        /* We do not currently support events in the 0x40xx range */
        assert(cnt->number <= 0x3f);

        if (cnt->supported(&cpu->env)) {
            supported_event_map[cnt->number] = i;
            uint64_t event_mask = 1 << (cnt->number & 0x1f);
            if (cnt->number & 0x20) {
                cpu->pmceid1 |= event_mask;
            } else {
                cpu->pmceid0 |= event_mask;
            }
        }
    }
}

/*
 * Check at runtime whether a PMU event is supported for the current machine
 */
static bool event_supported(uint16_t number)
{
    if (number > MAX_EVENT_ID) {
        return false;
    }
    return supported_event_map[number] != UNSUPPORTED_EVENT;
}
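/*
 * PMUSERENR_EL0 layout assumed by the access checks below: bit 0 is EN
 * (EL0 access to all PMU registers), bit 1 is SW (PMSWINC writes), bit 2 is
 * CR (PMCCNTR reads) and bit 3 is ER (event counter reads).  Each accessfn
 * grants the EL0 access its bit allows and otherwise falls back to
 * pmreg_access().
 */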
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
     * trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool e, p, u, nsk, nsu, nsh, m;
    bool enabled, prohibited, filtered;
    bool secure = arm_is_secure(env);
    int el = arm_current_el(env);
    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;

    if (!arm_feature(env, ARM_FEATURE_EL2) ||
        (counter < hpmn || counter == 31)) {
        e = env->cp15.c9_pmcr & PMCRE;
    } else {
        e = env->cp15.mdcr_el2 & MDCR_HPME;
    }
    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

    if (!secure) {
        if (el == 2 && (counter < hpmn || counter == 31)) {
            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
        } else {
            prohibited = false;
        }
    } else {
        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
            (env->cp15.mdcr_el3 & MDCR_SPME);
    }

    if (prohibited && counter == 31) {
        prohibited = env->cp15.c9_pmcr & PMCRDP;
    }

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    p = filter & PMXEVTYPER_P;
    u = filter & PMXEVTYPER_U;
    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
    m = arm_el_is_aa64(env, 1) &&
        arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);

    if (el == 0) {
        filtered = secure ? u : u != nsu;
    } else if (el == 1) {
        filtered = secure ? p : p != nsk;
    } else if (el == 2) {
        filtered = !nsh;
    } else { /* EL3 */
        filtered = m != p;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!event_supported(event)) {
            return false;
        }
    }

    return enabled && !prohibited && !filtered;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

/*
 * Ensure c15_ccnt is the guest-visible count so that operations such as
 * enabling/disabling the counter or filtering, modifying the count itself,
 * etc. can be done logically. This is essentially a no-op if the counter is
 * not enabled at the time of the call.
 */
void pmccntr_op_start(CPUARMState *env)
{
    uint64_t cycles = cycles_get_count(env);

    if (pmu_counter_enabled(env, 31)) {
        uint64_t eff_cycles = cycles;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            eff_cycles /= 64;
        }

        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;

        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
                                 1ull << 63 : 1ull << 31;
        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
            env->cp15.c9_pmovsr |= (1 << 31);
            pmu_update_irq(env);
        }

        env->cp15.c15_ccnt = new_pmccntr;
    }
    env->cp15.c15_ccnt_delta = cycles;
}
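/*
 * The counters are stored as a delta against the underlying count: while a
 * counter is running, the guest-visible value is (underlying count -
 * c15_ccnt_delta).  pmccntr_op_start() above materialises the guest value in
 * c15_ccnt, and pmccntr_op_finish() below recomputes the delta, so any state
 * change made between the two calls operates on the architectural value.
 */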
/*
 * If PMCCNTR is enabled, recalculate the delta between the clock and the
 * guest-visible count. A call to pmccntr_op_finish should follow every call to
 * pmccntr_op_start.
 */
void pmccntr_op_finish(CPUARMState *env)
{
    if (pmu_counter_enabled(env, 31)) {
#ifndef CONFIG_USER_ONLY
        /* Calculate when the counter will next overflow */
        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
        if (!(env->cp15.c9_pmcr & PMCRLC)) {
            remaining_cycles = (uint32_t)remaining_cycles;
        }
        int64_t overflow_in = cycles_ns_per(remaining_cycles);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            prev_cycles /= 64;
        }
        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
    }
}

static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{
    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}

static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = arm_env_get_cpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}

void pmu_op_start(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_start(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_start(env, i);
    }
}

void pmu_op_finish(CPUARMState *env)
{
    unsigned int i;
    pmccntr_op_finish(env);
    for (i = 0; i < pmu_num_counters(env); i++) {
        pmevcntr_op_finish(env, i);
    }
}

void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}

void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}
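/*
 * pmu_op_start()/pmu_op_finish() bracket any window in which PMU state is
 * inspected or modified (register writes, EL changes, the timer callback
 * below): start materialises the architectural counter values and finish
 * re-arms the deltas and the overflow timer.
 */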
void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmu_op_start(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    if (value & PMCRP) {
        unsigned int i;
        for (i = 0; i < pmu_num_counters(env); i++) {
            env->cp15.c14_pmevcntr[i] = 0;
        }
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmu_op_finish(env);
}

static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t ret;
    pmccntr_op_start(env);
    ret = env->cp15.c15_ccnt;
    pmccntr_op_finish(env);
    return ret;
}
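/*
 * PMSELR.SEL picks which event counter the PMXEVTYPER/PMXEVCNTR accessors
 * below operate on; the architectural value 0x1f (31) selects the cycle
 * counter, which is why pmevtyper_write() redirects counter 31 to
 * PMCCFILTR_EL0.
 */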
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}

static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    pmccntr_op_start(env);
    /* M is not accessible from AArch32 */
    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
        (value & PMCCFILTR);
    pmccntr_op_finish(env);
}

static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* M is not visible in AArch32 */
    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr &= ~value;
    pmu_update_irq(env);
}

static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pmovsr |= value;
    pmu_update_irq(env);
}

static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}

static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               const uint8_t counter)
{
    if (counter == 31) {
        return env->cp15.pmccfiltr_el0;
    } else if (counter < pmu_num_counters(env)) {
        return env->cp15.c14_pmevtyper[counter];
    } else {
        /*
         * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
         * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
         */
        return 0;
    }
}

static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevtyper_write(env, ri, value, counter);
}

static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}

static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevtyper_read(env, ri, counter);
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value, uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);
        env->cp15.c14_pmevcntr[counter] = value;
        pmevcntr_op_finish(env, counter);
    }
    /*
     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
     * are CONSTRAINED UNPREDICTABLE.
     */
}

static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint8_t counter)
{
    if (counter < pmu_num_counters(env)) {
        uint64_t ret;
        pmevcntr_op_start(env, counter);
        ret = env->cp15.c14_pmevcntr[counter];
        pmevcntr_op_finish(env, counter);
        return ret;
    } else {
        /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
         * are CONSTRAINED UNPREDICTABLE.
         */
        return 0;
    }
}

static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    return pmevcntr_read(env, ri, counter);
}

static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}

static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    return env->cp15.c14_pmevcntr[counter];
}

static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}

static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Only the bits for implemented counters (and the C bit) can be set */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
    pmu_update_irq(env);
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten &= ~value;
    pmu_update_irq(env);
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Begin with base v8.0 state. */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1. */
        valid_mask &= ~SCR_NET;
    } else {
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
1876 */ 1877 if (arm_feature(env, ARM_FEATURE_V7) && 1878 !arm_feature(env, ARM_FEATURE_V8)) { 1879 valid_mask &= ~SCR_SMD; 1880 } 1881 } 1882 if (cpu_isar_feature(aa64_lor, cpu)) { 1883 valid_mask |= SCR_TLOR; 1884 } 1885 if (cpu_isar_feature(aa64_pauth, cpu)) { 1886 valid_mask |= SCR_API | SCR_APK; 1887 } 1888 1889 /* Clear all-context RES0 bits. */ 1890 value &= valid_mask; 1891 raw_write(env, ri, value); 1892 } 1893 1894 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1895 { 1896 ARMCPU *cpu = arm_env_get_cpu(env); 1897 1898 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1899 * bank 1900 */ 1901 uint32_t index = A32_BANKED_REG_GET(env, csselr, 1902 ri->secure & ARM_CP_SECSTATE_S); 1903 1904 return cpu->ccsidr[index]; 1905 } 1906 1907 static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, 1908 uint64_t value) 1909 { 1910 raw_write(env, ri, value & 0xf); 1911 } 1912 1913 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1914 { 1915 CPUState *cs = ENV_GET_CPU(env); 1916 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 1917 uint64_t ret = 0; 1918 1919 if (hcr_el2 & HCR_IMO) { 1920 if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { 1921 ret |= CPSR_I; 1922 } 1923 } else { 1924 if (cs->interrupt_request & CPU_INTERRUPT_HARD) { 1925 ret |= CPSR_I; 1926 } 1927 } 1928 1929 if (hcr_el2 & HCR_FMO) { 1930 if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { 1931 ret |= CPSR_F; 1932 } 1933 } else { 1934 if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { 1935 ret |= CPSR_F; 1936 } 1937 } 1938 1939 /* External aborts are not possible in QEMU so A bit is always clear */ 1940 return ret; 1941 } 1942 1943 static const ARMCPRegInfo v7_cp_reginfo[] = { 1944 /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ 1945 { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, 1946 .access = PL1_W, .type = ARM_CP_NOP }, 1947 /* Performance monitors are implementation defined in v7, 1948 * but with an ARM recommended set of registers, which we 1949 * follow. 1950 * 1951 * Performance registers fall into three categories: 1952 * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) 1953 * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) 1954 * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) 1955 * For the cases controlled by PMUSERENR we must set .access to PL0_RW 1956 * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. 
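     * (The pmreg_access* functions used as .accessfn below are those helper
     * functions.)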
1957 */ 1958 { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, 1959 .access = PL0_RW, .type = ARM_CP_ALIAS, 1960 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1961 .writefn = pmcntenset_write, 1962 .accessfn = pmreg_access, 1963 .raw_writefn = raw_write }, 1964 { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, 1965 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, 1966 .access = PL0_RW, .accessfn = pmreg_access, 1967 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, 1968 .writefn = pmcntenset_write, .raw_writefn = raw_write }, 1969 { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, 1970 .access = PL0_RW, 1971 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), 1972 .accessfn = pmreg_access, 1973 .writefn = pmcntenclr_write, 1974 .type = ARM_CP_ALIAS }, 1975 { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, 1976 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, 1977 .access = PL0_RW, .accessfn = pmreg_access, 1978 .type = ARM_CP_ALIAS, 1979 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), 1980 .writefn = pmcntenclr_write }, 1981 { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, 1982 .access = PL0_RW, .type = ARM_CP_IO, 1983 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 1984 .accessfn = pmreg_access, 1985 .writefn = pmovsr_write, 1986 .raw_writefn = raw_write }, 1987 { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, 1988 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, 1989 .access = PL0_RW, .accessfn = pmreg_access, 1990 .type = ARM_CP_ALIAS | ARM_CP_IO, 1991 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 1992 .writefn = pmovsr_write, 1993 .raw_writefn = raw_write }, 1994 { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, 1995 .access = PL0_W, .accessfn = pmreg_access_swinc, 1996 .type = ARM_CP_NO_RAW | ARM_CP_IO, 1997 .writefn = pmswinc_write }, 1998 { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, 1999 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, 2000 .access = PL0_W, .accessfn = pmreg_access_swinc, 2001 .type = ARM_CP_NO_RAW | ARM_CP_IO, 2002 .writefn = pmswinc_write }, 2003 { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, 2004 .access = PL0_RW, .type = ARM_CP_ALIAS, 2005 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), 2006 .accessfn = pmreg_access_selr, .writefn = pmselr_write, 2007 .raw_writefn = raw_write}, 2008 { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, 2009 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, 2010 .access = PL0_RW, .accessfn = pmreg_access_selr, 2011 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), 2012 .writefn = pmselr_write, .raw_writefn = raw_write, }, 2013 { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, 2014 .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, 2015 .readfn = pmccntr_read, .writefn = pmccntr_write32, 2016 .accessfn = pmreg_access_ccntr }, 2017 { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, 2018 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, 2019 .access = PL0_RW, .accessfn = pmreg_access_ccntr, 2020 .type = ARM_CP_IO, 2021 .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), 2022 .readfn = pmccntr_read, .writefn = pmccntr_write, 2023 .raw_readfn = raw_read, .raw_writefn = raw_write, }, 2024 { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, 2025 .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, 
2026 .access = PL0_RW, .accessfn = pmreg_access, 2027 .type = ARM_CP_ALIAS | ARM_CP_IO, 2028 .resetvalue = 0, }, 2029 { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, 2030 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, 2031 .writefn = pmccfiltr_write, .raw_writefn = raw_write, 2032 .access = PL0_RW, .accessfn = pmreg_access, 2033 .type = ARM_CP_IO, 2034 .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), 2035 .resetvalue = 0, }, 2036 { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, 2037 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2038 .accessfn = pmreg_access, 2039 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2040 { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, 2041 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, 2042 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2043 .accessfn = pmreg_access, 2044 .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, 2045 { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, 2046 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2047 .accessfn = pmreg_access_xevcntr, 2048 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2049 { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, 2050 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, 2051 .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2052 .accessfn = pmreg_access_xevcntr, 2053 .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, 2054 { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, 2055 .access = PL0_R | PL1_RW, .accessfn = access_tpm, 2056 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), 2057 .resetvalue = 0, 2058 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2059 { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, 2060 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, 2061 .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, 2062 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), 2063 .resetvalue = 0, 2064 .writefn = pmuserenr_write, .raw_writefn = raw_write }, 2065 { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, 2066 .access = PL1_RW, .accessfn = access_tpm, 2067 .type = ARM_CP_ALIAS | ARM_CP_IO, 2068 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), 2069 .resetvalue = 0, 2070 .writefn = pmintenset_write, .raw_writefn = raw_write }, 2071 { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, 2072 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, 2073 .access = PL1_RW, .accessfn = access_tpm, 2074 .type = ARM_CP_IO, 2075 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2076 .writefn = pmintenset_write, .raw_writefn = raw_write, 2077 .resetvalue = 0x0 }, 2078 { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, 2079 .access = PL1_RW, .accessfn = access_tpm, 2080 .type = ARM_CP_ALIAS | ARM_CP_IO, 2081 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2082 .writefn = pmintenclr_write, }, 2083 { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, 2084 .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, 2085 .access = PL1_RW, .accessfn = access_tpm, 2086 .type = ARM_CP_ALIAS | ARM_CP_IO, 2087 .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), 2088 .writefn = pmintenclr_write }, 2089 { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, 2090 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, 2091 .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, 2092 { .name = "CSSELR", .state = 
ARM_CP_STATE_BOTH, 2093 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, 2094 .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0, 2095 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), 2096 offsetof(CPUARMState, cp15.csselr_ns) } }, 2097 /* Auxiliary ID register: this actually has an IMPDEF value but for now 2098 * just RAZ for all cores: 2099 */ 2100 { .name = "AIDR", .state = ARM_CP_STATE_BOTH, 2101 .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, 2102 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 2103 /* Auxiliary fault status registers: these also are IMPDEF, and we 2104 * choose to RAZ/WI for all cores. 2105 */ 2106 { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, 2107 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, 2108 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2109 { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, 2110 .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, 2111 .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 2112 /* MAIR can just read-as-written because we don't implement caches 2113 * and so don't need to care about memory attributes. 2114 */ 2115 { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, 2116 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, 2117 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), 2118 .resetvalue = 0 }, 2119 { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, 2120 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, 2121 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), 2122 .resetvalue = 0 }, 2123 /* For non-long-descriptor page tables these are PRRR and NMRR; 2124 * regardless they still act as reads-as-written for QEMU. 2125 */ 2126 /* MAIR0/1 are defined separately from their 64-bit counterpart which 2127 * allows them to assign the correct fieldoffset based on the endianness 2128 * handled in the field definitions. 
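     * (They reset via arm_cp_reset_ignore, leaving reset of the shared
     * underlying state to the 64-bit MAIR definitions above.)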
2129 */ 2130 { .name = "MAIR0", .state = ARM_CP_STATE_AA32, 2131 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, 2132 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), 2133 offsetof(CPUARMState, cp15.mair0_ns) }, 2134 .resetfn = arm_cp_reset_ignore }, 2135 { .name = "MAIR1", .state = ARM_CP_STATE_AA32, 2136 .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW, 2137 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), 2138 offsetof(CPUARMState, cp15.mair1_ns) }, 2139 .resetfn = arm_cp_reset_ignore }, 2140 { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, 2141 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, 2142 .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, 2143 /* 32 bit ITLB invalidates */ 2144 { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, 2145 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2146 { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, 2147 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2148 { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, 2149 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2150 /* 32 bit DTLB invalidates */ 2151 { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, 2152 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2153 { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, 2154 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2155 { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, 2156 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2157 /* 32 bit TLB invalidates */ 2158 { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 2159 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write }, 2160 { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 2161 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 2162 { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 2163 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write }, 2164 { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, 2165 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 2166 REGINFO_SENTINEL 2167 }; 2168 2169 static const ARMCPRegInfo v7mp_cp_reginfo[] = { 2170 /* 32 bit TLB invalidates, Inner Shareable */ 2171 { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 2172 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write }, 2173 { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 2174 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 2175 { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 2176 .type = ARM_CP_NO_RAW, .access = PL1_W, 2177 .writefn = tlbiasid_is_write }, 2178 { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 2179 .type = ARM_CP_NO_RAW, .access = PL1_W, 2180 .writefn = tlbimvaa_is_write }, 2181 REGINFO_SENTINEL 2182 }; 2183 2184 static const ARMCPRegInfo pmovsset_cp_reginfo[] = { 2185 /* PMOVSSET is not implemented in v7 before v7ve */ 2186 { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, 2187 .access = PL0_RW, .accessfn = pmreg_access, 2188 .type = ARM_CP_ALIAS | ARM_CP_IO, 2189 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), 2190 .writefn = 
pmovsset_write, 2191 .raw_writefn = raw_write }, 2192 { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, 2193 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, 2194 .access = PL0_RW, .accessfn = pmreg_access, 2195 .type = ARM_CP_ALIAS | ARM_CP_IO, 2196 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), 2197 .writefn = pmovsset_write, 2198 .raw_writefn = raw_write }, 2199 REGINFO_SENTINEL 2200 }; 2201 2202 static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, 2203 uint64_t value) 2204 { 2205 value &= 1; 2206 env->teecr = value; 2207 } 2208 2209 static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, 2210 bool isread) 2211 { 2212 if (arm_current_el(env) == 0 && (env->teecr & 1)) { 2213 return CP_ACCESS_TRAP; 2214 } 2215 return CP_ACCESS_OK; 2216 } 2217 2218 static const ARMCPRegInfo t2ee_cp_reginfo[] = { 2219 { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, 2220 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), 2221 .resetvalue = 0, 2222 .writefn = teecr_write }, 2223 { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, 2224 .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), 2225 .accessfn = teehbr_access, .resetvalue = 0 }, 2226 REGINFO_SENTINEL 2227 }; 2228 2229 static const ARMCPRegInfo v6k_cp_reginfo[] = { 2230 { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, 2231 .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, 2232 .access = PL0_RW, 2233 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, 2234 { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, 2235 .access = PL0_RW, 2236 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), 2237 offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, 2238 .resetfn = arm_cp_reset_ignore }, 2239 { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, 2240 .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, 2241 .access = PL0_R|PL1_W, 2242 .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), 2243 .resetvalue = 0}, 2244 { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, 2245 .access = PL0_R|PL1_W, 2246 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), 2247 offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, 2248 .resetfn = arm_cp_reset_ignore }, 2249 { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, 2250 .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, 2251 .access = PL1_RW, 2252 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, 2253 { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, 2254 .access = PL1_RW, 2255 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), 2256 offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, 2257 .resetvalue = 0 }, 2258 REGINFO_SENTINEL 2259 }; 2260 2261 #ifndef CONFIG_USER_ONLY 2262 2263 static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, 2264 bool isread) 2265 { 2266 /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. 2267 * Writable only at the highest implemented exception level. 2268 */ 2269 int el = arm_current_el(env); 2270 2271 switch (el) { 2272 case 0: 2273 if (!extract32(env->cp15.c14_cntkctl, 0, 2)) { 2274 return CP_ACCESS_TRAP; 2275 } 2276 break; 2277 case 1: 2278 if (!isread && ri->state == ARM_CP_STATE_AA32 && 2279 arm_is_secure_below_el3(env)) { 2280 /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) 
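     * which is why we return CP_ACCESS_TRAP_UNCATEGORIZED here rather than
     * a trap to EL3.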
*/ 2281 return CP_ACCESS_TRAP_UNCATEGORIZED; 2282 } 2283 break; 2284 case 2: 2285 case 3: 2286 break; 2287 } 2288 2289 if (!isread && el < arm_highest_el(env)) { 2290 return CP_ACCESS_TRAP_UNCATEGORIZED; 2291 } 2292 2293 return CP_ACCESS_OK; 2294 } 2295 2296 static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, 2297 bool isread) 2298 { 2299 unsigned int cur_el = arm_current_el(env); 2300 bool secure = arm_is_secure(env); 2301 2302 /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ 2303 if (cur_el == 0 && 2304 !extract32(env->cp15.c14_cntkctl, timeridx, 1)) { 2305 return CP_ACCESS_TRAP; 2306 } 2307 2308 if (arm_feature(env, ARM_FEATURE_EL2) && 2309 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 2310 !extract32(env->cp15.cnthctl_el2, 0, 1)) { 2311 return CP_ACCESS_TRAP_EL2; 2312 } 2313 return CP_ACCESS_OK; 2314 } 2315 2316 static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, 2317 bool isread) 2318 { 2319 unsigned int cur_el = arm_current_el(env); 2320 bool secure = arm_is_secure(env); 2321 2322 /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if 2323 * EL0[PV]TEN is zero. 2324 */ 2325 if (cur_el == 0 && 2326 !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { 2327 return CP_ACCESS_TRAP; 2328 } 2329 2330 if (arm_feature(env, ARM_FEATURE_EL2) && 2331 timeridx == GTIMER_PHYS && !secure && cur_el < 2 && 2332 !extract32(env->cp15.cnthctl_el2, 1, 1)) { 2333 return CP_ACCESS_TRAP_EL2; 2334 } 2335 return CP_ACCESS_OK; 2336 } 2337 2338 static CPAccessResult gt_pct_access(CPUARMState *env, 2339 const ARMCPRegInfo *ri, 2340 bool isread) 2341 { 2342 return gt_counter_access(env, GTIMER_PHYS, isread); 2343 } 2344 2345 static CPAccessResult gt_vct_access(CPUARMState *env, 2346 const ARMCPRegInfo *ri, 2347 bool isread) 2348 { 2349 return gt_counter_access(env, GTIMER_VIRT, isread); 2350 } 2351 2352 static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2353 bool isread) 2354 { 2355 return gt_timer_access(env, GTIMER_PHYS, isread); 2356 } 2357 2358 static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, 2359 bool isread) 2360 { 2361 return gt_timer_access(env, GTIMER_VIRT, isread); 2362 } 2363 2364 static CPAccessResult gt_stimer_access(CPUARMState *env, 2365 const ARMCPRegInfo *ri, 2366 bool isread) 2367 { 2368 /* The AArch64 register view of the secure physical timer is 2369 * always accessible from EL3, and configurably accessible from 2370 * Secure EL1. 2371 */ 2372 switch (arm_current_el(env)) { 2373 case 1: 2374 if (!arm_is_secure(env)) { 2375 return CP_ACCESS_TRAP; 2376 } 2377 if (!(env->cp15.scr_el3 & SCR_ST)) { 2378 return CP_ACCESS_TRAP_EL3; 2379 } 2380 return CP_ACCESS_OK; 2381 case 0: 2382 case 2: 2383 return CP_ACCESS_TRAP; 2384 case 3: 2385 return CP_ACCESS_OK; 2386 default: 2387 g_assert_not_reached(); 2388 } 2389 } 2390 2391 static uint64_t gt_get_countervalue(CPUARMState *env) 2392 { 2393 return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE; 2394 } 2395 2396 static void gt_recalc_timer(ARMCPU *cpu, int timeridx) 2397 { 2398 ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; 2399 2400 if (gt->ctl & 1) { 2401 /* Timer enabled: calculate and set current ISTATUS, irq, and 2402 * reset timer to when ISTATUS next has to change 2403 */ 2404 uint64_t offset = timeridx == GTIMER_VIRT ?
2405 cpu->env.cp15.cntvoff_el2 : 0; 2406 uint64_t count = gt_get_countervalue(&cpu->env); 2407 /* Note that this must be unsigned 64 bit arithmetic: */ 2408 int istatus = count - offset >= gt->cval; 2409 uint64_t nexttick; 2410 int irqstate; 2411 2412 gt->ctl = deposit32(gt->ctl, 2, 1, istatus); 2413 2414 irqstate = (istatus && !(gt->ctl & 2)); 2415 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2416 2417 if (istatus) { 2418 /* Next transition is when count rolls back over to zero */ 2419 nexttick = UINT64_MAX; 2420 } else { 2421 /* Next transition is when we hit cval */ 2422 nexttick = gt->cval + offset; 2423 } 2424 /* Note that the desired next expiry time might be beyond the 2425 * signed-64-bit range of a QEMUTimer -- in this case we just 2426 * set the timer for as far in the future as possible. When the 2427 * timer expires we will reset the timer for any remaining period. 2428 */ 2429 if (nexttick > INT64_MAX / GTIMER_SCALE) { 2430 nexttick = INT64_MAX / GTIMER_SCALE; 2431 } 2432 timer_mod(cpu->gt_timer[timeridx], nexttick); 2433 trace_arm_gt_recalc(timeridx, irqstate, nexttick); 2434 } else { 2435 /* Timer disabled: ISTATUS and timer output always clear */ 2436 gt->ctl &= ~4; 2437 qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); 2438 timer_del(cpu->gt_timer[timeridx]); 2439 trace_arm_gt_recalc_disabled(timeridx); 2440 } 2441 } 2442 2443 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2444 int timeridx) 2445 { 2446 ARMCPU *cpu = arm_env_get_cpu(env); 2447 2448 timer_del(cpu->gt_timer[timeridx]); 2449 } 2450 2451 static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2452 { 2453 return gt_get_countervalue(env); 2454 } 2455 2456 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2457 { 2458 return gt_get_countervalue(env) - env->cp15.cntvoff_el2; 2459 } 2460 2461 static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2462 int timeridx, 2463 uint64_t value) 2464 { 2465 trace_arm_gt_cval_write(timeridx, value); 2466 env->cp15.c14_timer[timeridx].cval = value; 2467 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 2468 } 2469 2470 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, 2471 int timeridx) 2472 { 2473 uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0; 2474 2475 return (uint32_t)(env->cp15.c14_timer[timeridx].cval - 2476 (gt_get_countervalue(env) - offset)); 2477 } 2478 2479 static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2480 int timeridx, 2481 uint64_t value) 2482 { 2483 uint64_t offset = timeridx == GTIMER_VIRT ? 
env->cp15.cntvoff_el2 : 0; 2484 2485 trace_arm_gt_tval_write(timeridx, value); 2486 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2487 sextract64(value, 0, 32); 2488 gt_recalc_timer(arm_env_get_cpu(env), timeridx); 2489 } 2490 2491 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2492 int timeridx, 2493 uint64_t value) 2494 { 2495 ARMCPU *cpu = arm_env_get_cpu(env); 2496 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2497 2498 trace_arm_gt_ctl_write(timeridx, value); 2499 env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); 2500 if ((oldval ^ value) & 1) { 2501 /* Enable toggled */ 2502 gt_recalc_timer(cpu, timeridx); 2503 } else if ((oldval ^ value) & 2) { 2504 /* IMASK toggled: don't need to recalculate, 2505 * just set the interrupt line based on ISTATUS 2506 */ 2507 int irqstate = (oldval & 4) && !(value & 2); 2508 2509 trace_arm_gt_imask_toggle(timeridx, irqstate); 2510 qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); 2511 } 2512 } 2513 2514 static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2515 { 2516 gt_timer_reset(env, ri, GTIMER_PHYS); 2517 } 2518 2519 static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2520 uint64_t value) 2521 { 2522 gt_cval_write(env, ri, GTIMER_PHYS, value); 2523 } 2524 2525 static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2526 { 2527 return gt_tval_read(env, ri, GTIMER_PHYS); 2528 } 2529 2530 static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2531 uint64_t value) 2532 { 2533 gt_tval_write(env, ri, GTIMER_PHYS, value); 2534 } 2535 2536 static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2537 uint64_t value) 2538 { 2539 gt_ctl_write(env, ri, GTIMER_PHYS, value); 2540 } 2541 2542 static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2543 { 2544 gt_timer_reset(env, ri, GTIMER_VIRT); 2545 } 2546 2547 static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2548 uint64_t value) 2549 { 2550 gt_cval_write(env, ri, GTIMER_VIRT, value); 2551 } 2552 2553 static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2554 { 2555 return gt_tval_read(env, ri, GTIMER_VIRT); 2556 } 2557 2558 static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2559 uint64_t value) 2560 { 2561 gt_tval_write(env, ri, GTIMER_VIRT, value); 2562 } 2563 2564 static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2565 uint64_t value) 2566 { 2567 gt_ctl_write(env, ri, GTIMER_VIRT, value); 2568 } 2569 2570 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2571 uint64_t value) 2572 { 2573 ARMCPU *cpu = arm_env_get_cpu(env); 2574 2575 trace_arm_gt_cntvoff_write(value); 2576 raw_write(env, ri, value); 2577 gt_recalc_timer(cpu, GTIMER_VIRT); 2578 } 2579 2580 static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2581 { 2582 gt_timer_reset(env, ri, GTIMER_HYP); 2583 } 2584 2585 static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2586 uint64_t value) 2587 { 2588 gt_cval_write(env, ri, GTIMER_HYP, value); 2589 } 2590 2591 static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2592 { 2593 return gt_tval_read(env, ri, GTIMER_HYP); 2594 } 2595 2596 static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2597 uint64_t value) 2598 { 2599 gt_tval_write(env, ri, GTIMER_HYP, value); 2600 } 2601 2602 static void gt_hyp_ctl_write(CPUARMState *env, const 
ARMCPRegInfo *ri, 2603 uint64_t value) 2604 { 2605 gt_ctl_write(env, ri, GTIMER_HYP, value); 2606 } 2607 2608 static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) 2609 { 2610 gt_timer_reset(env, ri, GTIMER_SEC); 2611 } 2612 2613 static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2614 uint64_t value) 2615 { 2616 gt_cval_write(env, ri, GTIMER_SEC, value); 2617 } 2618 2619 static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) 2620 { 2621 return gt_tval_read(env, ri, GTIMER_SEC); 2622 } 2623 2624 static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, 2625 uint64_t value) 2626 { 2627 gt_tval_write(env, ri, GTIMER_SEC, value); 2628 } 2629 2630 static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2631 uint64_t value) 2632 { 2633 gt_ctl_write(env, ri, GTIMER_SEC, value); 2634 } 2635 2636 void arm_gt_ptimer_cb(void *opaque) 2637 { 2638 ARMCPU *cpu = opaque; 2639 2640 gt_recalc_timer(cpu, GTIMER_PHYS); 2641 } 2642 2643 void arm_gt_vtimer_cb(void *opaque) 2644 { 2645 ARMCPU *cpu = opaque; 2646 2647 gt_recalc_timer(cpu, GTIMER_VIRT); 2648 } 2649 2650 void arm_gt_htimer_cb(void *opaque) 2651 { 2652 ARMCPU *cpu = opaque; 2653 2654 gt_recalc_timer(cpu, GTIMER_HYP); 2655 } 2656 2657 void arm_gt_stimer_cb(void *opaque) 2658 { 2659 ARMCPU *cpu = opaque; 2660 2661 gt_recalc_timer(cpu, GTIMER_SEC); 2662 } 2663 2664 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2665 /* Note that CNTFRQ is purely reads-as-written for the benefit 2666 * of software; writing it doesn't actually change the timer frequency. 2667 * Our reset value matches the fixed frequency we implement the timer at. 2668 */ 2669 { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, 2670 .type = ARM_CP_ALIAS, 2671 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2672 .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), 2673 }, 2674 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2675 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2676 .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, 2677 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2678 .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE, 2679 }, 2680 /* overall control: mostly access permissions */ 2681 { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, 2682 .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, 2683 .access = PL1_RW, 2684 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), 2685 .resetvalue = 0, 2686 }, 2687 /* per-timer control */ 2688 { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2689 .secure = ARM_CP_SECSTATE_NS, 2690 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2691 .accessfn = gt_ptimer_access, 2692 .fieldoffset = offsetoflow32(CPUARMState, 2693 cp15.c14_timer[GTIMER_PHYS].ctl), 2694 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2695 }, 2696 { .name = "CNTP_CTL_S", 2697 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, 2698 .secure = ARM_CP_SECSTATE_S, 2699 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2700 .accessfn = gt_ptimer_access, 2701 .fieldoffset = offsetoflow32(CPUARMState, 2702 cp15.c14_timer[GTIMER_SEC].ctl), 2703 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2704 }, 2705 { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, 2706 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, 2707 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2708 .accessfn = gt_ptimer_access, 2709 .fieldoffset = offsetof(CPUARMState, 
cp15.c14_timer[GTIMER_PHYS].ctl), 2710 .resetvalue = 0, 2711 .writefn = gt_phys_ctl_write, .raw_writefn = raw_write, 2712 }, 2713 { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, 2714 .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R, 2715 .accessfn = gt_vtimer_access, 2716 .fieldoffset = offsetoflow32(CPUARMState, 2717 cp15.c14_timer[GTIMER_VIRT].ctl), 2718 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2719 }, 2720 { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, 2721 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, 2722 .type = ARM_CP_IO, .access = PL1_RW | PL0_R, 2723 .accessfn = gt_vtimer_access, 2724 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), 2725 .resetvalue = 0, 2726 .writefn = gt_virt_ctl_write, .raw_writefn = raw_write, 2727 }, 2728 /* TimerValue views: a 32 bit downcounting view of the underlying state */ 2729 { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2730 .secure = ARM_CP_SECSTATE_NS, 2731 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2732 .accessfn = gt_ptimer_access, 2733 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2734 }, 2735 { .name = "CNTP_TVAL_S", 2736 .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, 2737 .secure = ARM_CP_SECSTATE_S, 2738 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2739 .accessfn = gt_ptimer_access, 2740 .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, 2741 }, 2742 { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2743 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, 2744 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2745 .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, 2746 .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write, 2747 }, 2748 { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, 2749 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2750 .accessfn = gt_vtimer_access, 2751 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2752 }, 2753 { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, 2754 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, 2755 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R, 2756 .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, 2757 .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write, 2758 }, 2759 /* The counter itself */ 2760 { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, 2761 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2762 .accessfn = gt_pct_access, 2763 .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, 2764 }, 2765 { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, 2766 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, 2767 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2768 .accessfn = gt_pct_access, .readfn = gt_cnt_read, 2769 }, 2770 { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, 2771 .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, 2772 .accessfn = gt_vct_access, 2773 .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, 2774 }, 2775 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2776 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2777 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2778 .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, 2779 }, 2780 /* Comparison value, indicating when the timer goes off */ 2781 { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, 2782 .secure = ARM_CP_SECSTATE_NS, 2783 .access = PL1_RW | 
PL0_R, 2784 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2785 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2786 .accessfn = gt_ptimer_access, 2787 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2788 }, 2789 { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, 2790 .secure = ARM_CP_SECSTATE_S, 2791 .access = PL1_RW | PL0_R, 2792 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2793 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2794 .accessfn = gt_ptimer_access, 2795 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2796 }, 2797 { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2798 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, 2799 .access = PL1_RW | PL0_R, 2800 .type = ARM_CP_IO, 2801 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), 2802 .resetvalue = 0, .accessfn = gt_ptimer_access, 2803 .writefn = gt_phys_cval_write, .raw_writefn = raw_write, 2804 }, 2805 { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, 2806 .access = PL1_RW | PL0_R, 2807 .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, 2808 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2809 .accessfn = gt_vtimer_access, 2810 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2811 }, 2812 { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, 2813 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, 2814 .access = PL1_RW | PL0_R, 2815 .type = ARM_CP_IO, 2816 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), 2817 .resetvalue = 0, .accessfn = gt_vtimer_access, 2818 .writefn = gt_virt_cval_write, .raw_writefn = raw_write, 2819 }, 2820 /* Secure timer -- this is actually restricted to only EL3 2821 * and configurably Secure-EL1 via the accessfn. 2822 */ 2823 { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, 2824 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, 2825 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, 2826 .accessfn = gt_stimer_access, 2827 .readfn = gt_sec_tval_read, 2828 .writefn = gt_sec_tval_write, 2829 .resetfn = gt_sec_timer_reset, 2830 }, 2831 { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, 2832 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, 2833 .type = ARM_CP_IO, .access = PL1_RW, 2834 .accessfn = gt_stimer_access, 2835 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), 2836 .resetvalue = 0, 2837 .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, 2838 }, 2839 { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, 2840 .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, 2841 .type = ARM_CP_IO, .access = PL1_RW, 2842 .accessfn = gt_stimer_access, 2843 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), 2844 .writefn = gt_sec_cval_write, .raw_writefn = raw_write, 2845 }, 2846 REGINFO_SENTINEL 2847 }; 2848 2849 #else 2850 2851 /* In user-mode most of the generic timer registers are inaccessible 2852 * however modern kernels (4.12+) allow access to cntvct_el0 2853 */ 2854 2855 static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) 2856 { 2857 /* Currently we have no support for QEMUTimer in linux-user so we 2858 * can't call gt_get_countervalue(env), instead we directly 2859 * call the lower level functions. 
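     * cpu_get_clock() is a nanosecond clock, so dividing by GTIMER_SCALE
     * matches the nominal frequency advertised by CNTFRQ_EL0 below.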
2860 */ 2861 return cpu_get_clock() / GTIMER_SCALE; 2862 } 2863 2864 static const ARMCPRegInfo generic_timer_cp_reginfo[] = { 2865 { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, 2866 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, 2867 .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, 2868 .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), 2869 .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, 2870 }, 2871 { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, 2872 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, 2873 .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, 2874 .readfn = gt_virt_cnt_read, 2875 }, 2876 REGINFO_SENTINEL 2877 }; 2878 2879 #endif 2880 2881 static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 2882 { 2883 if (arm_feature(env, ARM_FEATURE_LPAE)) { 2884 raw_write(env, ri, value); 2885 } else if (arm_feature(env, ARM_FEATURE_V7)) { 2886 raw_write(env, ri, value & 0xfffff6ff); 2887 } else { 2888 raw_write(env, ri, value & 0xfffff1ff); 2889 } 2890 } 2891 2892 #ifndef CONFIG_USER_ONLY 2893 /* get_phys_addr() isn't present for user-mode-only targets */ 2894 2895 static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, 2896 bool isread) 2897 { 2898 if (ri->opc2 & 4) { 2899 /* The ATS12NSO* operations must trap to EL3 if executed in 2900 * Secure EL1 (which can only happen if EL3 is AArch64). 2901 * They are simply UNDEF if executed from NS EL1. 2902 * They function normally from EL2 or EL3. 2903 */ 2904 if (arm_current_el(env) == 1) { 2905 if (arm_is_secure_below_el3(env)) { 2906 return CP_ACCESS_TRAP_UNCATEGORIZED_EL3; 2907 } 2908 return CP_ACCESS_TRAP_UNCATEGORIZED; 2909 } 2910 } 2911 return CP_ACCESS_OK; 2912 } 2913 2914 static uint64_t do_ats_write(CPUARMState *env, uint64_t value, 2915 MMUAccessType access_type, ARMMMUIdx mmu_idx) 2916 { 2917 hwaddr phys_addr; 2918 target_ulong page_size; 2919 int prot; 2920 bool ret; 2921 uint64_t par64; 2922 bool format64 = false; 2923 MemTxAttrs attrs = {}; 2924 ARMMMUFaultInfo fi = {}; 2925 ARMCacheAttrs cacheattrs = {}; 2926 2927 ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs, 2928 &prot, &page_size, &fi, &cacheattrs); 2929 2930 if (is_a64(env)) { 2931 format64 = true; 2932 } else if (arm_feature(env, ARM_FEATURE_LPAE)) { 2933 /* 2934 * ATS1Cxx: 2935 * * TTBCR.EAE determines whether the result is returned using the 2936 * 32-bit or the 64-bit PAR format 2937 * * Instructions executed in Hyp mode always use the 64bit format 2938 * 2939 * ATS1S2NSOxx uses the 64bit format if any of the following is true: 2940 * * The Non-secure TTBCR.EAE bit is set to 1 2941 * * The implementation includes EL2, and the value of HCR.VM is 1 2942 * 2943 * (Note that HCR.DC makes HCR.VM behave as if it is 1.) 2944 * 2945 * ATS1Hx always uses the 64bit format. 
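     * In the 64-bit PAR assembled below: bit 0 is F, bit 11 is the LPAE bit,
     * bits [8:7] are SH, bits [63:56] are ATTR, and bit 9 is NS on success
     * or S (stage 2 fault) on a failure.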
2946 */ 2947 format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); 2948 2949 if (arm_feature(env, ARM_FEATURE_EL2)) { 2950 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 2951 format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); 2952 } else { 2953 format64 |= arm_current_el(env) == 2; 2954 } 2955 } 2956 } 2957 2958 if (format64) { 2959 /* Create a 64-bit PAR */ 2960 par64 = (1 << 11); /* LPAE bit always set */ 2961 if (!ret) { 2962 par64 |= phys_addr & ~0xfffULL; 2963 if (!attrs.secure) { 2964 par64 |= (1 << 9); /* NS */ 2965 } 2966 par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */ 2967 par64 |= cacheattrs.shareability << 7; /* SH */ 2968 } else { 2969 uint32_t fsr = arm_fi_to_lfsc(&fi); 2970 2971 par64 |= 1; /* F */ 2972 par64 |= (fsr & 0x3f) << 1; /* FS */ 2973 if (fi.stage2) { 2974 par64 |= (1 << 9); /* S */ 2975 } 2976 if (fi.s1ptw) { 2977 par64 |= (1 << 8); /* PTW */ 2978 } 2979 } 2980 } else { 2981 /* fsr is a DFSR/IFSR value for the short descriptor 2982 * translation table format (with WnR always clear). 2983 * Convert it to a 32-bit PAR. 2984 */ 2985 if (!ret) { 2986 /* We do not set any attribute bits in the PAR */ 2987 if (page_size == (1 << 24) 2988 && arm_feature(env, ARM_FEATURE_V7)) { 2989 par64 = (phys_addr & 0xff000000) | (1 << 1); 2990 } else { 2991 par64 = phys_addr & 0xfffff000; 2992 } 2993 if (!attrs.secure) { 2994 par64 |= (1 << 9); /* NS */ 2995 } 2996 } else { 2997 uint32_t fsr = arm_fi_to_sfsc(&fi); 2998 2999 par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | 3000 ((fsr & 0xf) << 1) | 1; 3001 } 3002 } 3003 return par64; 3004 } 3005 3006 static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 3007 { 3008 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3009 uint64_t par64; 3010 ARMMMUIdx mmu_idx; 3011 int el = arm_current_el(env); 3012 bool secure = arm_is_secure_below_el3(env); 3013 3014 switch (ri->opc2 & 6) { 3015 case 0: 3016 /* stage 1 current state PL1: ATS1CPR, ATS1CPW */ 3017 switch (el) { 3018 case 3: 3019 mmu_idx = ARMMMUIdx_S1E3; 3020 break; 3021 case 2: 3022 mmu_idx = ARMMMUIdx_S1NSE1; 3023 break; 3024 case 1: 3025 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 3026 break; 3027 default: 3028 g_assert_not_reached(); 3029 } 3030 break; 3031 case 2: 3032 /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ 3033 switch (el) { 3034 case 3: 3035 mmu_idx = ARMMMUIdx_S1SE0; 3036 break; 3037 case 2: 3038 mmu_idx = ARMMMUIdx_S1NSE0; 3039 break; 3040 case 1: 3041 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 3042 break; 3043 default: 3044 g_assert_not_reached(); 3045 } 3046 break; 3047 case 4: 3048 /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ 3049 mmu_idx = ARMMMUIdx_S12NSE1; 3050 break; 3051 case 6: 3052 /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ 3053 mmu_idx = ARMMMUIdx_S12NSE0; 3054 break; 3055 default: 3056 g_assert_not_reached(); 3057 } 3058 3059 par64 = do_ats_write(env, value, access_type, mmu_idx); 3060 3061 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3062 } 3063 3064 static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, 3065 uint64_t value) 3066 { 3067 MMUAccessType access_type = ri->opc2 & 1 ? 
MMU_DATA_STORE : MMU_DATA_LOAD; 3068 uint64_t par64; 3069 3070 par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2); 3071 3072 A32_BANKED_CURRENT_REG_SET(env, par, par64); 3073 } 3074 3075 static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, 3076 bool isread) 3077 { 3078 if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) { 3079 return CP_ACCESS_TRAP; 3080 } 3081 return CP_ACCESS_OK; 3082 } 3083 3084 static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, 3085 uint64_t value) 3086 { 3087 MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; 3088 ARMMMUIdx mmu_idx; 3089 int secure = arm_is_secure_below_el3(env); 3090 3091 switch (ri->opc2 & 6) { 3092 case 0: 3093 switch (ri->opc1) { 3094 case 0: /* AT S1E1R, AT S1E1W */ 3095 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1; 3096 break; 3097 case 4: /* AT S1E2R, AT S1E2W */ 3098 mmu_idx = ARMMMUIdx_S1E2; 3099 break; 3100 case 6: /* AT S1E3R, AT S1E3W */ 3101 mmu_idx = ARMMMUIdx_S1E3; 3102 break; 3103 default: 3104 g_assert_not_reached(); 3105 } 3106 break; 3107 case 2: /* AT S1E0R, AT S1E0W */ 3108 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0; 3109 break; 3110 case 4: /* AT S12E1R, AT S12E1W */ 3111 mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1; 3112 break; 3113 case 6: /* AT S12E0R, AT S12E0W */ 3114 mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0; 3115 break; 3116 default: 3117 g_assert_not_reached(); 3118 } 3119 3120 env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx); 3121 } 3122 #endif 3123 3124 static const ARMCPRegInfo vapa_cp_reginfo[] = { 3125 { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, 3126 .access = PL1_RW, .resetvalue = 0, 3127 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), 3128 offsetoflow32(CPUARMState, cp15.par_ns) }, 3129 .writefn = par_write }, 3130 #ifndef CONFIG_USER_ONLY 3131 /* This underdecoding is safe because the reginfo is NO_RAW. */ 3132 { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, 3133 .access = PL1_W, .accessfn = ats_access, 3134 .writefn = ats_write, .type = ARM_CP_NO_RAW }, 3135 #endif 3136 REGINFO_SENTINEL 3137 }; 3138 3139 /* Return basic MPU access permission bits. */ 3140 static uint32_t simple_mpu_ap_bits(uint32_t val) 3141 { 3142 uint32_t ret; 3143 uint32_t mask; 3144 int i; 3145 ret = 0; 3146 mask = 3; 3147 for (i = 0; i < 16; i += 2) { 3148 ret |= (val >> i) & mask; 3149 mask <<= 2; 3150 } 3151 return ret; 3152 } 3153 3154 /* Pad basic MPU access permission bits to extended format. 
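     * e.g. 2-bit AP fields packed at 2-bit strides are spread out to 4-bit
     * strides, so a simple-format value of 0x00ff becomes 0x3333.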
*/ 3155 static uint32_t extended_mpu_ap_bits(uint32_t val) 3156 { 3157 uint32_t ret; 3158 uint32_t mask; 3159 int i; 3160 ret = 0; 3161 mask = 3; 3162 for (i = 0; i < 16; i += 2) { 3163 ret |= (val & mask) << i; 3164 mask <<= 2; 3165 } 3166 return ret; 3167 } 3168 3169 static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3170 uint64_t value) 3171 { 3172 env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); 3173 } 3174 3175 static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3176 { 3177 return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); 3178 } 3179 3180 static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, 3181 uint64_t value) 3182 { 3183 env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); 3184 } 3185 3186 static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) 3187 { 3188 return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); 3189 } 3190 3191 static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) 3192 { 3193 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3194 3195 if (!u32p) { 3196 return 0; 3197 } 3198 3199 u32p += env->pmsav7.rnr[M_REG_NS]; 3200 return *u32p; 3201 } 3202 3203 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3204 uint64_t value) 3205 { 3206 ARMCPU *cpu = arm_env_get_cpu(env); 3207 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3208 3209 if (!u32p) { 3210 return; 3211 } 3212 3213 u32p += env->pmsav7.rnr[M_REG_NS]; 3214 tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ 3215 *u32p = value; 3216 } 3217 3218 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3219 uint64_t value) 3220 { 3221 ARMCPU *cpu = arm_env_get_cpu(env); 3222 uint32_t nrgs = cpu->pmsav7_dregion; 3223 3224 if (value >= nrgs) { 3225 qemu_log_mask(LOG_GUEST_ERROR, 3226 "PMSAv7 RGNR write >= # supported regions, %" PRIu32 3227 " > %" PRIu32 "\n", (uint32_t)value, nrgs); 3228 return; 3229 } 3230 3231 raw_write(env, ri, value); 3232 } 3233 3234 static const ARMCPRegInfo pmsav7_cp_reginfo[] = { 3235 /* Reset for all these registers is handled in arm_cpu_reset(), 3236 * because the PMSAv7 is also used by M-profile CPUs, which do 3237 * not register cpregs but still need the state to be reset. 
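     * DRBAR/DRSR/DRACR index dynamically sized per-region arrays via
     * pmsav7.rnr, hence the pmsav7_read/pmsav7_write accessors and ARM_CP_NO_RAW.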
3238 */ 3239 { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, 3240 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3241 .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), 3242 .readfn = pmsav7_read, .writefn = pmsav7_write, 3243 .resetfn = arm_cp_reset_ignore }, 3244 { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, 3245 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3246 .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), 3247 .readfn = pmsav7_read, .writefn = pmsav7_write, 3248 .resetfn = arm_cp_reset_ignore }, 3249 { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, 3250 .access = PL1_RW, .type = ARM_CP_NO_RAW, 3251 .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), 3252 .readfn = pmsav7_read, .writefn = pmsav7_write, 3253 .resetfn = arm_cp_reset_ignore }, 3254 { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, 3255 .access = PL1_RW, 3256 .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), 3257 .writefn = pmsav7_rgnr_write, 3258 .resetfn = arm_cp_reset_ignore }, 3259 REGINFO_SENTINEL 3260 }; 3261 3262 static const ARMCPRegInfo pmsav5_cp_reginfo[] = { 3263 { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3264 .access = PL1_RW, .type = ARM_CP_ALIAS, 3265 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3266 .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, 3267 { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3268 .access = PL1_RW, .type = ARM_CP_ALIAS, 3269 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3270 .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, 3271 { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, 3272 .access = PL1_RW, 3273 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), 3274 .resetvalue = 0, }, 3275 { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, 3276 .access = PL1_RW, 3277 .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), 3278 .resetvalue = 0, }, 3279 { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 3280 .access = PL1_RW, 3281 .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, 3282 { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, 3283 .access = PL1_RW, 3284 .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, 3285 /* Protection region base and size registers */ 3286 { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, 3287 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3288 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, 3289 { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, 3290 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3291 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, 3292 { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, 3293 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3294 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, 3295 { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, 3296 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3297 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, 3298 { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, 3299 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3300 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, 3301 { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, 3302 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3303 .fieldoffset 
= offsetof(CPUARMState, cp15.c6_region[5]) }, 3304 { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, 3305 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3306 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, 3307 { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, 3308 .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, 3309 .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, 3310 REGINFO_SENTINEL 3311 }; 3312 3313 static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri, 3314 uint64_t value) 3315 { 3316 TCR *tcr = raw_ptr(env, ri); 3317 int maskshift = extract32(value, 0, 3); 3318 3319 if (!arm_feature(env, ARM_FEATURE_V8)) { 3320 if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { 3321 /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when 3322 * using Long-descriptor translation table format */ 3323 value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); 3324 } else if (arm_feature(env, ARM_FEATURE_EL3)) { 3325 /* In an implementation that includes the Security Extensions 3326 * TTBCR has additional fields PD0 [4] and PD1 [5] for 3327 * Short-descriptor translation table format. 3328 */ 3329 value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; 3330 } else { 3331 value &= TTBCR_N; 3332 } 3333 } 3334 3335 /* Update the masks corresponding to the TCR bank being written. 3336 * Note that we always calculate mask and base_mask, but 3337 * they are only used for short-descriptor tables (ie if EAE is 0); 3338 * for long-descriptor tables the TCR fields are used differently 3339 * and the mask and base_mask values are meaningless. 3340 */ 3341 tcr->raw_tcr = value; 3342 tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift); 3343 tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift); 3344 } 3345 3346 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3347 uint64_t value) 3348 { 3349 ARMCPU *cpu = arm_env_get_cpu(env); 3350 TCR *tcr = raw_ptr(env, ri); 3351 3352 if (arm_feature(env, ARM_FEATURE_LPAE)) { 3353 /* With LPAE the TTBCR could result in a change of ASID 3354 * via the TTBCR.A1 bit, so do a TLB flush. 3355 */ 3356 tlb_flush(CPU(cpu)); 3357 } 3358 /* Preserve the high half of TCR_EL1, set via TTBCR2. */ 3359 value = deposit64(tcr->raw_tcr, 0, 32, value); 3360 vmsa_ttbcr_raw_write(env, ri, value); 3361 } 3362 3363 static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri) 3364 { 3365 TCR *tcr = raw_ptr(env, ri); 3366 3367 /* Reset both the TCR as well as the masks corresponding to the bank of 3368 * the TCR being reset. 3369 */ 3370 tcr->raw_tcr = 0; 3371 tcr->mask = 0; 3372 tcr->base_mask = 0xffffc000u; 3373 } 3374 3375 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3376 uint64_t value) 3377 { 3378 ARMCPU *cpu = arm_env_get_cpu(env); 3379 TCR *tcr = raw_ptr(env, ri); 3380 3381 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ 3382 tlb_flush(CPU(cpu)); 3383 tcr->raw_tcr = value; 3384 } 3385 3386 static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3387 uint64_t value) 3388 { 3389 /* If the ASID changes (with a 64-bit write), we must flush the TLB.
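     * For the 64-bit TTBR format the ASID is in bits [63:48], which is why
     * only those bits are compared below.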
*/ 3390 if (cpreg_field_is_64bit(ri) && 3391 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3392 ARMCPU *cpu = arm_env_get_cpu(env); 3393 tlb_flush(CPU(cpu)); 3394 } 3395 raw_write(env, ri, value); 3396 } 3397 3398 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3399 uint64_t value) 3400 { 3401 ARMCPU *cpu = arm_env_get_cpu(env); 3402 CPUState *cs = CPU(cpu); 3403 3404 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */ 3405 if (raw_read(env, ri) != value) { 3406 tlb_flush_by_mmuidx(cs, 3407 ARMMMUIdxBit_S12NSE1 | 3408 ARMMMUIdxBit_S12NSE0 | 3409 ARMMMUIdxBit_S2NS); 3410 raw_write(env, ri, value); 3411 } 3412 } 3413 3414 static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { 3415 { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, 3416 .access = PL1_RW, .type = ARM_CP_ALIAS, 3417 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), 3418 offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, 3419 { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, 3420 .access = PL1_RW, .resetvalue = 0, 3421 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), 3422 offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, 3423 { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, 3424 .access = PL1_RW, .resetvalue = 0, 3425 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), 3426 offsetof(CPUARMState, cp15.dfar_ns) } }, 3427 { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, 3428 .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, 3429 .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), 3430 .resetvalue = 0, }, 3431 REGINFO_SENTINEL 3432 }; 3433 3434 static const ARMCPRegInfo vmsa_cp_reginfo[] = { 3435 { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, 3436 .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, 3437 .access = PL1_RW, 3438 .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, 3439 { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, 3440 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, 3441 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3442 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3443 offsetof(CPUARMState, cp15.ttbr0_ns) } }, 3444 { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, 3445 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, 3446 .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0, 3447 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3448 offsetof(CPUARMState, cp15.ttbr1_ns) } }, 3449 { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, 3450 .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3451 .access = PL1_RW, .writefn = vmsa_tcr_el1_write, 3452 .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write, 3453 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, 3454 { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, 3455 .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, 3456 .raw_writefn = vmsa_ttbcr_raw_write, 3457 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), 3458 offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, 3459 REGINFO_SENTINEL 3460 }; 3461 3462 /* Note that unlike TTBCR, writing to TTBCR2 does not require flushing 3463 * qemu tlbs nor adjusting cached masks. 
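     * TTBCR2 maps only the high half of TCR_EL1/TCR_EL3 (offsetofhigh32 below);
     * the fields that feed the cached mask and base_mask live in the low half.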
3464 */ 3465 static const ARMCPRegInfo ttbcr2_reginfo = { 3466 .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, 3467 .access = PL1_RW, .type = ARM_CP_ALIAS, 3468 .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]), 3469 offsetofhigh32(CPUARMState, cp15.tcr_el[1]) }, 3470 }; 3471 3472 static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, 3473 uint64_t value) 3474 { 3475 env->cp15.c15_ticonfig = value & 0xe7; 3476 /* The OS_TYPE bit in this register changes the reported CPUID! */ 3477 env->cp15.c0_cpuid = (value & (1 << 5)) ? 3478 ARM_CPUID_TI915T : ARM_CPUID_TI925T; 3479 } 3480 3481 static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, 3482 uint64_t value) 3483 { 3484 env->cp15.c15_threadid = value & 0xffff; 3485 } 3486 3487 static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, 3488 uint64_t value) 3489 { 3490 /* Wait-for-interrupt (deprecated) */ 3491 cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 3492 } 3493 3494 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, 3495 uint64_t value) 3496 { 3497 /* On OMAP there are registers indicating the max/min index of dcache lines 3498 * containing a dirty line; cache flush operations have to reset these. 3499 */ 3500 env->cp15.c15_i_max = 0x000; 3501 env->cp15.c15_i_min = 0xff0; 3502 } 3503 3504 static const ARMCPRegInfo omap_cp_reginfo[] = { 3505 { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, 3506 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, 3507 .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), 3508 .resetvalue = 0, }, 3509 { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, 3510 .access = PL1_RW, .type = ARM_CP_NOP }, 3511 { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, 3512 .access = PL1_RW, 3513 .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, 3514 .writefn = omap_ticonfig_write }, 3515 { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, 3516 .access = PL1_RW, 3517 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, 3518 { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, 3519 .access = PL1_RW, .resetvalue = 0xff0, 3520 .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, 3521 { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, 3522 .access = PL1_RW, 3523 .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, 3524 .writefn = omap_threadid_write }, 3525 { .name = "TI925T_STATUS", .cp = 15, .crn = 15, 3526 .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3527 .type = ARM_CP_NO_RAW, 3528 .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, 3529 /* TODO: Peripheral port remap register: 3530 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller 3531 * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), 3532 * when MMU is off. 
3533 */ 3534 { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, 3535 .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, 3536 .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, 3537 .writefn = omap_cachemaint_write }, 3538 { .name = "C9", .cp = 15, .crn = 9, 3539 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, 3540 .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, 3541 REGINFO_SENTINEL 3542 }; 3543 3544 static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, 3545 uint64_t value) 3546 { 3547 env->cp15.c15_cpar = value & 0x3fff; 3548 } 3549 3550 static const ARMCPRegInfo xscale_cp_reginfo[] = { 3551 { .name = "XSCALE_CPAR", 3552 .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, 3553 .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, 3554 .writefn = xscale_cpar_write, }, 3555 { .name = "XSCALE_AUXCR", 3556 .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, 3557 .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), 3558 .resetvalue = 0, }, 3559 /* XScale specific cache-lockdown: since we have no cache we NOP these 3560 * and hope the guest does not really rely on cache behaviour. 3561 */ 3562 { .name = "XSCALE_LOCK_ICACHE_LINE", 3563 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, 3564 .access = PL1_W, .type = ARM_CP_NOP }, 3565 { .name = "XSCALE_UNLOCK_ICACHE", 3566 .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, 3567 .access = PL1_W, .type = ARM_CP_NOP }, 3568 { .name = "XSCALE_DCACHE_LOCK", 3569 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, 3570 .access = PL1_RW, .type = ARM_CP_NOP }, 3571 { .name = "XSCALE_UNLOCK_DCACHE", 3572 .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, 3573 .access = PL1_W, .type = ARM_CP_NOP }, 3574 REGINFO_SENTINEL 3575 }; 3576 3577 static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { 3578 /* RAZ/WI the whole crn=15 space, when we don't have a more specific 3579 * implementation of this implementation-defined space. 3580 * Ideally this should eventually disappear in favour of actually 3581 * implementing the correct behaviour for all cores. 
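 * (The RAZ/WI behaviour comes from ARM_CP_CONST with a zero resetvalue:
 * reads return the constant and writes have no backing state to update.)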
3582 */ 3583 { .name = "C15_IMPDEF", .cp = 15, .crn = 15, 3584 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 3585 .access = PL1_RW, 3586 .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, 3587 .resetvalue = 0 }, 3588 REGINFO_SENTINEL 3589 }; 3590 3591 static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { 3592 /* Cache status: RAZ because we have no cache so it's always clean */ 3593 { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, 3594 .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3595 .resetvalue = 0 }, 3596 REGINFO_SENTINEL 3597 }; 3598 3599 static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { 3600 /* We never have a a block transfer operation in progress */ 3601 { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, 3602 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3603 .resetvalue = 0 }, 3604 /* The cache ops themselves: these all NOP for QEMU */ 3605 { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, 3606 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3607 { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, 3608 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3609 { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, 3610 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3611 { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, 3612 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3613 { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, 3614 .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3615 { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, 3616 .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT }, 3617 REGINFO_SENTINEL 3618 }; 3619 3620 static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { 3621 /* The cache test-and-clean instructions always return (1 << 30) 3622 * to indicate that there are no dirty cache lines. 3623 */ 3624 { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, 3625 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3626 .resetvalue = (1 << 30) }, 3627 { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, 3628 .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, 3629 .resetvalue = (1 << 30) }, 3630 REGINFO_SENTINEL 3631 }; 3632 3633 static const ARMCPRegInfo strongarm_cp_reginfo[] = { 3634 /* Ignore ReadBuffer accesses */ 3635 { .name = "C9_READBUFFER", .cp = 15, .crn = 9, 3636 .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, 3637 .access = PL1_RW, .resetvalue = 0, 3638 .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, 3639 REGINFO_SENTINEL 3640 }; 3641 3642 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3643 { 3644 ARMCPU *cpu = arm_env_get_cpu(env); 3645 unsigned int cur_el = arm_current_el(env); 3646 bool secure = arm_is_secure(env); 3647 3648 if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 3649 return env->cp15.vpidr_el2; 3650 } 3651 return raw_read(env, ri); 3652 } 3653 3654 static uint64_t mpidr_read_val(CPUARMState *env) 3655 { 3656 ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env)); 3657 uint64_t mpidr = cpu->mp_affinity; 3658 3659 if (arm_feature(env, ARM_FEATURE_V7MP)) { 3660 mpidr |= (1U << 31); 3661 /* Cores which are uniprocessor (non-coherent) 3662 * but still implement the MP extensions set 3663 * bit 30. (For instance, Cortex-R5). 
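 * Bit 31 is RES1 whenever the MP extensions are implemented at all, which
 * is why it is set unconditionally in the V7MP case above.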
3664 */ 3665 if (cpu->mp_is_up) { 3666 mpidr |= (1u << 30); 3667 } 3668 } 3669 return mpidr; 3670 } 3671 3672 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3673 { 3674 unsigned int cur_el = arm_current_el(env); 3675 bool secure = arm_is_secure(env); 3676 3677 if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) { 3678 return env->cp15.vmpidr_el2; 3679 } 3680 return mpidr_read_val(env); 3681 } 3682 3683 static const ARMCPRegInfo lpae_cp_reginfo[] = { 3684 /* NOP AMAIR0/1 */ 3685 { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, 3686 .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, 3687 .access = PL1_RW, .type = ARM_CP_CONST, 3688 .resetvalue = 0 }, 3689 /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ 3690 { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, 3691 .access = PL1_RW, .type = ARM_CP_CONST, 3692 .resetvalue = 0 }, 3693 { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, 3694 .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, 3695 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), 3696 offsetof(CPUARMState, cp15.par_ns)} }, 3697 { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, 3698 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3699 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), 3700 offsetof(CPUARMState, cp15.ttbr0_ns) }, 3701 .writefn = vmsa_ttbr_write, }, 3702 { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, 3703 .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 3704 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), 3705 offsetof(CPUARMState, cp15.ttbr1_ns) }, 3706 .writefn = vmsa_ttbr_write, }, 3707 REGINFO_SENTINEL 3708 }; 3709 3710 static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3711 { 3712 return vfp_get_fpcr(env); 3713 } 3714 3715 static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3716 uint64_t value) 3717 { 3718 vfp_set_fpcr(env, value); 3719 } 3720 3721 static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3722 { 3723 return vfp_get_fpsr(env); 3724 } 3725 3726 static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3727 uint64_t value) 3728 { 3729 vfp_set_fpsr(env, value); 3730 } 3731 3732 static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, 3733 bool isread) 3734 { 3735 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) { 3736 return CP_ACCESS_TRAP; 3737 } 3738 return CP_ACCESS_OK; 3739 } 3740 3741 static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, 3742 uint64_t value) 3743 { 3744 env->daif = value & PSTATE_DAIF; 3745 } 3746 3747 static CPAccessResult aa64_cacheop_access(CPUARMState *env, 3748 const ARMCPRegInfo *ri, 3749 bool isread) 3750 { 3751 /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless 3752 * SCTLR_EL1.UCI is set. 
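 * (SCTLR_EL1.UCI is the bit that permits EL0 to issue DC CVAU, DC CVAC,
 * DC CIVAC and IC IVAU; when it is clear those instructions trap to EL1.)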
3753 */ 3754 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) { 3755 return CP_ACCESS_TRAP; 3756 } 3757 return CP_ACCESS_OK; 3758 } 3759 3760 /* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions 3761 * Page D4-1736 (DDI0487A.b) 3762 */ 3763 3764 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3765 uint64_t value) 3766 { 3767 CPUState *cs = ENV_GET_CPU(env); 3768 bool sec = arm_is_secure_below_el3(env); 3769 3770 if (sec) { 3771 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3772 ARMMMUIdxBit_S1SE1 | 3773 ARMMMUIdxBit_S1SE0); 3774 } else { 3775 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3776 ARMMMUIdxBit_S12NSE1 | 3777 ARMMMUIdxBit_S12NSE0); 3778 } 3779 } 3780 3781 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3782 uint64_t value) 3783 { 3784 CPUState *cs = ENV_GET_CPU(env); 3785 3786 if (tlb_force_broadcast(env)) { 3787 tlbi_aa64_vmalle1is_write(env, NULL, value); 3788 return; 3789 } 3790 3791 if (arm_is_secure_below_el3(env)) { 3792 tlb_flush_by_mmuidx(cs, 3793 ARMMMUIdxBit_S1SE1 | 3794 ARMMMUIdxBit_S1SE0); 3795 } else { 3796 tlb_flush_by_mmuidx(cs, 3797 ARMMMUIdxBit_S12NSE1 | 3798 ARMMMUIdxBit_S12NSE0); 3799 } 3800 } 3801 3802 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3803 uint64_t value) 3804 { 3805 /* Note that the 'ALL' scope must invalidate both stage 1 and 3806 * stage 2 translations, whereas most other scopes only invalidate 3807 * stage 1 translations. 3808 */ 3809 ARMCPU *cpu = arm_env_get_cpu(env); 3810 CPUState *cs = CPU(cpu); 3811 3812 if (arm_is_secure_below_el3(env)) { 3813 tlb_flush_by_mmuidx(cs, 3814 ARMMMUIdxBit_S1SE1 | 3815 ARMMMUIdxBit_S1SE0); 3816 } else { 3817 if (arm_feature(env, ARM_FEATURE_EL2)) { 3818 tlb_flush_by_mmuidx(cs, 3819 ARMMMUIdxBit_S12NSE1 | 3820 ARMMMUIdxBit_S12NSE0 | 3821 ARMMMUIdxBit_S2NS); 3822 } else { 3823 tlb_flush_by_mmuidx(cs, 3824 ARMMMUIdxBit_S12NSE1 | 3825 ARMMMUIdxBit_S12NSE0); 3826 } 3827 } 3828 } 3829 3830 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3831 uint64_t value) 3832 { 3833 ARMCPU *cpu = arm_env_get_cpu(env); 3834 CPUState *cs = CPU(cpu); 3835 3836 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 3837 } 3838 3839 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3840 uint64_t value) 3841 { 3842 ARMCPU *cpu = arm_env_get_cpu(env); 3843 CPUState *cs = CPU(cpu); 3844 3845 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); 3846 } 3847 3848 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3849 uint64_t value) 3850 { 3851 /* Note that the 'ALL' scope must invalidate both stage 1 and 3852 * stage 2 translations, whereas most other scopes only invalidate 3853 * stage 1 translations. 
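 * Being the Inner Shareable form, this flush must also be broadcast to the
 * other CPUs, which is what the *_all_cpus_synced variants below provide.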
3854 */ 3855 CPUState *cs = ENV_GET_CPU(env); 3856 bool sec = arm_is_secure_below_el3(env); 3857 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3858 3859 if (sec) { 3860 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3861 ARMMMUIdxBit_S1SE1 | 3862 ARMMMUIdxBit_S1SE0); 3863 } else if (has_el2) { 3864 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3865 ARMMMUIdxBit_S12NSE1 | 3866 ARMMMUIdxBit_S12NSE0 | 3867 ARMMMUIdxBit_S2NS); 3868 } else { 3869 tlb_flush_by_mmuidx_all_cpus_synced(cs, 3870 ARMMMUIdxBit_S12NSE1 | 3871 ARMMMUIdxBit_S12NSE0); 3872 } 3873 } 3874 3875 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3876 uint64_t value) 3877 { 3878 CPUState *cs = ENV_GET_CPU(env); 3879 3880 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3881 } 3882 3883 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3884 uint64_t value) 3885 { 3886 CPUState *cs = ENV_GET_CPU(env); 3887 3888 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3889 } 3890 3891 static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3892 uint64_t value) 3893 { 3894 /* Invalidate by VA, EL2 3895 * Currently handles both VAE2 and VALE2, since we don't support 3896 * flush-last-level-only. 3897 */ 3898 ARMCPU *cpu = arm_env_get_cpu(env); 3899 CPUState *cs = CPU(cpu); 3900 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3901 3902 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); 3903 } 3904 3905 static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3906 uint64_t value) 3907 { 3908 /* Invalidate by VA, EL3 3909 * Currently handles both VAE3 and VALE3, since we don't support 3910 * flush-last-level-only. 3911 */ 3912 ARMCPU *cpu = arm_env_get_cpu(env); 3913 CPUState *cs = CPU(cpu); 3914 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3915 3916 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3); 3917 } 3918 3919 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3920 uint64_t value) 3921 { 3922 ARMCPU *cpu = arm_env_get_cpu(env); 3923 CPUState *cs = CPU(cpu); 3924 bool sec = arm_is_secure_below_el3(env); 3925 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3926 3927 if (sec) { 3928 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3929 ARMMMUIdxBit_S1SE1 | 3930 ARMMMUIdxBit_S1SE0); 3931 } else { 3932 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3933 ARMMMUIdxBit_S12NSE1 | 3934 ARMMMUIdxBit_S12NSE0); 3935 } 3936 } 3937 3938 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3939 uint64_t value) 3940 { 3941 /* Invalidate by VA, EL1&0 (AArch64 version). 3942 * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, 3943 * since we don't support flush-for-specific-ASID-only or 3944 * flush-last-level-only. 
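 * The argument register carries VA[55:12] in bits [43:0]; shifting left by
 * 12 and sign-extracting 56 bits recovers the page-aligned address used
 * for the flush below.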
3945 */ 3946 ARMCPU *cpu = arm_env_get_cpu(env); 3947 CPUState *cs = CPU(cpu); 3948 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3949 3950 if (tlb_force_broadcast(env)) { 3951 tlbi_aa64_vae1is_write(env, NULL, value); 3952 return; 3953 } 3954 3955 if (arm_is_secure_below_el3(env)) { 3956 tlb_flush_page_by_mmuidx(cs, pageaddr, 3957 ARMMMUIdxBit_S1SE1 | 3958 ARMMMUIdxBit_S1SE0); 3959 } else { 3960 tlb_flush_page_by_mmuidx(cs, pageaddr, 3961 ARMMMUIdxBit_S12NSE1 | 3962 ARMMMUIdxBit_S12NSE0); 3963 } 3964 } 3965 3966 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3967 uint64_t value) 3968 { 3969 CPUState *cs = ENV_GET_CPU(env); 3970 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3971 3972 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3973 ARMMMUIdxBit_S1E2); 3974 } 3975 3976 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3977 uint64_t value) 3978 { 3979 CPUState *cs = ENV_GET_CPU(env); 3980 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3981 3982 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 3983 ARMMMUIdxBit_S1E3); 3984 } 3985 3986 static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3987 uint64_t value) 3988 { 3989 /* Invalidate by IPA. This has to invalidate any structures that 3990 * contain only stage 2 translation information, but does not need 3991 * to apply to structures that contain combined stage 1 and stage 2 3992 * translation information. 3993 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 3994 */ 3995 ARMCPU *cpu = arm_env_get_cpu(env); 3996 CPUState *cs = CPU(cpu); 3997 uint64_t pageaddr; 3998 3999 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4000 return; 4001 } 4002 4003 pageaddr = sextract64(value << 12, 0, 48); 4004 4005 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS); 4006 } 4007 4008 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4009 uint64_t value) 4010 { 4011 CPUState *cs = ENV_GET_CPU(env); 4012 uint64_t pageaddr; 4013 4014 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { 4015 return; 4016 } 4017 4018 pageaddr = sextract64(value << 12, 0, 48); 4019 4020 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, 4021 ARMMMUIdxBit_S2NS); 4022 } 4023 4024 static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, 4025 bool isread) 4026 { 4027 /* We don't implement EL2, so the only control on DC ZVA is the 4028 * bit in the SCTLR which can prohibit access for EL0. 4029 */ 4030 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) { 4031 return CP_ACCESS_TRAP; 4032 } 4033 return CP_ACCESS_OK; 4034 } 4035 4036 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4037 { 4038 ARMCPU *cpu = arm_env_get_cpu(env); 4039 int dzp_bit = 1 << 4; 4040 4041 /* DZP indicates whether DC ZVA access is allowed */ 4042 if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { 4043 dzp_bit = 0; 4044 } 4045 return cpu->dcz_blocksize | dzp_bit; 4046 } 4047 4048 static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 4049 bool isread) 4050 { 4051 if (!(env->pstate & PSTATE_SP)) { 4052 /* Access to SP_EL0 is undefined if it's being used as 4053 * the stack pointer. 
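 * (We report this with CP_ACCESS_TRAP_UNCATEGORIZED below, i.e. as an
 * UNDEF with no more specific syndrome information.)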
4054 */ 4055 return CP_ACCESS_TRAP_UNCATEGORIZED; 4056 } 4057 return CP_ACCESS_OK; 4058 } 4059 4060 static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) 4061 { 4062 return env->pstate & PSTATE_SP; 4063 } 4064 4065 static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) 4066 { 4067 update_spsel(env, val); 4068 } 4069 4070 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4071 uint64_t value) 4072 { 4073 ARMCPU *cpu = arm_env_get_cpu(env); 4074 4075 if (raw_read(env, ri) == value) { 4076 /* Skip the TLB flush if nothing actually changed; Linux likes 4077 * to do a lot of pointless SCTLR writes. 4078 */ 4079 return; 4080 } 4081 4082 if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { 4083 /* M bit is RAZ/WI for PMSA with no MPU implemented */ 4084 value &= ~SCTLR_M; 4085 } 4086 4087 raw_write(env, ri, value); 4088 /* ??? Lots of these bits are not implemented. */ 4089 /* This may enable/disable the MMU, so do a TLB flush. */ 4090 tlb_flush(CPU(cpu)); 4091 } 4092 4093 static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri, 4094 bool isread) 4095 { 4096 if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) { 4097 return CP_ACCESS_TRAP_FP_EL2; 4098 } 4099 if (env->cp15.cptr_el[3] & CPTR_TFP) { 4100 return CP_ACCESS_TRAP_FP_EL3; 4101 } 4102 return CP_ACCESS_OK; 4103 } 4104 4105 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4106 uint64_t value) 4107 { 4108 env->cp15.mdcr_el3 = value & SDCR_VALID_MASK; 4109 } 4110 4111 static const ARMCPRegInfo v8_cp_reginfo[] = { 4112 /* Minimal set of EL0-visible registers. This will need to be expanded 4113 * significantly for system emulation of AArch64 CPUs. 4114 */ 4115 { .name = "NZCV", .state = ARM_CP_STATE_AA64, 4116 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, 4117 .access = PL0_RW, .type = ARM_CP_NZCV }, 4118 { .name = "DAIF", .state = ARM_CP_STATE_AA64, 4119 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, 4120 .type = ARM_CP_NO_RAW, 4121 .access = PL0_RW, .accessfn = aa64_daif_access, 4122 .fieldoffset = offsetof(CPUARMState, daif), 4123 .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, 4124 { .name = "FPCR", .state = ARM_CP_STATE_AA64, 4125 .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, 4126 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4127 .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, 4128 { .name = "FPSR", .state = ARM_CP_STATE_AA64, 4129 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, 4130 .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, 4131 .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, 4132 { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, 4133 .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, 4134 .access = PL0_R, .type = ARM_CP_NO_RAW, 4135 .readfn = aa64_dczid_read }, 4136 { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, 4137 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, 4138 .access = PL0_W, .type = ARM_CP_DC_ZVA, 4139 #ifndef CONFIG_USER_ONLY 4140 /* Avoid overhead of an access check that always passes in user-mode */ 4141 .accessfn = aa64_zva_access, 4142 #endif 4143 }, 4144 { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, 4145 .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, 4146 .access = PL1_R, .type = ARM_CP_CURRENTEL }, 4147 /* Cache ops: all NOPs since we don't emulate caches */ 4148 { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, 4149 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4150 .access = PL1_W, 
.type = ARM_CP_NOP }, 4151 { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, 4152 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4153 .access = PL1_W, .type = ARM_CP_NOP }, 4154 { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, 4155 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, 4156 .access = PL0_W, .type = ARM_CP_NOP, 4157 .accessfn = aa64_cacheop_access }, 4158 { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, 4159 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4160 .access = PL1_W, .type = ARM_CP_NOP }, 4161 { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, 4162 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4163 .access = PL1_W, .type = ARM_CP_NOP }, 4164 { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, 4165 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, 4166 .access = PL0_W, .type = ARM_CP_NOP, 4167 .accessfn = aa64_cacheop_access }, 4168 { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, 4169 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4170 .access = PL1_W, .type = ARM_CP_NOP }, 4171 { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, 4172 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, 4173 .access = PL0_W, .type = ARM_CP_NOP, 4174 .accessfn = aa64_cacheop_access }, 4175 { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, 4176 .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, 4177 .access = PL0_W, .type = ARM_CP_NOP, 4178 .accessfn = aa64_cacheop_access }, 4179 { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, 4180 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4181 .access = PL1_W, .type = ARM_CP_NOP }, 4182 /* TLBI operations */ 4183 { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, 4184 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, 4185 .access = PL1_W, .type = ARM_CP_NO_RAW, 4186 .writefn = tlbi_aa64_vmalle1is_write }, 4187 { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, 4188 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, 4189 .access = PL1_W, .type = ARM_CP_NO_RAW, 4190 .writefn = tlbi_aa64_vae1is_write }, 4191 { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, 4192 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, 4193 .access = PL1_W, .type = ARM_CP_NO_RAW, 4194 .writefn = tlbi_aa64_vmalle1is_write }, 4195 { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, 4196 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, 4197 .access = PL1_W, .type = ARM_CP_NO_RAW, 4198 .writefn = tlbi_aa64_vae1is_write }, 4199 { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, 4200 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4201 .access = PL1_W, .type = ARM_CP_NO_RAW, 4202 .writefn = tlbi_aa64_vae1is_write }, 4203 { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, 4204 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4205 .access = PL1_W, .type = ARM_CP_NO_RAW, 4206 .writefn = tlbi_aa64_vae1is_write }, 4207 { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, 4208 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, 4209 .access = PL1_W, .type = ARM_CP_NO_RAW, 4210 .writefn = tlbi_aa64_vmalle1_write }, 4211 { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, 4212 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, 4213 .access = PL1_W, .type = ARM_CP_NO_RAW, 4214 .writefn = tlbi_aa64_vae1_write }, 4215 { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, 4216 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, 4217 .access = PL1_W, .type = ARM_CP_NO_RAW, 4218 .writefn = tlbi_aa64_vmalle1_write }, 4219 { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, 4220 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, 
.opc2 = 3, 4221 .access = PL1_W, .type = ARM_CP_NO_RAW, 4222 .writefn = tlbi_aa64_vae1_write }, 4223 { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, 4224 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4225 .access = PL1_W, .type = ARM_CP_NO_RAW, 4226 .writefn = tlbi_aa64_vae1_write }, 4227 { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, 4228 .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4229 .access = PL1_W, .type = ARM_CP_NO_RAW, 4230 .writefn = tlbi_aa64_vae1_write }, 4231 { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, 4232 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4233 .access = PL2_W, .type = ARM_CP_NO_RAW, 4234 .writefn = tlbi_aa64_ipas2e1is_write }, 4235 { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, 4236 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4237 .access = PL2_W, .type = ARM_CP_NO_RAW, 4238 .writefn = tlbi_aa64_ipas2e1is_write }, 4239 { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, 4240 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4241 .access = PL2_W, .type = ARM_CP_NO_RAW, 4242 .writefn = tlbi_aa64_alle1is_write }, 4243 { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, 4244 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, 4245 .access = PL2_W, .type = ARM_CP_NO_RAW, 4246 .writefn = tlbi_aa64_alle1is_write }, 4247 { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, 4248 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4249 .access = PL2_W, .type = ARM_CP_NO_RAW, 4250 .writefn = tlbi_aa64_ipas2e1_write }, 4251 { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, 4252 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4253 .access = PL2_W, .type = ARM_CP_NO_RAW, 4254 .writefn = tlbi_aa64_ipas2e1_write }, 4255 { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, 4256 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4257 .access = PL2_W, .type = ARM_CP_NO_RAW, 4258 .writefn = tlbi_aa64_alle1_write }, 4259 { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, 4260 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, 4261 .access = PL2_W, .type = ARM_CP_NO_RAW, 4262 .writefn = tlbi_aa64_alle1is_write }, 4263 #ifndef CONFIG_USER_ONLY 4264 /* 64 bit address translation operations */ 4265 { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, 4266 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, 4267 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4268 { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, 4269 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, 4270 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4271 { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, 4272 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, 4273 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4274 { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, 4275 .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, 4276 .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4277 { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, 4278 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, 4279 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4280 { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, 4281 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, 4282 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4283 { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, 4284 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, 4285 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4286 { .name = "AT_S12E0W", 
.state = ARM_CP_STATE_AA64, 4287 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, 4288 .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4289 /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ 4290 { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, 4291 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, 4292 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4293 { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, 4294 .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, 4295 .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4296 { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, 4297 .type = ARM_CP_ALIAS, 4298 .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, 4299 .access = PL1_RW, .resetvalue = 0, 4300 .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), 4301 .writefn = par_write }, 4302 #endif 4303 /* TLB invalidate last level of translation table walk */ 4304 { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, 4305 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write }, 4306 { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, 4307 .type = ARM_CP_NO_RAW, .access = PL1_W, 4308 .writefn = tlbimvaa_is_write }, 4309 { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, 4310 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write }, 4311 { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, 4312 .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write }, 4313 { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4314 .type = ARM_CP_NO_RAW, .access = PL2_W, 4315 .writefn = tlbimva_hyp_write }, 4316 { .name = "TLBIMVALHIS", 4317 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4318 .type = ARM_CP_NO_RAW, .access = PL2_W, 4319 .writefn = tlbimva_hyp_is_write }, 4320 { .name = "TLBIIPAS2", 4321 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, 4322 .type = ARM_CP_NO_RAW, .access = PL2_W, 4323 .writefn = tlbiipas2_write }, 4324 { .name = "TLBIIPAS2IS", 4325 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, 4326 .type = ARM_CP_NO_RAW, .access = PL2_W, 4327 .writefn = tlbiipas2_is_write }, 4328 { .name = "TLBIIPAS2L", 4329 .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, 4330 .type = ARM_CP_NO_RAW, .access = PL2_W, 4331 .writefn = tlbiipas2_write }, 4332 { .name = "TLBIIPAS2LIS", 4333 .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, 4334 .type = ARM_CP_NO_RAW, .access = PL2_W, 4335 .writefn = tlbiipas2_is_write }, 4336 /* 32 bit cache operations */ 4337 { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, 4338 .type = ARM_CP_NOP, .access = PL1_W }, 4339 { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, 4340 .type = ARM_CP_NOP, .access = PL1_W }, 4341 { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, 4342 .type = ARM_CP_NOP, .access = PL1_W }, 4343 { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, 4344 .type = ARM_CP_NOP, .access = PL1_W }, 4345 { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, 4346 .type = ARM_CP_NOP, .access = PL1_W }, 4347 { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, 4348 .type = ARM_CP_NOP, .access = PL1_W }, 4349 { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, 4350 .type = ARM_CP_NOP, .access = PL1_W }, 4351 { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, 4352 
.type = ARM_CP_NOP, .access = PL1_W }, 4353 { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, 4354 .type = ARM_CP_NOP, .access = PL1_W }, 4355 { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, 4356 .type = ARM_CP_NOP, .access = PL1_W }, 4357 { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, 4358 .type = ARM_CP_NOP, .access = PL1_W }, 4359 { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, 4360 .type = ARM_CP_NOP, .access = PL1_W }, 4361 { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, 4362 .type = ARM_CP_NOP, .access = PL1_W }, 4363 /* MMU Domain access control / MPU write buffer control */ 4364 { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, 4365 .access = PL1_RW, .resetvalue = 0, 4366 .writefn = dacr_write, .raw_writefn = raw_write, 4367 .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), 4368 offsetoflow32(CPUARMState, cp15.dacr_ns) } }, 4369 { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, 4370 .type = ARM_CP_ALIAS, 4371 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, 4372 .access = PL1_RW, 4373 .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, 4374 { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, 4375 .type = ARM_CP_ALIAS, 4376 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, 4377 .access = PL1_RW, 4378 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, 4379 /* We rely on the access checks not allowing the guest to write to the 4380 * state field when SPSel indicates that it's being used as the stack 4381 * pointer. 4382 */ 4383 { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, 4384 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, 4385 .access = PL1_RW, .accessfn = sp_el0_access, 4386 .type = ARM_CP_ALIAS, 4387 .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, 4388 { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, 4389 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, 4390 .access = PL2_RW, .type = ARM_CP_ALIAS, 4391 .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, 4392 { .name = "SPSel", .state = ARM_CP_STATE_AA64, 4393 .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, 4394 .type = ARM_CP_NO_RAW, 4395 .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, 4396 { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, 4397 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, 4398 .type = ARM_CP_ALIAS, 4399 .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]), 4400 .access = PL2_RW, .accessfn = fpexc32_access }, 4401 { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, 4402 .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, 4403 .access = PL2_RW, .resetvalue = 0, 4404 .writefn = dacr_write, .raw_writefn = raw_write, 4405 .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, 4406 { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, 4407 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, 4408 .access = PL2_RW, .resetvalue = 0, 4409 .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, 4410 { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, 4411 .type = ARM_CP_ALIAS, 4412 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, 4413 .access = PL2_RW, 4414 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, 4415 { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, 4416 .type = ARM_CP_ALIAS, 4417 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, 4418 .access = PL2_RW, 4419 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, 4420 { .name = "SPSR_UND", .state = 
ARM_CP_STATE_AA64, 4421 .type = ARM_CP_ALIAS, 4422 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, 4423 .access = PL2_RW, 4424 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, 4425 { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, 4426 .type = ARM_CP_ALIAS, 4427 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, 4428 .access = PL2_RW, 4429 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, 4430 { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, 4431 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, 4432 .resetvalue = 0, 4433 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, 4434 { .name = "SDCR", .type = ARM_CP_ALIAS, 4435 .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, 4436 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4437 .writefn = sdcr_write, 4438 .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, 4439 REGINFO_SENTINEL 4440 }; 4441 4442 /* Used to describe the behaviour of EL2 regs when EL2 does not exist. */ 4443 static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = { 4444 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 4445 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 4446 .access = PL2_RW, 4447 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore }, 4448 { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH, 4449 .type = ARM_CP_NO_RAW, 4450 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4451 .access = PL2_RW, 4452 .type = ARM_CP_CONST, .resetvalue = 0 }, 4453 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 4454 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 4455 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4456 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 4457 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 4458 .access = PL2_RW, 4459 .type = ARM_CP_CONST, .resetvalue = 0 }, 4460 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 4461 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 4462 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4463 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 4464 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 4465 .access = PL2_RW, .type = ARM_CP_CONST, 4466 .resetvalue = 0 }, 4467 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 4468 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 4469 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4470 { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 4471 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 4472 .access = PL2_RW, .type = ARM_CP_CONST, 4473 .resetvalue = 0 }, 4474 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 4475 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 4476 .access = PL2_RW, .type = ARM_CP_CONST, 4477 .resetvalue = 0 }, 4478 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 4479 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 4480 .access = PL2_RW, .type = ARM_CP_CONST, 4481 .resetvalue = 0 }, 4482 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 4483 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 4484 .access = PL2_RW, .type = ARM_CP_CONST, 4485 .resetvalue = 0 }, 4486 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 4487 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 4488 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4489 { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH, 4490 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4491 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 4492 .type = ARM_CP_CONST, .resetvalue = 0 }, 4493 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 
4494 .cp = 15, .opc1 = 6, .crm = 2, 4495 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4496 .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 }, 4497 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 4498 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 4499 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4500 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 4501 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 4502 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4503 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 4504 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 4505 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4506 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 4507 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 4508 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4509 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 4510 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4511 .resetvalue = 0 }, 4512 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4513 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4514 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4515 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4516 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 4517 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4518 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 4519 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4520 .resetvalue = 0 }, 4521 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 4522 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 4523 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4524 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 4525 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST, 4526 .resetvalue = 0 }, 4527 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 4528 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 4529 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4530 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 4531 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 4532 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4533 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 4534 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 4535 .access = PL2_RW, .accessfn = access_tda, 4536 .type = ARM_CP_CONST, .resetvalue = 0 }, 4537 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH, 4538 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4539 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 4540 .type = ARM_CP_CONST, .resetvalue = 0 }, 4541 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 4542 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 4543 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4544 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 4545 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 4546 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4547 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 4548 .type = ARM_CP_CONST, 4549 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 4550 .access = PL2_RW, .resetvalue = 0 }, 4551 REGINFO_SENTINEL 4552 }; 4553 4554 /* Ditto, but for registers which exist in ARMv8 but not v7 */ 4555 static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = { 4556 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 4557 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 4558 .access = PL2_RW, 4559 .type = ARM_CP_CONST, .resetvalue = 0 }, 4560 REGINFO_SENTINEL 4561 }; 4562 4563 static 
void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 4564 { 4565 ARMCPU *cpu = arm_env_get_cpu(env); 4566 uint64_t valid_mask = HCR_MASK; 4567 4568 if (arm_feature(env, ARM_FEATURE_EL3)) { 4569 valid_mask &= ~HCR_HCD; 4570 } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { 4571 /* Architecturally HCR.TSC is RES0 if EL3 is not implemented. 4572 * However, if we're using the SMC PSCI conduit then QEMU is 4573 * effectively acting like EL3 firmware and so the guest at 4574 * EL2 should retain the ability to prevent EL1 from being 4575 * able to make SMC calls into the ersatz firmware, so in 4576 * that case HCR.TSC should be read/write. 4577 */ 4578 valid_mask &= ~HCR_TSC; 4579 } 4580 if (cpu_isar_feature(aa64_lor, cpu)) { 4581 valid_mask |= HCR_TLOR; 4582 } 4583 if (cpu_isar_feature(aa64_pauth, cpu)) { 4584 valid_mask |= HCR_API | HCR_APK; 4585 } 4586 4587 /* Clear RES0 bits. */ 4588 value &= valid_mask; 4589 4590 /* These bits change the MMU setup: 4591 * HCR_VM enables stage 2 translation 4592 * HCR_PTW forbids certain page-table setups 4593 * HCR_DC Disables stage1 and enables stage2 translation 4594 */ 4595 if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) { 4596 tlb_flush(CPU(cpu)); 4597 } 4598 env->cp15.hcr_el2 = value; 4599 4600 /* 4601 * Updates to VI and VF require us to update the status of 4602 * virtual interrupts, which are the logical OR of these bits 4603 * and the state of the input lines from the GIC. (This requires 4604 * that we have the iothread lock, which is done by marking the 4605 * reginfo structs as ARM_CP_IO.) 4606 * Note that if a write to HCR pends a VIRQ or VFIQ it is never 4607 * possible for it to be taken immediately, because VIRQ and 4608 * VFIQ are masked unless running at EL0 or EL1, and HCR 4609 * can only be written at EL2. 4610 */ 4611 g_assert(qemu_mutex_iothread_locked()); 4612 arm_cpu_update_virq(cpu); 4613 arm_cpu_update_vfiq(cpu); 4614 } 4615 4616 static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, 4617 uint64_t value) 4618 { 4619 /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ 4620 value = deposit64(env->cp15.hcr_el2, 32, 32, value); 4621 hcr_write(env, NULL, value); 4622 } 4623 4624 static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, 4625 uint64_t value) 4626 { 4627 /* Handle HCR write, i.e. write to low half of HCR_EL2 */ 4628 value = deposit64(env->cp15.hcr_el2, 0, 32, value); 4629 hcr_write(env, NULL, value); 4630 } 4631 4632 /* 4633 * Return the effective value of HCR_EL2. 4634 * Bits that are not included here: 4635 * RW (read from SCR_EL3.RW as needed) 4636 */ 4637 uint64_t arm_hcr_el2_eff(CPUARMState *env) 4638 { 4639 uint64_t ret = env->cp15.hcr_el2; 4640 4641 if (arm_is_secure_below_el3(env)) { 4642 /* 4643 * "This register has no effect if EL2 is not enabled in the 4644 * current Security state". This is ARMv8.4-SecEL2 speak for 4645 * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). 4646 * 4647 * Prior to that, the language was "In an implementation that 4648 * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves 4649 * as if this field is 0 for all purposes other than a direct 4650 * read or write access of HCR_EL2". With lots of enumeration 4651 * on a per-field basis. In current QEMU, this is condition 4652 * is arm_is_secure_below_el3. 4653 * 4654 * Since the v8.4 language applies to the entire register, and 4655 * appears to be backward compatible, use that. 
4656 */ 4657 ret = 0; 4658 } else if (ret & HCR_TGE) { 4659 /* These bits are up-to-date as of ARMv8.4. */ 4660 if (ret & HCR_E2H) { 4661 ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | 4662 HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | 4663 HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | 4664 HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE); 4665 } else { 4666 ret |= HCR_FMO | HCR_IMO | HCR_AMO; 4667 } 4668 ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | 4669 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | 4670 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | 4671 HCR_TLOR); 4672 } 4673 4674 return ret; 4675 } 4676 4677 static const ARMCPRegInfo el2_cp_reginfo[] = { 4678 { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, 4679 .type = ARM_CP_IO, 4680 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4681 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 4682 .writefn = hcr_write }, 4683 { .name = "HCR", .state = ARM_CP_STATE_AA32, 4684 .type = ARM_CP_ALIAS | ARM_CP_IO, 4685 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, 4686 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), 4687 .writefn = hcr_writelow }, 4688 { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, 4689 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, 4690 .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, 4691 { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, 4692 .type = ARM_CP_ALIAS, 4693 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, 4694 .access = PL2_RW, 4695 .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, 4696 { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, 4697 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, 4698 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, 4699 { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, 4700 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, 4701 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, 4702 { .name = "HIFAR", .state = ARM_CP_STATE_AA32, 4703 .type = ARM_CP_ALIAS, 4704 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, 4705 .access = PL2_RW, 4706 .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, 4707 { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, 4708 .type = ARM_CP_ALIAS, 4709 .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, 4710 .access = PL2_RW, 4711 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, 4712 { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, 4713 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, 4714 .access = PL2_RW, .writefn = vbar_write, 4715 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), 4716 .resetvalue = 0 }, 4717 { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, 4718 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, 4719 .access = PL3_RW, .type = ARM_CP_ALIAS, 4720 .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, 4721 { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, 4722 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, 4723 .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, 4724 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) }, 4725 { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, 4726 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, 4727 .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), 4728 .resetvalue = 0 }, 4729 { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, 4730 .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, 4731 .access = PL2_RW, .type = ARM_CP_ALIAS, 4732 .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, 4733 { .name = 
"AMAIR_EL2", .state = ARM_CP_STATE_BOTH, 4734 .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, 4735 .access = PL2_RW, .type = ARM_CP_CONST, 4736 .resetvalue = 0 }, 4737 /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ 4738 { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, 4739 .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, 4740 .access = PL2_RW, .type = ARM_CP_CONST, 4741 .resetvalue = 0 }, 4742 { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, 4743 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, 4744 .access = PL2_RW, .type = ARM_CP_CONST, 4745 .resetvalue = 0 }, 4746 { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, 4747 .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, 4748 .access = PL2_RW, .type = ARM_CP_CONST, 4749 .resetvalue = 0 }, 4750 { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, 4751 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, 4752 .access = PL2_RW, 4753 /* no .writefn needed as this can't cause an ASID change; 4754 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 4755 */ 4756 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, 4757 { .name = "VTCR", .state = ARM_CP_STATE_AA32, 4758 .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4759 .type = ARM_CP_ALIAS, 4760 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4761 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 4762 { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, 4763 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, 4764 .access = PL2_RW, 4765 /* no .writefn needed as this can't cause an ASID change; 4766 * no .raw_writefn or .resetfn needed as we never use mask/base_mask 4767 */ 4768 .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, 4769 { .name = "VTTBR", .state = ARM_CP_STATE_AA32, 4770 .cp = 15, .opc1 = 6, .crm = 2, 4771 .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4772 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4773 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), 4774 .writefn = vttbr_write }, 4775 { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, 4776 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, 4777 .access = PL2_RW, .writefn = vttbr_write, 4778 .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, 4779 { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, 4780 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, 4781 .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, 4782 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, 4783 { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, 4784 .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, 4785 .access = PL2_RW, .resetvalue = 0, 4786 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, 4787 { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, 4788 .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, 4789 .access = PL2_RW, .resetvalue = 0, 4790 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4791 { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, 4792 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, 4793 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, 4794 { .name = "TLBIALLNSNH", 4795 .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, 4796 .type = ARM_CP_NO_RAW, .access = PL2_W, 4797 .writefn = tlbiall_nsnh_write }, 4798 { .name = "TLBIALLNSNHIS", 4799 .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, 4800 .type = ARM_CP_NO_RAW, .access = PL2_W, 4801 .writefn = tlbiall_nsnh_is_write }, 4802 { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4803 .type = ARM_CP_NO_RAW, .access = PL2_W, 4804 .writefn = 
tlbiall_hyp_write }, 4805 { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4806 .type = ARM_CP_NO_RAW, .access = PL2_W, 4807 .writefn = tlbiall_hyp_is_write }, 4808 { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4809 .type = ARM_CP_NO_RAW, .access = PL2_W, 4810 .writefn = tlbimva_hyp_write }, 4811 { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4812 .type = ARM_CP_NO_RAW, .access = PL2_W, 4813 .writefn = tlbimva_hyp_is_write }, 4814 { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, 4815 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, 4816 .type = ARM_CP_NO_RAW, .access = PL2_W, 4817 .writefn = tlbi_aa64_alle2_write }, 4818 { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, 4819 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, 4820 .type = ARM_CP_NO_RAW, .access = PL2_W, 4821 .writefn = tlbi_aa64_vae2_write }, 4822 { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, 4823 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, 4824 .access = PL2_W, .type = ARM_CP_NO_RAW, 4825 .writefn = tlbi_aa64_vae2_write }, 4826 { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, 4827 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, 4828 .access = PL2_W, .type = ARM_CP_NO_RAW, 4829 .writefn = tlbi_aa64_alle2is_write }, 4830 { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, 4831 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, 4832 .type = ARM_CP_NO_RAW, .access = PL2_W, 4833 .writefn = tlbi_aa64_vae2is_write }, 4834 { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, 4835 .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, 4836 .access = PL2_W, .type = ARM_CP_NO_RAW, 4837 .writefn = tlbi_aa64_vae2is_write }, 4838 #ifndef CONFIG_USER_ONLY 4839 /* Unlike the other EL2-related AT operations, these must 4840 * UNDEF from EL3 if EL2 is not implemented, which is why we 4841 * define them here rather than with the rest of the AT ops. 4842 */ 4843 { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, 4844 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4845 .access = PL2_W, .accessfn = at_s1e2_access, 4846 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4847 { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, 4848 .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4849 .access = PL2_W, .accessfn = at_s1e2_access, 4850 .type = ARM_CP_NO_RAW, .writefn = ats_write64 }, 4851 /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE 4852 * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 4853 * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose 4854 * to behave as if SCR.NS was 1. 4855 */ 4856 { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, 4857 .access = PL2_W, 4858 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4859 { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, 4860 .access = PL2_W, 4861 .writefn = ats1h_write, .type = ARM_CP_NO_RAW }, 4862 { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, 4863 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, 4864 /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the 4865 * reset values as IMPDEF. We choose to reset to 3 to comply with 4866 * both ARMv7 and ARMv8. 
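 * (Bit 0 is EL1PCTEN and bit 1 is EL1PCEN; they gate EL1/EL0 access to the
 * physical counter and the physical timer registers respectively.)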
4867 */ 4868 .access = PL2_RW, .resetvalue = 3, 4869 .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, 4870 { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, 4871 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, 4872 .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, 4873 .writefn = gt_cntvoff_write, 4874 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4875 { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, 4876 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, 4877 .writefn = gt_cntvoff_write, 4878 .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, 4879 { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, 4880 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, 4881 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4882 .type = ARM_CP_IO, .access = PL2_RW, 4883 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4884 { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, 4885 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), 4886 .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, 4887 .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, 4888 { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, 4889 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, 4890 .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, 4891 .resetfn = gt_hyp_timer_reset, 4892 .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, 4893 { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, 4894 .type = ARM_CP_IO, 4895 .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, 4896 .access = PL2_RW, 4897 .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), 4898 .resetvalue = 0, 4899 .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, 4900 #endif 4901 /* The only field of MDCR_EL2 that has a defined architectural reset value 4902 * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we 4903 * don't implement any PMU event counters, so using zero as a reset 4904 * value for MDCR_EL2 is okay 4905 */ 4906 { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, 4907 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, 4908 .access = PL2_RW, .resetvalue = 0, 4909 .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), }, 4910 { .name = "HPFAR", .state = ARM_CP_STATE_AA32, 4911 .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4912 .access = PL2_RW, .accessfn = access_el3_aa32ns, 4913 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4914 { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, 4915 .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, 4916 .access = PL2_RW, 4917 .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, 4918 { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, 4919 .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, 4920 .access = PL2_RW, 4921 .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, 4922 REGINFO_SENTINEL 4923 }; 4924 4925 static const ARMCPRegInfo el2_v8_cp_reginfo[] = { 4926 { .name = "HCR2", .state = ARM_CP_STATE_AA32, 4927 .type = ARM_CP_ALIAS | ARM_CP_IO, 4928 .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, 4929 .access = PL2_RW, 4930 .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), 4931 .writefn = hcr_writehigh }, 4932 REGINFO_SENTINEL 4933 }; 4934 4935 static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, 4936 bool isread) 4937 { 4938 /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. 4939 * At Secure EL1 it traps to EL3. 
4940 */ 4941 if (arm_current_el(env) == 3) { 4942 return CP_ACCESS_OK; 4943 } 4944 if (arm_is_secure_below_el3(env)) { 4945 return CP_ACCESS_TRAP_EL3; 4946 } 4947 /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ 4948 if (isread) { 4949 return CP_ACCESS_OK; 4950 } 4951 return CP_ACCESS_TRAP_UNCATEGORIZED; 4952 } 4953 4954 static const ARMCPRegInfo el3_cp_reginfo[] = { 4955 { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, 4956 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, 4957 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), 4958 .resetvalue = 0, .writefn = scr_write }, 4959 { .name = "SCR", .type = ARM_CP_ALIAS, 4960 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, 4961 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4962 .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), 4963 .writefn = scr_write }, 4964 { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, 4965 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, 4966 .access = PL3_RW, .resetvalue = 0, 4967 .fieldoffset = offsetof(CPUARMState, cp15.sder) }, 4968 { .name = "SDER", 4969 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, 4970 .access = PL3_RW, .resetvalue = 0, 4971 .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, 4972 { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 4973 .access = PL1_RW, .accessfn = access_trap_aa32s_el1, 4974 .writefn = vbar_write, .resetvalue = 0, 4975 .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, 4976 { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, 4977 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, 4978 .access = PL3_RW, .resetvalue = 0, 4979 .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, 4980 { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, 4981 .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, 4982 .access = PL3_RW, 4983 /* no .writefn needed as this can't cause an ASID change; 4984 * we must provide a .raw_writefn and .resetfn because we handle 4985 * reset and migration for the AArch32 TTBCR(S), which might be 4986 * using mask and base_mask. 
4987 */ 4988 .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write, 4989 .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, 4990 { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, 4991 .type = ARM_CP_ALIAS, 4992 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, 4993 .access = PL3_RW, 4994 .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, 4995 { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, 4996 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, 4997 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, 4998 { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, 4999 .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, 5000 .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, 5001 { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, 5002 .type = ARM_CP_ALIAS, 5003 .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, 5004 .access = PL3_RW, 5005 .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, 5006 { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, 5007 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, 5008 .access = PL3_RW, .writefn = vbar_write, 5009 .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), 5010 .resetvalue = 0 }, 5011 { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, 5012 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, 5013 .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, 5014 .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, 5015 { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, 5016 .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, 5017 .access = PL3_RW, .resetvalue = 0, 5018 .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, 5019 { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, 5020 .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, 5021 .access = PL3_RW, .type = ARM_CP_CONST, 5022 .resetvalue = 0 }, 5023 { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, 5024 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, 5025 .access = PL3_RW, .type = ARM_CP_CONST, 5026 .resetvalue = 0 }, 5027 { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, 5028 .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, 5029 .access = PL3_RW, .type = ARM_CP_CONST, 5030 .resetvalue = 0 }, 5031 { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, 5032 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, 5033 .access = PL3_W, .type = ARM_CP_NO_RAW, 5034 .writefn = tlbi_aa64_alle3is_write }, 5035 { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, 5036 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, 5037 .access = PL3_W, .type = ARM_CP_NO_RAW, 5038 .writefn = tlbi_aa64_vae3is_write }, 5039 { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, 5040 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, 5041 .access = PL3_W, .type = ARM_CP_NO_RAW, 5042 .writefn = tlbi_aa64_vae3is_write }, 5043 { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, 5044 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, 5045 .access = PL3_W, .type = ARM_CP_NO_RAW, 5046 .writefn = tlbi_aa64_alle3_write }, 5047 { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, 5048 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, 5049 .access = PL3_W, .type = ARM_CP_NO_RAW, 5050 .writefn = tlbi_aa64_vae3_write }, 5051 { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, 5052 .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, 5053 .access = PL3_W, .type = ARM_CP_NO_RAW, 5054 .writefn = tlbi_aa64_vae3_write }, 5055 REGINFO_SENTINEL 5056 }; 5057 5058 static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, 
5059 bool isread) 5060 { 5061 /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64, 5062 * but the AArch32 CTR has its own reginfo struct) 5063 */ 5064 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) { 5065 return CP_ACCESS_TRAP; 5066 } 5067 return CP_ACCESS_OK; 5068 } 5069 5070 static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri, 5071 uint64_t value) 5072 { 5073 /* Writes to OSLAR_EL1 may update the OS lock status, which can be 5074 * read via a bit in OSLSR_EL1. 5075 */ 5076 int oslock; 5077 5078 if (ri->state == ARM_CP_STATE_AA32) { 5079 oslock = (value == 0xC5ACCE55); 5080 } else { 5081 oslock = value & 1; 5082 } 5083 5084 env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock); 5085 } 5086 5087 static const ARMCPRegInfo debug_cp_reginfo[] = { 5088 /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped 5089 * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1; 5090 * unlike DBGDRAR it is never accessible from EL0. 5091 * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64 5092 * accessor. 5093 */ 5094 { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0, 5095 .access = PL0_R, .accessfn = access_tdra, 5096 .type = ARM_CP_CONST, .resetvalue = 0 }, 5097 { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64, 5098 .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 5099 .access = PL1_R, .accessfn = access_tdra, 5100 .type = ARM_CP_CONST, .resetvalue = 0 }, 5101 { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, 5102 .access = PL0_R, .accessfn = access_tdra, 5103 .type = ARM_CP_CONST, .resetvalue = 0 }, 5104 /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */ 5105 { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH, 5106 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 5107 .access = PL1_RW, .accessfn = access_tda, 5108 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), 5109 .resetvalue = 0 }, 5110 /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1. 5111 * We don't implement the configurable EL0 access. 5112 */ 5113 { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH, 5114 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 5115 .type = ARM_CP_ALIAS, 5116 .access = PL1_R, .accessfn = access_tda, 5117 .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), }, 5118 { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH, 5119 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4, 5120 .access = PL1_W, .type = ARM_CP_NO_RAW, 5121 .accessfn = access_tdosa, 5122 .writefn = oslar_write }, 5123 { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH, 5124 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4, 5125 .access = PL1_R, .resetvalue = 10, 5126 .accessfn = access_tdosa, 5127 .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) }, 5128 /* Dummy OSDLR_EL1: 32-bit Linux will read this */ 5129 { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH, 5130 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4, 5131 .access = PL1_RW, .accessfn = access_tdosa, 5132 .type = ARM_CP_NOP }, 5133 /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't 5134 * implement vector catch debug events yet. 
5135 */ 5136 { .name = "DBGVCR", 5137 .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 5138 .access = PL1_RW, .accessfn = access_tda, 5139 .type = ARM_CP_NOP }, 5140 /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor 5141 * to save and restore a 32-bit guest's DBGVCR) 5142 */ 5143 { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, 5144 .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, 5145 .access = PL2_RW, .accessfn = access_tda, 5146 .type = ARM_CP_NOP }, 5147 /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications 5148 * Channel but Linux may try to access this register. The 32-bit 5149 * alias is DBGDCCINT. 5150 */ 5151 { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH, 5152 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 5153 .access = PL1_RW, .accessfn = access_tda, 5154 .type = ARM_CP_NOP }, 5155 REGINFO_SENTINEL 5156 }; 5157 5158 static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { 5159 /* 64 bit access versions of the (dummy) debug registers */ 5160 { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, 5161 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5162 { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0, 5163 .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 }, 5164 REGINFO_SENTINEL 5165 }; 5166 5167 /* Return the exception level to which exceptions should be taken 5168 * via SVEAccessTrap. If an exception should be routed through 5169 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should 5170 * take care of raising that exception. 5171 * C.f. the ARM pseudocode function CheckSVEEnabled. 5172 */ 5173 int sve_exception_el(CPUARMState *env, int el) 5174 { 5175 #ifndef CONFIG_USER_ONLY 5176 if (el <= 1) { 5177 bool disabled = false; 5178 5179 /* The CPACR.ZEN controls traps to EL1: 5180 * 0, 2 : trap EL0 and EL1 accesses 5181 * 1 : trap only EL0 accesses 5182 * 3 : trap no accesses 5183 */ 5184 if (!extract32(env->cp15.cpacr_el1, 16, 1)) { 5185 disabled = true; 5186 } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) { 5187 disabled = el == 0; 5188 } 5189 if (disabled) { 5190 /* route_to_el2 */ 5191 return (arm_feature(env, ARM_FEATURE_EL2) 5192 && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1); 5193 } 5194 5195 /* Check CPACR.FPEN. */ 5196 if (!extract32(env->cp15.cpacr_el1, 20, 1)) { 5197 disabled = true; 5198 } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) { 5199 disabled = el == 0; 5200 } 5201 if (disabled) { 5202 return 0; 5203 } 5204 } 5205 5206 /* CPTR_EL2. Since TZ and TFP are positive, 5207 * they will be zero when EL2 is not present. 5208 */ 5209 if (el <= 2 && !arm_is_secure_below_el3(env)) { 5210 if (env->cp15.cptr_el[2] & CPTR_TZ) { 5211 return 2; 5212 } 5213 if (env->cp15.cptr_el[2] & CPTR_TFP) { 5214 return 0; 5215 } 5216 } 5217 5218 /* CPTR_EL3. Since EZ is negative we must check for EL3. */ 5219 if (arm_feature(env, ARM_FEATURE_EL3) 5220 && !(env->cp15.cptr_el[3] & CPTR_EZ)) { 5221 return 3; 5222 } 5223 #endif 5224 return 0; 5225 } 5226 5227 /* 5228 * Given that SVE is enabled, return the vector length for EL. 
5229 */ 5230 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 5231 { 5232 ARMCPU *cpu = arm_env_get_cpu(env); 5233 uint32_t zcr_len = cpu->sve_max_vq - 1; 5234 5235 if (el <= 1) { 5236 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]); 5237 } 5238 if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) { 5239 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]); 5240 } 5241 if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) { 5242 zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]); 5243 } 5244 return zcr_len; 5245 } 5246 5247 static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5248 uint64_t value) 5249 { 5250 int cur_el = arm_current_el(env); 5251 int old_len = sve_zcr_len_for_el(env, cur_el); 5252 int new_len; 5253 5254 /* Bits other than [3:0] are RAZ/WI. */ 5255 raw_write(env, ri, value & 0xf); 5256 5257 /* 5258 * Because we arrived here, we know both FP and SVE are enabled; 5259 * otherwise we would have trapped access to the ZCR_ELn register. 5260 */ 5261 new_len = sve_zcr_len_for_el(env, cur_el); 5262 if (new_len < old_len) { 5263 aarch64_sve_narrow_vq(env, new_len + 1); 5264 } 5265 } 5266 5267 static const ARMCPRegInfo zcr_el1_reginfo = { 5268 .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, 5269 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, 5270 .access = PL1_RW, .type = ARM_CP_SVE, 5271 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), 5272 .writefn = zcr_write, .raw_writefn = raw_write 5273 }; 5274 5275 static const ARMCPRegInfo zcr_el2_reginfo = { 5276 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 5277 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 5278 .access = PL2_RW, .type = ARM_CP_SVE, 5279 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), 5280 .writefn = zcr_write, .raw_writefn = raw_write 5281 }; 5282 5283 static const ARMCPRegInfo zcr_no_el2_reginfo = { 5284 .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, 5285 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, 5286 .access = PL2_RW, .type = ARM_CP_SVE, 5287 .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore 5288 }; 5289 5290 static const ARMCPRegInfo zcr_el3_reginfo = { 5291 .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, 5292 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, 5293 .access = PL3_RW, .type = ARM_CP_SVE, 5294 .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), 5295 .writefn = zcr_write, .raw_writefn = raw_write 5296 }; 5297 5298 void hw_watchpoint_update(ARMCPU *cpu, int n) 5299 { 5300 CPUARMState *env = &cpu->env; 5301 vaddr len = 0; 5302 vaddr wvr = env->cp15.dbgwvr[n]; 5303 uint64_t wcr = env->cp15.dbgwcr[n]; 5304 int mask; 5305 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 5306 5307 if (env->cpu_watchpoint[n]) { 5308 cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]); 5309 env->cpu_watchpoint[n] = NULL; 5310 } 5311 5312 if (!extract64(wcr, 0, 1)) { 5313 /* E bit clear : watchpoint disabled */ 5314 return; 5315 } 5316 5317 switch (extract64(wcr, 3, 2)) { 5318 case 0: 5319 /* LSC 00 is reserved and must behave as if the wp is disabled */ 5320 return; 5321 case 1: 5322 flags |= BP_MEM_READ; 5323 break; 5324 case 2: 5325 flags |= BP_MEM_WRITE; 5326 break; 5327 case 3: 5328 flags |= BP_MEM_ACCESS; 5329 break; 5330 } 5331 5332 /* Attempts to use both MASK and BAS fields simultaneously are 5333 * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case, 5334 * thus generating a watchpoint for every byte in the masked region. 
5335 */ 5336 mask = extract64(wcr, 24, 4); 5337 if (mask == 1 || mask == 2) { 5338 /* Reserved values of MASK; we must act as if the mask value was 5339 * some non-reserved value, or as if the watchpoint were disabled. 5340 * We choose the latter. 5341 */ 5342 return; 5343 } else if (mask) { 5344 /* Watchpoint covers an aligned area up to 2GB in size */ 5345 len = 1ULL << mask; 5346 /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE 5347 * whether the watchpoint fires when the unmasked bits match; we opt 5348 * to generate the exceptions. 5349 */ 5350 wvr &= ~(len - 1); 5351 } else { 5352 /* Watchpoint covers bytes defined by the byte address select bits */ 5353 int bas = extract64(wcr, 5, 8); 5354 int basstart; 5355 5356 if (bas == 0) { 5357 /* This must act as if the watchpoint is disabled */ 5358 return; 5359 } 5360 5361 if (extract64(wvr, 2, 1)) { 5362 /* Deprecated case of an only 4-aligned address. BAS[7:4] are 5363 * ignored, and BAS[3:0] define which bytes to watch. 5364 */ 5365 bas &= 0xf; 5366 } 5367 /* The BAS bits are supposed to be programmed to indicate a contiguous 5368 * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether 5369 * we fire for each byte in the word/doubleword addressed by the WVR. 5370 * We choose to ignore any non-zero bits after the first range of 1s. 5371 */ 5372 basstart = ctz32(bas); 5373 len = cto32(bas >> basstart); 5374 wvr += basstart; 5375 } 5376 5377 cpu_watchpoint_insert(CPU(cpu), wvr, len, flags, 5378 &env->cpu_watchpoint[n]); 5379 } 5380 5381 void hw_watchpoint_update_all(ARMCPU *cpu) 5382 { 5383 int i; 5384 CPUARMState *env = &cpu->env; 5385 5386 /* Completely clear out existing QEMU watchpoints and our array, to 5387 * avoid possible stale entries following migration load. 5388 */ 5389 cpu_watchpoint_remove_all(CPU(cpu), BP_CPU); 5390 memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint)); 5391 5392 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) { 5393 hw_watchpoint_update(cpu, i); 5394 } 5395 } 5396 5397 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5398 uint64_t value) 5399 { 5400 ARMCPU *cpu = arm_env_get_cpu(env); 5401 int i = ri->crm; 5402 5403 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the 5404 * register reads and behaves as if values written are sign extended. 5405 * Bits [1:0] are RES0. 
5406 */ 5407 value = sextract64(value, 0, 49) & ~3ULL; 5408 5409 raw_write(env, ri, value); 5410 hw_watchpoint_update(cpu, i); 5411 } 5412 5413 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5414 uint64_t value) 5415 { 5416 ARMCPU *cpu = arm_env_get_cpu(env); 5417 int i = ri->crm; 5418 5419 raw_write(env, ri, value); 5420 hw_watchpoint_update(cpu, i); 5421 } 5422 5423 void hw_breakpoint_update(ARMCPU *cpu, int n) 5424 { 5425 CPUARMState *env = &cpu->env; 5426 uint64_t bvr = env->cp15.dbgbvr[n]; 5427 uint64_t bcr = env->cp15.dbgbcr[n]; 5428 vaddr addr; 5429 int bt; 5430 int flags = BP_CPU; 5431 5432 if (env->cpu_breakpoint[n]) { 5433 cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]); 5434 env->cpu_breakpoint[n] = NULL; 5435 } 5436 5437 if (!extract64(bcr, 0, 1)) { 5438 /* E bit clear : breakpoint disabled */ 5439 return; 5440 } 5441 5442 bt = extract64(bcr, 20, 4); 5443 5444 switch (bt) { 5445 case 4: /* unlinked address mismatch (reserved if AArch64) */ 5446 case 5: /* linked address mismatch (reserved if AArch64) */ 5447 qemu_log_mask(LOG_UNIMP, 5448 "arm: address mismatch breakpoint types not implemented\n"); 5449 return; 5450 case 0: /* unlinked address match */ 5451 case 1: /* linked address match */ 5452 { 5453 /* Bits [63:49] are hardwired to the value of bit [48]; that is, 5454 * we behave as if the register was sign extended. Bits [1:0] are 5455 * RES0. The BAS field is used to allow setting breakpoints on 16 5456 * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether 5457 * a bp will fire if the addresses covered by the bp and the addresses 5458 * covered by the insn overlap but the insn doesn't start at the 5459 * start of the bp address range. We choose to require the insn and 5460 * the bp to have the same address. The constraints on writing to 5461 * BAS enforced in dbgbcr_write mean we have only four cases: 5462 * 0b0000 => no breakpoint 5463 * 0b0011 => breakpoint on addr 5464 * 0b1100 => breakpoint on addr + 2 5465 * 0b1111 => breakpoint on addr 5466 * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c). 5467 */ 5468 int bas = extract64(bcr, 5, 4); 5469 addr = sextract64(bvr, 0, 49) & ~3ULL; 5470 if (bas == 0) { 5471 return; 5472 } 5473 if (bas == 0xc) { 5474 addr += 2; 5475 } 5476 break; 5477 } 5478 case 2: /* unlinked context ID match */ 5479 case 8: /* unlinked VMID match (reserved if no EL2) */ 5480 case 10: /* unlinked context ID and VMID match (reserved if no EL2) */ 5481 qemu_log_mask(LOG_UNIMP, 5482 "arm: unlinked context breakpoint types not implemented\n"); 5483 return; 5484 case 9: /* linked VMID match (reserved if no EL2) */ 5485 case 11: /* linked context ID and VMID match (reserved if no EL2) */ 5486 case 3: /* linked context ID match */ 5487 default: 5488 /* We must generate no events for Linked context matches (unless 5489 * they are linked to by some other bp/wp, which is handled in 5490 * updates for the linking bp/wp). We choose to also generate no events 5491 * for reserved values. 5492 */ 5493 return; 5494 } 5495 5496 cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]); 5497 } 5498 5499 void hw_breakpoint_update_all(ARMCPU *cpu) 5500 { 5501 int i; 5502 CPUARMState *env = &cpu->env; 5503 5504 /* Completely clear out existing QEMU breakpoints and our array, to 5505 * avoid possible stale entries following migration load.
5506 */ 5507 cpu_breakpoint_remove_all(CPU(cpu), BP_CPU); 5508 memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint)); 5509 5510 for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) { 5511 hw_breakpoint_update(cpu, i); 5512 } 5513 } 5514 5515 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5516 uint64_t value) 5517 { 5518 ARMCPU *cpu = arm_env_get_cpu(env); 5519 int i = ri->crm; 5520 5521 raw_write(env, ri, value); 5522 hw_breakpoint_update(cpu, i); 5523 } 5524 5525 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5526 uint64_t value) 5527 { 5528 ARMCPU *cpu = arm_env_get_cpu(env); 5529 int i = ri->crm; 5530 5531 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only 5532 * copy of BAS[0]. 5533 */ 5534 value = deposit64(value, 6, 1, extract64(value, 5, 1)); 5535 value = deposit64(value, 8, 1, extract64(value, 7, 1)); 5536 5537 raw_write(env, ri, value); 5538 hw_breakpoint_update(cpu, i); 5539 } 5540 5541 static void define_debug_regs(ARMCPU *cpu) 5542 { 5543 /* Define v7 and v8 architectural debug registers. 5544 * These are just dummy implementations for now. 5545 */ 5546 int i; 5547 int wrps, brps, ctx_cmps; 5548 ARMCPRegInfo dbgdidr = { 5549 .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, 5550 .access = PL0_R, .accessfn = access_tda, 5551 .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, 5552 }; 5553 5554 /* Note that all these register fields hold "number of Xs minus 1". */ 5555 brps = extract32(cpu->dbgdidr, 24, 4); 5556 wrps = extract32(cpu->dbgdidr, 28, 4); 5557 ctx_cmps = extract32(cpu->dbgdidr, 20, 4); 5558 5559 assert(ctx_cmps <= brps); 5560 5561 /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties 5562 * of the debug registers such as number of breakpoints; 5563 * check that if they both exist then they agree. 
5564 */ 5565 if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { 5566 assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); 5567 assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); 5568 assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); 5569 } 5570 5571 define_one_arm_cp_reg(cpu, &dbgdidr); 5572 define_arm_cp_regs(cpu, debug_cp_reginfo); 5573 5574 if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { 5575 define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); 5576 } 5577 5578 for (i = 0; i < brps + 1; i++) { 5579 ARMCPRegInfo dbgregs[] = { 5580 { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, 5581 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, 5582 .access = PL1_RW, .accessfn = access_tda, 5583 .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]), 5584 .writefn = dbgbvr_write, .raw_writefn = raw_write 5585 }, 5586 { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH, 5587 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5, 5588 .access = PL1_RW, .accessfn = access_tda, 5589 .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]), 5590 .writefn = dbgbcr_write, .raw_writefn = raw_write 5591 }, 5592 REGINFO_SENTINEL 5593 }; 5594 define_arm_cp_regs(cpu, dbgregs); 5595 } 5596 5597 for (i = 0; i < wrps + 1; i++) { 5598 ARMCPRegInfo dbgregs[] = { 5599 { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, 5600 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, 5601 .access = PL1_RW, .accessfn = access_tda, 5602 .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]), 5603 .writefn = dbgwvr_write, .raw_writefn = raw_write 5604 }, 5605 { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH, 5606 .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7, 5607 .access = PL1_RW, .accessfn = access_tda, 5608 .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]), 5609 .writefn = dbgwcr_write, .raw_writefn = raw_write 5610 }, 5611 REGINFO_SENTINEL 5612 }; 5613 define_arm_cp_regs(cpu, dbgregs); 5614 } 5615 } 5616 5617 /* We don't know until after realize whether there's a GICv3 5618 * attached, and that is what registers the gicv3 sysregs. 5619 * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1 5620 * at runtime. 5621 */ 5622 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 5623 { 5624 ARMCPU *cpu = arm_env_get_cpu(env); 5625 uint64_t pfr1 = cpu->id_pfr1; 5626 5627 if (env->gicv3state) { 5628 pfr1 |= 1 << 28; 5629 } 5630 return pfr1; 5631 } 5632 5633 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 5634 { 5635 ARMCPU *cpu = arm_env_get_cpu(env); 5636 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 5637 5638 if (env->gicv3state) { 5639 pfr0 |= 1 << 24; 5640 } 5641 return pfr0; 5642 } 5643 5644 /* Shared logic between LORID and the rest of the LOR* registers. 5645 * Secure state has already been dealt with. 5646 */ 5647 static CPAccessResult access_lor_ns(CPUARMState *env) 5648 { 5649 int el = arm_current_el(env); 5650 5651 if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { 5652 return CP_ACCESS_TRAP_EL2; 5653 } 5654 if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { 5655 return CP_ACCESS_TRAP_EL3; 5656 } 5657 return CP_ACCESS_OK; 5658 } 5659 5660 static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri, 5661 bool isread) 5662 { 5663 if (arm_is_secure_below_el3(env)) { 5664 /* Access ok in secure mode.
*/ 5665 return CP_ACCESS_OK; 5666 } 5667 return access_lor_ns(env); 5668 } 5669 5670 static CPAccessResult access_lor_other(CPUARMState *env, 5671 const ARMCPRegInfo *ri, bool isread) 5672 { 5673 if (arm_is_secure_below_el3(env)) { 5674 /* Access denied in secure mode. */ 5675 return CP_ACCESS_TRAP; 5676 } 5677 return access_lor_ns(env); 5678 } 5679 5680 #ifdef TARGET_AARCH64 5681 static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, 5682 bool isread) 5683 { 5684 int el = arm_current_el(env); 5685 5686 if (el < 2 && 5687 arm_feature(env, ARM_FEATURE_EL2) && 5688 !(arm_hcr_el2_eff(env) & HCR_APK)) { 5689 return CP_ACCESS_TRAP_EL2; 5690 } 5691 if (el < 3 && 5692 arm_feature(env, ARM_FEATURE_EL3) && 5693 !(env->cp15.scr_el3 & SCR_APK)) { 5694 return CP_ACCESS_TRAP_EL3; 5695 } 5696 return CP_ACCESS_OK; 5697 } 5698 5699 static const ARMCPRegInfo pauth_reginfo[] = { 5700 { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 5701 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, 5702 .access = PL1_RW, .accessfn = access_pauth, 5703 .fieldoffset = offsetof(CPUARMState, apda_key.lo) }, 5704 { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 5705 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, 5706 .access = PL1_RW, .accessfn = access_pauth, 5707 .fieldoffset = offsetof(CPUARMState, apda_key.hi) }, 5708 { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 5709 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, 5710 .access = PL1_RW, .accessfn = access_pauth, 5711 .fieldoffset = offsetof(CPUARMState, apdb_key.lo) }, 5712 { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 5713 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, 5714 .access = PL1_RW, .accessfn = access_pauth, 5715 .fieldoffset = offsetof(CPUARMState, apdb_key.hi) }, 5716 { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 5717 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, 5718 .access = PL1_RW, .accessfn = access_pauth, 5719 .fieldoffset = offsetof(CPUARMState, apga_key.lo) }, 5720 { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 5721 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, 5722 .access = PL1_RW, .accessfn = access_pauth, 5723 .fieldoffset = offsetof(CPUARMState, apga_key.hi) }, 5724 { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, 5725 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, 5726 .access = PL1_RW, .accessfn = access_pauth, 5727 .fieldoffset = offsetof(CPUARMState, apia_key.lo) }, 5728 { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, 5729 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, 5730 .access = PL1_RW, .accessfn = access_pauth, 5731 .fieldoffset = offsetof(CPUARMState, apia_key.hi) }, 5732 { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, 5733 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, 5734 .access = PL1_RW, .accessfn = access_pauth, 5735 .fieldoffset = offsetof(CPUARMState, apib_key.lo) }, 5736 { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, 5737 .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, 5738 .access = PL1_RW, .accessfn = access_pauth, 5739 .fieldoffset = offsetof(CPUARMState, apib_key.hi) }, 5740 REGINFO_SENTINEL 5741 }; 5742 #endif 5743 5744 void register_cp_regs_for_features(ARMCPU *cpu) 5745 { 5746 /* Register all the coprocessor registers based on feature bits */ 5747 CPUARMState *env = &cpu->env; 5748 if (arm_feature(env, ARM_FEATURE_M)) { 5749 /* M profile has no coprocessor registers */ 5750 return; 5751 } 5752 5753 define_arm_cp_regs(cpu, cp_reginfo); 5754 if (!arm_feature(env, 
ARM_FEATURE_V8)) { 5755 /* Must go early as it is full of wildcards that may be 5756 * overridden by later definitions. 5757 */ 5758 define_arm_cp_regs(cpu, not_v8_cp_reginfo); 5759 } 5760 5761 if (arm_feature(env, ARM_FEATURE_V6)) { 5762 /* The ID registers all have impdef reset values */ 5763 ARMCPRegInfo v6_idregs[] = { 5764 { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, 5765 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, 5766 .access = PL1_R, .type = ARM_CP_CONST, 5767 .resetvalue = cpu->id_pfr0 }, 5768 /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know 5769 * the value of the GIC field until after we define these regs. 5770 */ 5771 { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, 5772 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, 5773 .access = PL1_R, .type = ARM_CP_NO_RAW, 5774 .readfn = id_pfr1_read, 5775 .writefn = arm_cp_write_ignore }, 5776 { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, 5777 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, 5778 .access = PL1_R, .type = ARM_CP_CONST, 5779 .resetvalue = cpu->id_dfr0 }, 5780 { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, 5781 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, 5782 .access = PL1_R, .type = ARM_CP_CONST, 5783 .resetvalue = cpu->id_afr0 }, 5784 { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, 5785 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, 5786 .access = PL1_R, .type = ARM_CP_CONST, 5787 .resetvalue = cpu->id_mmfr0 }, 5788 { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, 5789 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, 5790 .access = PL1_R, .type = ARM_CP_CONST, 5791 .resetvalue = cpu->id_mmfr1 }, 5792 { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, 5793 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, 5794 .access = PL1_R, .type = ARM_CP_CONST, 5795 .resetvalue = cpu->id_mmfr2 }, 5796 { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, 5797 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, 5798 .access = PL1_R, .type = ARM_CP_CONST, 5799 .resetvalue = cpu->id_mmfr3 }, 5800 { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, 5801 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, 5802 .access = PL1_R, .type = ARM_CP_CONST, 5803 .resetvalue = cpu->isar.id_isar0 }, 5804 { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, 5805 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, 5806 .access = PL1_R, .type = ARM_CP_CONST, 5807 .resetvalue = cpu->isar.id_isar1 }, 5808 { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, 5809 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, 5810 .access = PL1_R, .type = ARM_CP_CONST, 5811 .resetvalue = cpu->isar.id_isar2 }, 5812 { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, 5813 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, 5814 .access = PL1_R, .type = ARM_CP_CONST, 5815 .resetvalue = cpu->isar.id_isar3 }, 5816 { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, 5817 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, 5818 .access = PL1_R, .type = ARM_CP_CONST, 5819 .resetvalue = cpu->isar.id_isar4 }, 5820 { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, 5821 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, 5822 .access = PL1_R, .type = ARM_CP_CONST, 5823 .resetvalue = cpu->isar.id_isar5 }, 5824 { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, 5825 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, 5826 .access = PL1_R, .type = ARM_CP_CONST, 5827 .resetvalue = cpu->id_mmfr4 }, 5828 { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, 5829 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, 5830 .access = PL1_R, 
.type = ARM_CP_CONST, 5831 .resetvalue = cpu->isar.id_isar6 }, 5832 REGINFO_SENTINEL 5833 }; 5834 define_arm_cp_regs(cpu, v6_idregs); 5835 define_arm_cp_regs(cpu, v6_cp_reginfo); 5836 } else { 5837 define_arm_cp_regs(cpu, not_v6_cp_reginfo); 5838 } 5839 if (arm_feature(env, ARM_FEATURE_V6K)) { 5840 define_arm_cp_regs(cpu, v6k_cp_reginfo); 5841 } 5842 if (arm_feature(env, ARM_FEATURE_V7MP) && 5843 !arm_feature(env, ARM_FEATURE_PMSA)) { 5844 define_arm_cp_regs(cpu, v7mp_cp_reginfo); 5845 } 5846 if (arm_feature(env, ARM_FEATURE_V7VE)) { 5847 define_arm_cp_regs(cpu, pmovsset_cp_reginfo); 5848 } 5849 if (arm_feature(env, ARM_FEATURE_V7)) { 5850 /* v7 performance monitor control register: same implementor 5851 * field as main ID register, and we implement four counters in 5852 * addition to the cycle count register. 5853 */ 5854 unsigned int i, pmcrn = 4; 5855 ARMCPRegInfo pmcr = { 5856 .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, 5857 .access = PL0_RW, 5858 .type = ARM_CP_IO | ARM_CP_ALIAS, 5859 .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), 5860 .accessfn = pmreg_access, .writefn = pmcr_write, 5861 .raw_writefn = raw_write, 5862 }; 5863 ARMCPRegInfo pmcr64 = { 5864 .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, 5865 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, 5866 .access = PL0_RW, .accessfn = pmreg_access, 5867 .type = ARM_CP_IO, 5868 .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), 5869 .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT), 5870 .writefn = pmcr_write, .raw_writefn = raw_write, 5871 }; 5872 define_one_arm_cp_reg(cpu, &pmcr); 5873 define_one_arm_cp_reg(cpu, &pmcr64); 5874 for (i = 0; i < pmcrn; i++) { 5875 char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); 5876 char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); 5877 char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); 5878 char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); 5879 ARMCPRegInfo pmev_regs[] = { 5880 { .name = pmevcntr_name, .cp = 15, .crn = 14, 5881 .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 5882 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 5883 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 5884 .accessfn = pmreg_access }, 5885 { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, 5886 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), 5887 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 5888 .type = ARM_CP_IO, 5889 .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, 5890 .raw_readfn = pmevcntr_rawread, 5891 .raw_writefn = pmevcntr_rawwrite }, 5892 { .name = pmevtyper_name, .cp = 15, .crn = 14, 5893 .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, 5894 .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, 5895 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 5896 .accessfn = pmreg_access }, 5897 { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, 5898 .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), 5899 .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, 5900 .type = ARM_CP_IO, 5901 .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, 5902 .raw_writefn = pmevtyper_rawwrite }, 5903 REGINFO_SENTINEL 5904 }; 5905 define_arm_cp_regs(cpu, pmev_regs); 5906 g_free(pmevcntr_name); 5907 g_free(pmevcntr_el0_name); 5908 g_free(pmevtyper_name); 5909 g_free(pmevtyper_el0_name); 5910 } 5911 ARMCPRegInfo clidr = { 5912 .name = "CLIDR", .state = ARM_CP_STATE_BOTH, 5913 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, 5914 .access = 
PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr 5915 }; 5916 define_one_arm_cp_reg(cpu, &clidr); 5917 define_arm_cp_regs(cpu, v7_cp_reginfo); 5918 define_debug_regs(cpu); 5919 } else { 5920 define_arm_cp_regs(cpu, not_v7_cp_reginfo); 5921 } 5922 if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 && 5923 FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) { 5924 ARMCPRegInfo v81_pmu_regs[] = { 5925 { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, 5926 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, 5927 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5928 .resetvalue = extract64(cpu->pmceid0, 32, 32) }, 5929 { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, 5930 .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, 5931 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 5932 .resetvalue = extract64(cpu->pmceid1, 32, 32) }, 5933 REGINFO_SENTINEL 5934 }; 5935 define_arm_cp_regs(cpu, v81_pmu_regs); 5936 } 5937 if (arm_feature(env, ARM_FEATURE_V8)) { 5938 /* AArch64 ID registers, which all have impdef reset values. 5939 * Note that within the ID register ranges the unused slots 5940 * must all RAZ, not UNDEF; future architecture versions may 5941 * define new registers here. 5942 */ 5943 ARMCPRegInfo v8_idregs[] = { 5944 /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't 5945 * know the right value for the GIC field until after we 5946 * define these regs. 5947 */ 5948 { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, 5949 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, 5950 .access = PL1_R, .type = ARM_CP_NO_RAW, 5951 .readfn = id_aa64pfr0_read, 5952 .writefn = arm_cp_write_ignore }, 5953 { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, 5954 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, 5955 .access = PL1_R, .type = ARM_CP_CONST, 5956 .resetvalue = cpu->isar.id_aa64pfr1}, 5957 { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5958 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, 5959 .access = PL1_R, .type = ARM_CP_CONST, 5960 .resetvalue = 0 }, 5961 { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5962 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, 5963 .access = PL1_R, .type = ARM_CP_CONST, 5964 .resetvalue = 0 }, 5965 { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, 5966 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, 5967 .access = PL1_R, .type = ARM_CP_CONST, 5968 /* At present, only SVEver == 0 is defined anyway. 
*/ 5969 .resetvalue = 0 }, 5970 { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5971 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, 5972 .access = PL1_R, .type = ARM_CP_CONST, 5973 .resetvalue = 0 }, 5974 { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5975 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, 5976 .access = PL1_R, .type = ARM_CP_CONST, 5977 .resetvalue = 0 }, 5978 { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5979 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, 5980 .access = PL1_R, .type = ARM_CP_CONST, 5981 .resetvalue = 0 }, 5982 { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, 5983 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, 5984 .access = PL1_R, .type = ARM_CP_CONST, 5985 .resetvalue = cpu->id_aa64dfr0 }, 5986 { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, 5987 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, 5988 .access = PL1_R, .type = ARM_CP_CONST, 5989 .resetvalue = cpu->id_aa64dfr1 }, 5990 { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5991 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, 5992 .access = PL1_R, .type = ARM_CP_CONST, 5993 .resetvalue = 0 }, 5994 { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 5995 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, 5996 .access = PL1_R, .type = ARM_CP_CONST, 5997 .resetvalue = 0 }, 5998 { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, 5999 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, 6000 .access = PL1_R, .type = ARM_CP_CONST, 6001 .resetvalue = cpu->id_aa64afr0 }, 6002 { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, 6003 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, 6004 .access = PL1_R, .type = ARM_CP_CONST, 6005 .resetvalue = cpu->id_aa64afr1 }, 6006 { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6007 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, 6008 .access = PL1_R, .type = ARM_CP_CONST, 6009 .resetvalue = 0 }, 6010 { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6011 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, 6012 .access = PL1_R, .type = ARM_CP_CONST, 6013 .resetvalue = 0 }, 6014 { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, 6015 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, 6016 .access = PL1_R, .type = ARM_CP_CONST, 6017 .resetvalue = cpu->isar.id_aa64isar0 }, 6018 { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, 6019 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, 6020 .access = PL1_R, .type = ARM_CP_CONST, 6021 .resetvalue = cpu->isar.id_aa64isar1 }, 6022 { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6023 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, 6024 .access = PL1_R, .type = ARM_CP_CONST, 6025 .resetvalue = 0 }, 6026 { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6027 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, 6028 .access = PL1_R, .type = ARM_CP_CONST, 6029 .resetvalue = 0 }, 6030 { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6031 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, 6032 .access = PL1_R, .type = ARM_CP_CONST, 6033 .resetvalue = 0 }, 6034 { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6035 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, 6036 .access = PL1_R, .type = ARM_CP_CONST, 6037 .resetvalue = 0 }, 6038 { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6039 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 
= 6, 6040 .access = PL1_R, .type = ARM_CP_CONST, 6041 .resetvalue = 0 }, 6042 { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6043 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, 6044 .access = PL1_R, .type = ARM_CP_CONST, 6045 .resetvalue = 0 }, 6046 { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, 6047 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, 6048 .access = PL1_R, .type = ARM_CP_CONST, 6049 .resetvalue = cpu->isar.id_aa64mmfr0 }, 6050 { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, 6051 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, 6052 .access = PL1_R, .type = ARM_CP_CONST, 6053 .resetvalue = cpu->isar.id_aa64mmfr1 }, 6054 { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6055 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, 6056 .access = PL1_R, .type = ARM_CP_CONST, 6057 .resetvalue = 0 }, 6058 { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6059 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, 6060 .access = PL1_R, .type = ARM_CP_CONST, 6061 .resetvalue = 0 }, 6062 { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6063 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, 6064 .access = PL1_R, .type = ARM_CP_CONST, 6065 .resetvalue = 0 }, 6066 { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6067 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, 6068 .access = PL1_R, .type = ARM_CP_CONST, 6069 .resetvalue = 0 }, 6070 { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6071 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, 6072 .access = PL1_R, .type = ARM_CP_CONST, 6073 .resetvalue = 0 }, 6074 { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6075 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, 6076 .access = PL1_R, .type = ARM_CP_CONST, 6077 .resetvalue = 0 }, 6078 { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, 6079 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, 6080 .access = PL1_R, .type = ARM_CP_CONST, 6081 .resetvalue = cpu->isar.mvfr0 }, 6082 { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, 6083 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, 6084 .access = PL1_R, .type = ARM_CP_CONST, 6085 .resetvalue = cpu->isar.mvfr1 }, 6086 { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, 6087 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, 6088 .access = PL1_R, .type = ARM_CP_CONST, 6089 .resetvalue = cpu->isar.mvfr2 }, 6090 { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6091 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, 6092 .access = PL1_R, .type = ARM_CP_CONST, 6093 .resetvalue = 0 }, 6094 { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6095 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, 6096 .access = PL1_R, .type = ARM_CP_CONST, 6097 .resetvalue = 0 }, 6098 { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6099 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, 6100 .access = PL1_R, .type = ARM_CP_CONST, 6101 .resetvalue = 0 }, 6102 { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6103 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, 6104 .access = PL1_R, .type = ARM_CP_CONST, 6105 .resetvalue = 0 }, 6106 { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, 6107 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, 6108 .access = PL1_R, .type = ARM_CP_CONST, 6109 .resetvalue = 0 }, 6110 { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, 6111 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, 6112 .access = 
PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6113 .resetvalue = extract64(cpu->pmceid0, 0, 32) }, 6114 { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, 6115 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, 6116 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6117 .resetvalue = cpu->pmceid0 }, 6118 { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, 6119 .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, 6120 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6121 .resetvalue = extract64(cpu->pmceid1, 0, 32) }, 6122 { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, 6123 .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, 6124 .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, 6125 .resetvalue = cpu->pmceid1 }, 6126 REGINFO_SENTINEL 6127 }; 6128 #ifdef CONFIG_USER_ONLY 6129 ARMCPRegUserSpaceInfo v8_user_idregs[] = { 6130 { .name = "ID_AA64PFR0_EL1", 6131 .exported_bits = 0x000f000f00ff0000, 6132 .fixed_bits = 0x0000000000000011 }, 6133 { .name = "ID_AA64PFR1_EL1", 6134 .exported_bits = 0x00000000000000f0 }, 6135 { .name = "ID_AA64PFR*_EL1_RESERVED", 6136 .is_glob = true }, 6137 { .name = "ID_AA64ZFR0_EL1" }, 6138 { .name = "ID_AA64MMFR0_EL1", 6139 .fixed_bits = 0x00000000ff000000 }, 6140 { .name = "ID_AA64MMFR1_EL1" }, 6141 { .name = "ID_AA64MMFR*_EL1_RESERVED", 6142 .is_glob = true }, 6143 { .name = "ID_AA64DFR0_EL1", 6144 .fixed_bits = 0x0000000000000006 }, 6145 { .name = "ID_AA64DFR1_EL1" }, 6146 { .name = "ID_AA64DFR*_EL1_RESERVED", 6147 .is_glob = true }, 6148 { .name = "ID_AA64AFR*", 6149 .is_glob = true }, 6150 { .name = "ID_AA64ISAR0_EL1", 6151 .exported_bits = 0x00fffffff0fffff0 }, 6152 { .name = "ID_AA64ISAR1_EL1", 6153 .exported_bits = 0x000000f0ffffffff }, 6154 { .name = "ID_AA64ISAR*_EL1_RESERVED", 6155 .is_glob = true }, 6156 REGUSERINFO_SENTINEL 6157 }; 6158 modify_arm_cp_regs(v8_idregs, v8_user_idregs); 6159 #endif 6160 /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ 6161 if (!arm_feature(env, ARM_FEATURE_EL3) && 6162 !arm_feature(env, ARM_FEATURE_EL2)) { 6163 ARMCPRegInfo rvbar = { 6164 .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64, 6165 .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, 6166 .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar 6167 }; 6168 define_one_arm_cp_reg(cpu, &rvbar); 6169 } 6170 define_arm_cp_regs(cpu, v8_idregs); 6171 define_arm_cp_regs(cpu, v8_cp_reginfo); 6172 } 6173 if (arm_feature(env, ARM_FEATURE_EL2)) { 6174 uint64_t vmpidr_def = mpidr_read_val(env); 6175 ARMCPRegInfo vpidr_regs[] = { 6176 { .name = "VPIDR", .state = ARM_CP_STATE_AA32, 6177 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 6178 .access = PL2_RW, .accessfn = access_el3_aa32ns, 6179 .resetvalue = cpu->midr, .type = ARM_CP_ALIAS, 6180 .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, 6181 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, 6182 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 6183 .access = PL2_RW, .resetvalue = cpu->midr, 6184 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 6185 { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, 6186 .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 6187 .access = PL2_RW, .accessfn = access_el3_aa32ns, 6188 .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS, 6189 .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, 6190 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, 6191 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 6192 .access = PL2_RW, 6193 .resetvalue = vmpidr_def, 6194 .fieldoffset = 
offsetof(CPUARMState, cp15.vmpidr_el2) }, 6195 REGINFO_SENTINEL 6196 }; 6197 define_arm_cp_regs(cpu, vpidr_regs); 6198 define_arm_cp_regs(cpu, el2_cp_reginfo); 6199 if (arm_feature(env, ARM_FEATURE_V8)) { 6200 define_arm_cp_regs(cpu, el2_v8_cp_reginfo); 6201 } 6202 /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ 6203 if (!arm_feature(env, ARM_FEATURE_EL3)) { 6204 ARMCPRegInfo rvbar = { 6205 .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, 6206 .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, 6207 .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar 6208 }; 6209 define_one_arm_cp_reg(cpu, &rvbar); 6210 } 6211 } else { 6212 /* If EL2 is missing but higher ELs are enabled, we need to 6213 * register the no_el2 reginfos. 6214 */ 6215 if (arm_feature(env, ARM_FEATURE_EL3)) { 6216 /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value 6217 * of MIDR_EL1 and MPIDR_EL1. 6218 */ 6219 ARMCPRegInfo vpidr_regs[] = { 6220 { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH, 6221 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, 6222 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 6223 .type = ARM_CP_CONST, .resetvalue = cpu->midr, 6224 .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, 6225 { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH, 6226 .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, 6227 .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any, 6228 .type = ARM_CP_NO_RAW, 6229 .writefn = arm_cp_write_ignore, .readfn = mpidr_read }, 6230 REGINFO_SENTINEL 6231 }; 6232 define_arm_cp_regs(cpu, vpidr_regs); 6233 define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo); 6234 if (arm_feature(env, ARM_FEATURE_V8)) { 6235 define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo); 6236 } 6237 } 6238 } 6239 if (arm_feature(env, ARM_FEATURE_EL3)) { 6240 define_arm_cp_regs(cpu, el3_cp_reginfo); 6241 ARMCPRegInfo el3_regs[] = { 6242 { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, 6243 .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, 6244 .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar }, 6245 { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, 6246 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, 6247 .access = PL3_RW, 6248 .raw_writefn = raw_write, .writefn = sctlr_write, 6249 .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), 6250 .resetvalue = cpu->reset_sctlr }, 6251 REGINFO_SENTINEL 6252 }; 6253 6254 define_arm_cp_regs(cpu, el3_regs); 6255 } 6256 /* The behaviour of NSACR is sufficiently various that we don't 6257 * try to describe it in a single reginfo: 6258 * if EL3 is 64 bit, then trap to EL3 from S EL1, 6259 * reads as constant 0xc00 from NS EL1 and NS EL2 6260 * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 6261 * if v7 without EL3, register doesn't exist 6262 * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 6263 */ 6264 if (arm_feature(env, ARM_FEATURE_EL3)) { 6265 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 6266 ARMCPRegInfo nsacr = { 6267 .name = "NSACR", .type = ARM_CP_CONST, 6268 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 6269 .access = PL1_RW, .accessfn = nsacr_access, 6270 .resetvalue = 0xc00 6271 }; 6272 define_one_arm_cp_reg(cpu, &nsacr); 6273 } else { 6274 ARMCPRegInfo nsacr = { 6275 .name = "NSACR", 6276 .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, 6277 .access = PL3_RW | PL1_R, 6278 .resetvalue = 0, 6279 .fieldoffset = offsetof(CPUARMState, cp15.nsacr) 6280 }; 6281 define_one_arm_cp_reg(cpu, &nsacr); 6282 } 6283 } else { 6284 if (arm_feature(env, ARM_FEATURE_V8)) { 
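            /* v8 without EL3: NSACR reads as constant 0xc00 from NS EL1 and NS EL2. */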
            ARMCPRegInfo nsacr = {
                .name = "NSACR", .type = ARM_CP_CONST,
                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
                .access = PL1_R,
                .resetvalue = 0xc00
            };
            define_one_arm_cp_reg(cpu, &nsacr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (arm_feature(env, ARM_FEATURE_V6)) {
            /* PMSAv6 not implemented */
            assert(arm_feature(env, ARM_FEATURE_V7));
            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
        } else {
            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
        }
    } else {
        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
        /* TTBCR2 is introduced with ARMv8.2-A32HPD. */
        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
        }
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (i.e. a write causes an UNDEF exception).
     */
    {
        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
            /* Pre-v8 MIDR space.
             * Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .readfn = midr_read,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ.
*/ 6368 { .name = "DUMMY", 6369 .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, 6370 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6371 { .name = "DUMMY", 6372 .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, 6373 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6374 { .name = "DUMMY", 6375 .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, 6376 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6377 { .name = "DUMMY", 6378 .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, 6379 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6380 { .name = "DUMMY", 6381 .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, 6382 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6383 REGINFO_SENTINEL 6384 }; 6385 ARMCPRegInfo id_v8_midr_cp_reginfo[] = { 6386 { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, 6387 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, 6388 .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, 6389 .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), 6390 .readfn = midr_read }, 6391 /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */ 6392 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 6393 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 6394 .access = PL1_R, .resetvalue = cpu->midr }, 6395 { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, 6396 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, 6397 .access = PL1_R, .resetvalue = cpu->midr }, 6398 { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, 6399 .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, 6400 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, 6401 REGINFO_SENTINEL 6402 }; 6403 ARMCPRegInfo id_cp_reginfo[] = { 6404 /* These are common to v8 and pre-v8 */ 6405 { .name = "CTR", 6406 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, 6407 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 6408 { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, 6409 .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, 6410 .access = PL0_R, .accessfn = ctr_el0_access, 6411 .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, 6412 /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ 6413 { .name = "TCMTR", 6414 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, 6415 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, 6416 REGINFO_SENTINEL 6417 }; 6418 /* TLBTR is specific to VMSA */ 6419 ARMCPRegInfo id_tlbtr_reginfo = { 6420 .name = "TLBTR", 6421 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, 6422 .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0, 6423 }; 6424 /* MPUIR is specific to PMSA V6+ */ 6425 ARMCPRegInfo id_mpuir_reginfo = { 6426 .name = "MPUIR", 6427 .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, 6428 .access = PL1_R, .type = ARM_CP_CONST, 6429 .resetvalue = cpu->pmsav7_dregion << 8 6430 }; 6431 ARMCPRegInfo crn0_wi_reginfo = { 6432 .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, 6433 .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, 6434 .type = ARM_CP_NOP | ARM_CP_OVERRIDE 6435 }; 6436 #ifdef CONFIG_USER_ONLY 6437 ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { 6438 { .name = "MIDR_EL1", 6439 .exported_bits = 0x00000000ffffffff }, 6440 { .name = "REVIDR_EL1" }, 6441 REGUSERINFO_SENTINEL 6442 }; 6443 modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); 6444 #endif 6445 if (arm_feature(env, ARM_FEATURE_OMAPCP) || 6446 arm_feature(env, ARM_FEATURE_STRONGARM)) { 6447 ARMCPRegInfo *r; 6448 /* Register the blanket "writes 
ignored" value first to cover the 6449 * whole space. Then update the specific ID registers to allow write 6450 * access, so that they ignore writes rather than causing them to 6451 * UNDEF. 6452 */ 6453 define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); 6454 for (r = id_pre_v8_midr_cp_reginfo; 6455 r->type != ARM_CP_SENTINEL; r++) { 6456 r->access = PL1_RW; 6457 } 6458 for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) { 6459 r->access = PL1_RW; 6460 } 6461 id_mpuir_reginfo.access = PL1_RW; 6462 id_tlbtr_reginfo.access = PL1_RW; 6463 } 6464 if (arm_feature(env, ARM_FEATURE_V8)) { 6465 define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); 6466 } else { 6467 define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); 6468 } 6469 define_arm_cp_regs(cpu, id_cp_reginfo); 6470 if (!arm_feature(env, ARM_FEATURE_PMSA)) { 6471 define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); 6472 } else if (arm_feature(env, ARM_FEATURE_V7)) { 6473 define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); 6474 } 6475 } 6476 6477 if (arm_feature(env, ARM_FEATURE_MPIDR)) { 6478 ARMCPRegInfo mpidr_cp_reginfo[] = { 6479 { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, 6480 .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, 6481 .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, 6482 REGINFO_SENTINEL 6483 }; 6484 #ifdef CONFIG_USER_ONLY 6485 ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { 6486 { .name = "MPIDR_EL1", 6487 .fixed_bits = 0x0000000080000000 }, 6488 REGUSERINFO_SENTINEL 6489 }; 6490 modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); 6491 #endif 6492 define_arm_cp_regs(cpu, mpidr_cp_reginfo); 6493 } 6494 6495 if (arm_feature(env, ARM_FEATURE_AUXCR)) { 6496 ARMCPRegInfo auxcr_reginfo[] = { 6497 { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, 6498 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, 6499 .access = PL1_RW, .type = ARM_CP_CONST, 6500 .resetvalue = cpu->reset_auxcr }, 6501 { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, 6502 .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, 6503 .access = PL2_RW, .type = ARM_CP_CONST, 6504 .resetvalue = 0 }, 6505 { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, 6506 .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, 6507 .access = PL3_RW, .type = ARM_CP_CONST, 6508 .resetvalue = 0 }, 6509 REGINFO_SENTINEL 6510 }; 6511 define_arm_cp_regs(cpu, auxcr_reginfo); 6512 if (arm_feature(env, ARM_FEATURE_V8)) { 6513 /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */ 6514 ARMCPRegInfo hactlr2_reginfo = { 6515 .name = "HACTLR2", .state = ARM_CP_STATE_AA32, 6516 .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, 6517 .access = PL2_RW, .type = ARM_CP_CONST, 6518 .resetvalue = 0 6519 }; 6520 define_one_arm_cp_reg(cpu, &hactlr2_reginfo); 6521 } 6522 } 6523 6524 if (arm_feature(env, ARM_FEATURE_CBAR)) { 6525 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 6526 /* 32 bit view is [31:18] 0...0 [43:32]. 
*/ 6527 uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) 6528 | extract64(cpu->reset_cbar, 32, 12); 6529 ARMCPRegInfo cbar_reginfo[] = { 6530 { .name = "CBAR", 6531 .type = ARM_CP_CONST, 6532 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 6533 .access = PL1_R, .resetvalue = cpu->reset_cbar }, 6534 { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, 6535 .type = ARM_CP_CONST, 6536 .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, 6537 .access = PL1_R, .resetvalue = cbar32 }, 6538 REGINFO_SENTINEL 6539 }; 6540 /* We don't implement a r/w 64 bit CBAR currently */ 6541 assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); 6542 define_arm_cp_regs(cpu, cbar_reginfo); 6543 } else { 6544 ARMCPRegInfo cbar = { 6545 .name = "CBAR", 6546 .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, 6547 .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar, 6548 .fieldoffset = offsetof(CPUARMState, 6549 cp15.c15_config_base_address) 6550 }; 6551 if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { 6552 cbar.access = PL1_R; 6553 cbar.fieldoffset = 0; 6554 cbar.type = ARM_CP_CONST; 6555 } 6556 define_one_arm_cp_reg(cpu, &cbar); 6557 } 6558 } 6559 6560 if (arm_feature(env, ARM_FEATURE_VBAR)) { 6561 ARMCPRegInfo vbar_cp_reginfo[] = { 6562 { .name = "VBAR", .state = ARM_CP_STATE_BOTH, 6563 .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, 6564 .access = PL1_RW, .writefn = vbar_write, 6565 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), 6566 offsetof(CPUARMState, cp15.vbar_ns) }, 6567 .resetvalue = 0 }, 6568 REGINFO_SENTINEL 6569 }; 6570 define_arm_cp_regs(cpu, vbar_cp_reginfo); 6571 } 6572 6573 /* Generic registers whose values depend on the implementation */ 6574 { 6575 ARMCPRegInfo sctlr = { 6576 .name = "SCTLR", .state = ARM_CP_STATE_BOTH, 6577 .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, 6578 .access = PL1_RW, 6579 .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), 6580 offsetof(CPUARMState, cp15.sctlr_ns) }, 6581 .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, 6582 .raw_writefn = raw_write, 6583 }; 6584 if (arm_feature(env, ARM_FEATURE_XSCALE)) { 6585 /* Normally we would always end the TB on an SCTLR write, but Linux 6586 * arch/arm/mach-pxa/sleep.S expects two instructions following 6587 * an MMU enable to execute from cache. Imitate this behaviour. 6588 */ 6589 sctlr.type |= ARM_CP_SUPPRESS_TB_END; 6590 } 6591 define_one_arm_cp_reg(cpu, &sctlr); 6592 } 6593 6594 if (cpu_isar_feature(aa64_lor, cpu)) { 6595 /* 6596 * A trivial implementation of ARMv8.1-LOR leaves all of these 6597 * registers fixed at 0, which indicates that there are zero 6598 * supported Limited Ordering regions. 
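     * A guest reading LORID_EL1 therefore sees no regions to configure.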
6599 */ 6600 static const ARMCPRegInfo lor_reginfo[] = { 6601 { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, 6602 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, 6603 .access = PL1_RW, .accessfn = access_lor_other, 6604 .type = ARM_CP_CONST, .resetvalue = 0 }, 6605 { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, 6606 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, 6607 .access = PL1_RW, .accessfn = access_lor_other, 6608 .type = ARM_CP_CONST, .resetvalue = 0 }, 6609 { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, 6610 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, 6611 .access = PL1_RW, .accessfn = access_lor_other, 6612 .type = ARM_CP_CONST, .resetvalue = 0 }, 6613 { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, 6614 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, 6615 .access = PL1_RW, .accessfn = access_lor_other, 6616 .type = ARM_CP_CONST, .resetvalue = 0 }, 6617 { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, 6618 .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, 6619 .access = PL1_R, .accessfn = access_lorid, 6620 .type = ARM_CP_CONST, .resetvalue = 0 }, 6621 REGINFO_SENTINEL 6622 }; 6623 define_arm_cp_regs(cpu, lor_reginfo); 6624 } 6625 6626 if (cpu_isar_feature(aa64_sve, cpu)) { 6627 define_one_arm_cp_reg(cpu, &zcr_el1_reginfo); 6628 if (arm_feature(env, ARM_FEATURE_EL2)) { 6629 define_one_arm_cp_reg(cpu, &zcr_el2_reginfo); 6630 } else { 6631 define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo); 6632 } 6633 if (arm_feature(env, ARM_FEATURE_EL3)) { 6634 define_one_arm_cp_reg(cpu, &zcr_el3_reginfo); 6635 } 6636 } 6637 6638 #ifdef TARGET_AARCH64 6639 if (cpu_isar_feature(aa64_pauth, cpu)) { 6640 define_arm_cp_regs(cpu, pauth_reginfo); 6641 } 6642 #endif 6643 } 6644 6645 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) 6646 { 6647 CPUState *cs = CPU(cpu); 6648 CPUARMState *env = &cpu->env; 6649 6650 if (arm_feature(env, ARM_FEATURE_AARCH64)) { 6651 gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, 6652 aarch64_fpu_gdb_set_reg, 6653 34, "aarch64-fpu.xml", 0); 6654 } else if (arm_feature(env, ARM_FEATURE_NEON)) { 6655 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 6656 51, "arm-neon.xml", 0); 6657 } else if (arm_feature(env, ARM_FEATURE_VFP3)) { 6658 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 6659 35, "arm-vfp3.xml", 0); 6660 } else if (arm_feature(env, ARM_FEATURE_VFP)) { 6661 gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 6662 19, "arm-vfp.xml", 0); 6663 } 6664 gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, 6665 arm_gen_dynamic_xml(cs), 6666 "system-registers.xml", 0); 6667 } 6668 6669 /* Sort alphabetically by type name, except for "any". 
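 * ("any" is always sorted to the end of the CPU list.)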
*/ 6670 static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) 6671 { 6672 ObjectClass *class_a = (ObjectClass *)a; 6673 ObjectClass *class_b = (ObjectClass *)b; 6674 const char *name_a, *name_b; 6675 6676 name_a = object_class_get_name(class_a); 6677 name_b = object_class_get_name(class_b); 6678 if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { 6679 return 1; 6680 } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { 6681 return -1; 6682 } else { 6683 return strcmp(name_a, name_b); 6684 } 6685 } 6686 6687 static void arm_cpu_list_entry(gpointer data, gpointer user_data) 6688 { 6689 ObjectClass *oc = data; 6690 CPUListState *s = user_data; 6691 const char *typename; 6692 char *name; 6693 6694 typename = object_class_get_name(oc); 6695 name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); 6696 (*s->cpu_fprintf)(s->file, " %s\n", 6697 name); 6698 g_free(name); 6699 } 6700 6701 void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf) 6702 { 6703 CPUListState s = { 6704 .file = f, 6705 .cpu_fprintf = cpu_fprintf, 6706 }; 6707 GSList *list; 6708 6709 list = object_class_get_list(TYPE_ARM_CPU, false); 6710 list = g_slist_sort(list, arm_cpu_list_compare); 6711 (*cpu_fprintf)(f, "Available CPUs:\n"); 6712 g_slist_foreach(list, arm_cpu_list_entry, &s); 6713 g_slist_free(list); 6714 } 6715 6716 static void arm_cpu_add_definition(gpointer data, gpointer user_data) 6717 { 6718 ObjectClass *oc = data; 6719 CpuDefinitionInfoList **cpu_list = user_data; 6720 CpuDefinitionInfoList *entry; 6721 CpuDefinitionInfo *info; 6722 const char *typename; 6723 6724 typename = object_class_get_name(oc); 6725 info = g_malloc0(sizeof(*info)); 6726 info->name = g_strndup(typename, 6727 strlen(typename) - strlen("-" TYPE_ARM_CPU)); 6728 info->q_typename = g_strdup(typename); 6729 6730 entry = g_malloc0(sizeof(*entry)); 6731 entry->value = info; 6732 entry->next = *cpu_list; 6733 *cpu_list = entry; 6734 } 6735 6736 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp) 6737 { 6738 CpuDefinitionInfoList *cpu_list = NULL; 6739 GSList *list; 6740 6741 list = object_class_get_list(TYPE_ARM_CPU, false); 6742 g_slist_foreach(list, arm_cpu_add_definition, &cpu_list); 6743 g_slist_free(list); 6744 6745 return cpu_list; 6746 } 6747 6748 static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, 6749 void *opaque, int state, int secstate, 6750 int crm, int opc1, int opc2, 6751 const char *name) 6752 { 6753 /* Private utility function for define_one_arm_cp_reg_with_opaque(): 6754 * add a single reginfo struct to the hash table. 6755 */ 6756 uint32_t *key = g_new(uint32_t, 1); 6757 ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo)); 6758 int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0; 6759 int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0; 6760 6761 r2->name = g_strdup(name); 6762 /* Reset the secure state to the specific incoming state. This is 6763 * necessary as the register may have been defined with both states. 6764 */ 6765 r2->secure = secstate; 6766 6767 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 6768 /* Register is banked (using both entries in array). 6769 * Overwriting fieldoffset as the array is only used to define 6770 * banked registers but later only fieldoffset is used. 
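     * Index 0 of the array is the Secure bank and index 1 the Non-secure
     * bank; ns selects the entry matching the state being registered.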
6771 */ 6772 r2->fieldoffset = r->bank_fieldoffsets[ns]; 6773 } 6774 6775 if (state == ARM_CP_STATE_AA32) { 6776 if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) { 6777 /* If the register is banked then we don't need to migrate or 6778 * reset the 32-bit instance in certain cases: 6779 * 6780 * 1) If the register has both 32-bit and 64-bit instances then we 6781 * can count on the 64-bit instance taking care of the 6782 * non-secure bank. 6783 * 2) If ARMv8 is enabled then we can count on a 64-bit version 6784 * taking care of the secure bank. This requires that separate 6785 * 32 and 64-bit definitions are provided. 6786 */ 6787 if ((r->state == ARM_CP_STATE_BOTH && ns) || 6788 (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) { 6789 r2->type |= ARM_CP_ALIAS; 6790 } 6791 } else if ((secstate != r->secure) && !ns) { 6792 /* The register is not banked so we only want to allow migration of 6793 * the non-secure instance. 6794 */ 6795 r2->type |= ARM_CP_ALIAS; 6796 } 6797 6798 if (r->state == ARM_CP_STATE_BOTH) { 6799 /* We assume it is a cp15 register if the .cp field is left unset. 6800 */ 6801 if (r2->cp == 0) { 6802 r2->cp = 15; 6803 } 6804 6805 #ifdef HOST_WORDS_BIGENDIAN 6806 if (r2->fieldoffset) { 6807 r2->fieldoffset += sizeof(uint32_t); 6808 } 6809 #endif 6810 } 6811 } 6812 if (state == ARM_CP_STATE_AA64) { 6813 /* To allow abbreviation of ARMCPRegInfo 6814 * definitions, we treat cp == 0 as equivalent to 6815 * the value for "standard guest-visible sysreg". 6816 * STATE_BOTH definitions are also always "standard 6817 * sysreg" in their AArch64 view (the .cp value may 6818 * be non-zero for the benefit of the AArch32 view). 6819 */ 6820 if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) { 6821 r2->cp = CP_REG_ARM64_SYSREG_CP; 6822 } 6823 *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm, 6824 r2->opc0, opc1, opc2); 6825 } else { 6826 *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2); 6827 } 6828 if (opaque) { 6829 r2->opaque = opaque; 6830 } 6831 /* reginfo passed to helpers is correct for the actual access, 6832 * and is never ARM_CP_STATE_BOTH: 6833 */ 6834 r2->state = state; 6835 /* Make sure reginfo passed to helpers for wildcarded regs 6836 * has the correct crm/opc1/opc2 for this reg, not CP_ANY: 6837 */ 6838 r2->crm = crm; 6839 r2->opc1 = opc1; 6840 r2->opc2 = opc2; 6841 /* By convention, for wildcarded registers only the first 6842 * entry is used for migration; the others are marked as 6843 * ALIAS so we don't try to transfer the register 6844 * multiple times. Special registers (ie NOP/WFI) are 6845 * never migratable and not even raw-accessible. 6846 */ 6847 if ((r->type & ARM_CP_SPECIAL)) { 6848 r2->type |= ARM_CP_NO_RAW; 6849 } 6850 if (((r->crm == CP_ANY) && crm != 0) || 6851 ((r->opc1 == CP_ANY) && opc1 != 0) || 6852 ((r->opc2 == CP_ANY) && opc2 != 0)) { 6853 r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; 6854 } 6855 6856 /* Check that raw accesses are either forbidden or handled. Note that 6857 * we can't assert this earlier because the setup of fieldoffset for 6858 * banked registers has to be done first. 6859 */ 6860 if (!(r2->type & ARM_CP_NO_RAW)) { 6861 assert(!raw_accessors_invalid(r2)); 6862 } 6863 6864 /* Overriding of an existing definition must be explicitly 6865 * requested. 
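     * (Either the old or the new definition may carry ARM_CP_OVERRIDE.)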
6866 */ 6867 if (!(r->type & ARM_CP_OVERRIDE)) { 6868 ARMCPRegInfo *oldreg; 6869 oldreg = g_hash_table_lookup(cpu->cp_regs, key); 6870 if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) { 6871 fprintf(stderr, "Register redefined: cp=%d %d bit " 6872 "crn=%d crm=%d opc1=%d opc2=%d, " 6873 "was %s, now %s\n", r2->cp, 32 + 32 * is64, 6874 r2->crn, r2->crm, r2->opc1, r2->opc2, 6875 oldreg->name, r2->name); 6876 g_assert_not_reached(); 6877 } 6878 } 6879 g_hash_table_insert(cpu->cp_regs, key, r2); 6880 } 6881 6882 6883 void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, 6884 const ARMCPRegInfo *r, void *opaque) 6885 { 6886 /* Define implementations of coprocessor registers. 6887 * We store these in a hashtable because typically 6888 * there are less than 150 registers in a space which 6889 * is 16*16*16*8*8 = 262144 in size. 6890 * Wildcarding is supported for the crm, opc1 and opc2 fields. 6891 * If a register is defined twice then the second definition is 6892 * used, so this can be used to define some generic registers and 6893 * then override them with implementation specific variations. 6894 * At least one of the original and the second definition should 6895 * include ARM_CP_OVERRIDE in its type bits -- this is just a guard 6896 * against accidental use. 6897 * 6898 * The state field defines whether the register is to be 6899 * visible in the AArch32 or AArch64 execution state. If the 6900 * state is set to ARM_CP_STATE_BOTH then we synthesise a 6901 * reginfo structure for the AArch32 view, which sees the lower 6902 * 32 bits of the 64 bit register. 6903 * 6904 * Only registers visible in AArch64 may set r->opc0; opc0 cannot 6905 * be wildcarded. AArch64 registers are always considered to be 64 6906 * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of 6907 * the register, if any. 6908 */ 6909 int crm, opc1, opc2, state; 6910 int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; 6911 int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; 6912 int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; 6913 int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; 6914 int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; 6915 int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; 6916 /* 64 bit registers have only CRm and Opc1 fields */ 6917 assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); 6918 /* op0 only exists in the AArch64 encodings */ 6919 assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); 6920 /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ 6921 assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); 6922 /* The AArch64 pseudocode CheckSystemAccess() specifies that op1 6923 * encodes a minimum access level for the register. We roll this 6924 * runtime check into our general permission check code, so check 6925 * here that the reginfo's specified permissions are strict enough 6926 * to encompass the generic architectural permission check. 
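     * For example, opc1 == 4 implies a minimum EL of EL2, so such a
     * reginfo must not grant EL1 or EL0 access.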
6927 */ 6928 if (r->state != ARM_CP_STATE_AA32) { 6929 int mask = 0; 6930 switch (r->opc1) { 6931 case 0: 6932 /* min_EL EL1, but some accessible to EL0 via kernel ABI */ 6933 mask = PL0U_R | PL1_RW; 6934 break; 6935 case 1: case 2: 6936 /* min_EL EL1 */ 6937 mask = PL1_RW; 6938 break; 6939 case 3: 6940 /* min_EL EL0 */ 6941 mask = PL0_RW; 6942 break; 6943 case 4: 6944 /* min_EL EL2 */ 6945 mask = PL2_RW; 6946 break; 6947 case 5: 6948 /* unallocated encoding, so not possible */ 6949 assert(false); 6950 break; 6951 case 6: 6952 /* min_EL EL3 */ 6953 mask = PL3_RW; 6954 break; 6955 case 7: 6956 /* min_EL EL1, secure mode only (we don't check the latter) */ 6957 mask = PL1_RW; 6958 break; 6959 default: 6960 /* broken reginfo with out-of-range opc1 */ 6961 assert(false); 6962 break; 6963 } 6964 /* assert our permissions are not too lax (stricter is fine) */ 6965 assert((r->access & ~mask) == 0); 6966 } 6967 6968 /* Check that the register definition has enough info to handle 6969 * reads and writes if they are permitted. 6970 */ 6971 if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) { 6972 if (r->access & PL3_R) { 6973 assert((r->fieldoffset || 6974 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 6975 r->readfn); 6976 } 6977 if (r->access & PL3_W) { 6978 assert((r->fieldoffset || 6979 (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || 6980 r->writefn); 6981 } 6982 } 6983 /* Bad type field probably means missing sentinel at end of reg list */ 6984 assert(cptype_valid(r->type)); 6985 for (crm = crmmin; crm <= crmmax; crm++) { 6986 for (opc1 = opc1min; opc1 <= opc1max; opc1++) { 6987 for (opc2 = opc2min; opc2 <= opc2max; opc2++) { 6988 for (state = ARM_CP_STATE_AA32; 6989 state <= ARM_CP_STATE_AA64; state++) { 6990 if (r->state != state && r->state != ARM_CP_STATE_BOTH) { 6991 continue; 6992 } 6993 if (state == ARM_CP_STATE_AA32) { 6994 /* Under AArch32 CP registers can be common 6995 * (same for secure and non-secure world) or banked. 6996 */ 6997 char *name; 6998 6999 switch (r->secure) { 7000 case ARM_CP_SECSTATE_S: 7001 case ARM_CP_SECSTATE_NS: 7002 add_cpreg_to_hashtable(cpu, r, opaque, state, 7003 r->secure, crm, opc1, opc2, 7004 r->name); 7005 break; 7006 default: 7007 name = g_strdup_printf("%s_S", r->name); 7008 add_cpreg_to_hashtable(cpu, r, opaque, state, 7009 ARM_CP_SECSTATE_S, 7010 crm, opc1, opc2, name); 7011 g_free(name); 7012 add_cpreg_to_hashtable(cpu, r, opaque, state, 7013 ARM_CP_SECSTATE_NS, 7014 crm, opc1, opc2, r->name); 7015 break; 7016 } 7017 } else { 7018 /* AArch64 registers get mapped to non-secure instance 7019 * of AArch32 */ 7020 add_cpreg_to_hashtable(cpu, r, opaque, state, 7021 ARM_CP_SECSTATE_NS, 7022 crm, opc1, opc2, r->name); 7023 } 7024 } 7025 } 7026 } 7027 } 7028 } 7029 7030 void define_arm_cp_regs_with_opaque(ARMCPU *cpu, 7031 const ARMCPRegInfo *regs, void *opaque) 7032 { 7033 /* Define a whole list of registers */ 7034 const ARMCPRegInfo *r; 7035 for (r = regs; r->type != ARM_CP_SENTINEL; r++) { 7036 define_one_arm_cp_reg_with_opaque(cpu, r, opaque); 7037 } 7038 } 7039 7040 /* 7041 * Modify ARMCPRegInfo for access from userspace. 7042 * 7043 * This is a data driven modification directed by 7044 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as 7045 * user-space cannot alter any values and dynamic values pertaining to 7046 * execution state are hidden from user space view anyway. 
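 *
 * Each mods entry is matched against the reginfo list either by exact
 * name or, when .is_glob is set, as a glob pattern; a glob match forces
 * the reset value to zero, while an exact match applies .exported_bits
 * and .fixed_bits to it.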
 */
void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
{
    const ARMCPRegUserSpaceInfo *m;
    ARMCPRegInfo *r;

    for (m = mods; m->name; m++) {
        GPatternSpec *pat = NULL;
        if (m->is_glob) {
            pat = g_pattern_spec_new(m->name);
        }
        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
            if (pat && g_pattern_match_string(pat, r->name)) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue = 0;
                /* Keep scanning: a glob may match several registers */
            } else if (strcmp(r->name, m->name) == 0) {
                r->type = ARM_CP_CONST;
                r->access = PL0U_R;
                r->resetvalue &= m->exported_bits;
                r->resetvalue |= m->fixed_bits;
                break;
            }
        }
        if (pat) {
            g_pattern_spec_free(pat);
        }
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (i.e. all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */

    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
         * and CPS are treated as illegal mode changes.
7128 */ 7129 if (write_type == CPSRWriteByInstr && 7130 (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && 7131 (arm_hcr_el2_eff(env) & HCR_TGE)) { 7132 return 1; 7133 } 7134 return 0; 7135 case ARM_CPU_MODE_HYP: 7136 return !arm_feature(env, ARM_FEATURE_EL2) 7137 || arm_current_el(env) < 2 || arm_is_secure_below_el3(env); 7138 case ARM_CPU_MODE_MON: 7139 return arm_current_el(env) < 3; 7140 default: 7141 return 1; 7142 } 7143 } 7144 7145 uint32_t cpsr_read(CPUARMState *env) 7146 { 7147 int ZF; 7148 ZF = (env->ZF == 0); 7149 return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | 7150 (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) 7151 | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) 7152 | ((env->condexec_bits & 0xfc) << 8) 7153 | (env->GE << 16) | (env->daif & CPSR_AIF); 7154 } 7155 7156 void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, 7157 CPSRWriteType write_type) 7158 { 7159 uint32_t changed_daif; 7160 7161 if (mask & CPSR_NZCV) { 7162 env->ZF = (~val) & CPSR_Z; 7163 env->NF = val; 7164 env->CF = (val >> 29) & 1; 7165 env->VF = (val << 3) & 0x80000000; 7166 } 7167 if (mask & CPSR_Q) 7168 env->QF = ((val & CPSR_Q) != 0); 7169 if (mask & CPSR_T) 7170 env->thumb = ((val & CPSR_T) != 0); 7171 if (mask & CPSR_IT_0_1) { 7172 env->condexec_bits &= ~3; 7173 env->condexec_bits |= (val >> 25) & 3; 7174 } 7175 if (mask & CPSR_IT_2_7) { 7176 env->condexec_bits &= 3; 7177 env->condexec_bits |= (val >> 8) & 0xfc; 7178 } 7179 if (mask & CPSR_GE) { 7180 env->GE = (val >> 16) & 0xf; 7181 } 7182 7183 /* In a V7 implementation that includes the security extensions but does 7184 * not include Virtualization Extensions the SCR.FW and SCR.AW bits control 7185 * whether non-secure software is allowed to change the CPSR_F and CPSR_A 7186 * bits respectively. 7187 * 7188 * In a V8 implementation, it is permitted for privileged software to 7189 * change the CPSR A/F bits regardless of the SCR.AW/FW bits. 7190 */ 7191 if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && 7192 arm_feature(env, ARM_FEATURE_EL3) && 7193 !arm_feature(env, ARM_FEATURE_EL2) && 7194 !arm_is_secure(env)) { 7195 7196 changed_daif = (env->daif ^ val) & mask; 7197 7198 if (changed_daif & CPSR_A) { 7199 /* Check to see if we are allowed to change the masking of async 7200 * abort exceptions from a non-secure state. 7201 */ 7202 if (!(env->cp15.scr_el3 & SCR_AW)) { 7203 qemu_log_mask(LOG_GUEST_ERROR, 7204 "Ignoring attempt to switch CPSR_A flag from " 7205 "non-secure world with SCR.AW bit clear\n"); 7206 mask &= ~CPSR_A; 7207 } 7208 } 7209 7210 if (changed_daif & CPSR_F) { 7211 /* Check to see if we are allowed to change the masking of FIQ 7212 * exceptions from a non-secure state. 7213 */ 7214 if (!(env->cp15.scr_el3 & SCR_FW)) { 7215 qemu_log_mask(LOG_GUEST_ERROR, 7216 "Ignoring attempt to switch CPSR_F flag from " 7217 "non-secure world with SCR.FW bit clear\n"); 7218 mask &= ~CPSR_F; 7219 } 7220 7221 /* Check whether non-maskable FIQ (NMFI) support is enabled. 7222 * If this bit is set software is not allowed to mask 7223 * FIQs, but is allowed to set CPSR_F to 0. 
7224 */ 7225 if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && 7226 (val & CPSR_F)) { 7227 qemu_log_mask(LOG_GUEST_ERROR, 7228 "Ignoring attempt to enable CPSR_F flag " 7229 "(non-maskable FIQ [NMFI] support enabled)\n"); 7230 mask &= ~CPSR_F; 7231 } 7232 } 7233 } 7234 7235 env->daif &= ~(CPSR_AIF & mask); 7236 env->daif |= val & CPSR_AIF & mask; 7237 7238 if (write_type != CPSRWriteRaw && 7239 ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { 7240 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { 7241 /* Note that we can only get here in USR mode if this is a 7242 * gdb stub write; for this case we follow the architectural 7243 * behaviour for guest writes in USR mode of ignoring an attempt 7244 * to switch mode. (Those are caught by translate.c for writes 7245 * triggered by guest instructions.) 7246 */ 7247 mask &= ~CPSR_M; 7248 } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { 7249 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in 7250 * v7, and has defined behaviour in v8: 7251 * + leave CPSR.M untouched 7252 * + allow changes to the other CPSR fields 7253 * + set PSTATE.IL 7254 * For user changes via the GDB stub, we don't set PSTATE.IL, 7255 * as this would be unnecessarily harsh for a user error. 7256 */ 7257 mask &= ~CPSR_M; 7258 if (write_type != CPSRWriteByGDBStub && 7259 arm_feature(env, ARM_FEATURE_V8)) { 7260 mask |= CPSR_IL; 7261 val |= CPSR_IL; 7262 } 7263 qemu_log_mask(LOG_GUEST_ERROR, 7264 "Illegal AArch32 mode switch attempt from %s to %s\n", 7265 aarch32_mode_name(env->uncached_cpsr), 7266 aarch32_mode_name(val)); 7267 } else { 7268 qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", 7269 write_type == CPSRWriteExceptionReturn ? 7270 "Exception return from AArch32" : 7271 "AArch32 mode switch from", 7272 aarch32_mode_name(env->uncached_cpsr), 7273 aarch32_mode_name(val), env->regs[15]); 7274 switch_mode(env, val & CPSR_M); 7275 } 7276 } 7277 mask &= ~CACHED_CPSR_BITS; 7278 env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); 7279 } 7280 7281 /* Sign/zero extend */ 7282 uint32_t HELPER(sxtb16)(uint32_t x) 7283 { 7284 uint32_t res; 7285 res = (uint16_t)(int8_t)x; 7286 res |= (uint32_t)(int8_t)(x >> 16) << 16; 7287 return res; 7288 } 7289 7290 uint32_t HELPER(uxtb16)(uint32_t x) 7291 { 7292 uint32_t res; 7293 res = (uint16_t)(uint8_t)x; 7294 res |= (uint32_t)(uint8_t)(x >> 16) << 16; 7295 return res; 7296 } 7297 7298 int32_t HELPER(sdiv)(int32_t num, int32_t den) 7299 { 7300 if (den == 0) 7301 return 0; 7302 if (num == INT_MIN && den == -1) 7303 return INT_MIN; 7304 return num / den; 7305 } 7306 7307 uint32_t HELPER(udiv)(uint32_t num, uint32_t den) 7308 { 7309 if (den == 0) 7310 return 0; 7311 return num / den; 7312 } 7313 7314 uint32_t HELPER(rbit)(uint32_t x) 7315 { 7316 return revbit32(x); 7317 } 7318 7319 #ifdef CONFIG_USER_ONLY 7320 7321 /* These should probably raise undefined insn exceptions. 
*/ 7322 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 7323 { 7324 ARMCPU *cpu = arm_env_get_cpu(env); 7325 7326 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 7327 } 7328 7329 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 7330 { 7331 ARMCPU *cpu = arm_env_get_cpu(env); 7332 7333 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 7334 return 0; 7335 } 7336 7337 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 7338 { 7339 /* translate.c should never generate calls here in user-only mode */ 7340 g_assert_not_reached(); 7341 } 7342 7343 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 7344 { 7345 /* translate.c should never generate calls here in user-only mode */ 7346 g_assert_not_reached(); 7347 } 7348 7349 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 7350 { 7351 /* The TT instructions can be used by unprivileged code, but in 7352 * user-only emulation we don't have the MPU. 7353 * Luckily since we know we are NonSecure unprivileged (and that in 7354 * turn means that the A flag wasn't specified), all the bits in the 7355 * register must be zero: 7356 * IREGION: 0 because IRVALID is 0 7357 * IRVALID: 0 because NS 7358 * S: 0 because NS 7359 * NSRW: 0 because NS 7360 * NSR: 0 because NS 7361 * RW: 0 because unpriv and A flag not set 7362 * R: 0 because unpriv and A flag not set 7363 * SRVALID: 0 because NS 7364 * MRVALID: 0 because unpriv and A flag not set 7365 * SREGION: 0 becaus SRVALID is 0 7366 * MREGION: 0 because MRVALID is 0 7367 */ 7368 return 0; 7369 } 7370 7371 static void switch_mode(CPUARMState *env, int mode) 7372 { 7373 ARMCPU *cpu = arm_env_get_cpu(env); 7374 7375 if (mode != ARM_CPU_MODE_USR) { 7376 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); 7377 } 7378 } 7379 7380 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 7381 uint32_t cur_el, bool secure) 7382 { 7383 return 1; 7384 } 7385 7386 void aarch64_sync_64_to_32(CPUARMState *env) 7387 { 7388 g_assert_not_reached(); 7389 } 7390 7391 #else 7392 7393 static void switch_mode(CPUARMState *env, int mode) 7394 { 7395 int old_mode; 7396 int i; 7397 7398 old_mode = env->uncached_cpsr & CPSR_M; 7399 if (mode == old_mode) 7400 return; 7401 7402 if (old_mode == ARM_CPU_MODE_FIQ) { 7403 memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); 7404 memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); 7405 } else if (mode == ARM_CPU_MODE_FIQ) { 7406 memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); 7407 memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); 7408 } 7409 7410 i = bank_number(old_mode); 7411 env->banked_r13[i] = env->regs[13]; 7412 env->banked_spsr[i] = env->spsr; 7413 7414 i = bank_number(mode); 7415 env->regs[13] = env->banked_r13[i]; 7416 env->spsr = env->banked_spsr[i]; 7417 7418 env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; 7419 env->regs[14] = env->banked_r14[r14_bank_number(mode)]; 7420 } 7421 7422 /* Physical Interrupt Target EL Lookup Table 7423 * 7424 * [ From ARM ARM section G1.13.4 (Table G1-15) ] 7425 * 7426 * The below multi-dimensional table is used for looking up the target 7427 * exception level given numerous condition criteria. Specifically, the 7428 * target EL is based on SCR and HCR routing controls as well as the 7429 * currently executing EL and secure state. 
7430 * 7431 * Dimensions: 7432 * target_el_table[2][2][2][2][2][4] 7433 * | | | | | +--- Current EL 7434 * | | | | +------ Non-secure(0)/Secure(1) 7435 * | | | +--------- HCR mask override 7436 * | | +------------ SCR exec state control 7437 * | +--------------- SCR mask override 7438 * +------------------ 32-bit(0)/64-bit(1) EL3 7439 * 7440 * The table values are as such: 7441 * 0-3 = EL0-EL3 7442 * -1 = Cannot occur 7443 * 7444 * The ARM ARM target EL table includes entries indicating that an "exception 7445 * is not taken". The two cases where this is applicable are: 7446 * 1) An exception is taken from EL3 but the SCR does not have the exception 7447 * routed to EL3. 7448 * 2) An exception is taken from EL2 but the HCR does not have the exception 7449 * routed to EL2. 7450 * In these two cases, the below table contain a target of EL1. This value is 7451 * returned as it is expected that the consumer of the table data will check 7452 * for "target EL >= current EL" to ensure the exception is not taken. 7453 * 7454 * SCR HCR 7455 * 64 EA AMO From 7456 * BIT IRQ IMO Non-secure Secure 7457 * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 7458 */ 7459 static const int8_t target_el_table[2][2][2][2][2][4] = { 7460 {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 7461 {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, 7462 {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, 7463 {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, 7464 {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 7465 {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, 7466 {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, 7467 {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, 7468 {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, 7469 {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},}, 7470 {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },}, 7471 {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},}, 7472 {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 7473 {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, 7474 {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, 7475 {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},}, 7476 }; 7477 7478 /* 7479 * Determine the target EL for physical exceptions 7480 */ 7481 uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, 7482 uint32_t cur_el, bool secure) 7483 { 7484 CPUARMState *env = cs->env_ptr; 7485 bool rw; 7486 bool scr; 7487 bool hcr; 7488 int target_el; 7489 /* Is the highest EL AArch64? */ 7490 bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); 7491 uint64_t hcr_el2; 7492 7493 if (arm_feature(env, ARM_FEATURE_EL3)) { 7494 rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); 7495 } else { 7496 /* Either EL2 is the highest EL (and so the EL2 register width 7497 * is given by is64); or there is no EL2 or EL3, in which case 7498 * the value of 'rw' does not affect the table lookup anyway. 
7499 */ 7500 rw = is64; 7501 } 7502 7503 hcr_el2 = arm_hcr_el2_eff(env); 7504 switch (excp_idx) { 7505 case EXCP_IRQ: 7506 scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); 7507 hcr = hcr_el2 & HCR_IMO; 7508 break; 7509 case EXCP_FIQ: 7510 scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); 7511 hcr = hcr_el2 & HCR_FMO; 7512 break; 7513 default: 7514 scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); 7515 hcr = hcr_el2 & HCR_AMO; 7516 break; 7517 }; 7518 7519 /* Perform a table-lookup for the target EL given the current state */ 7520 target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; 7521 7522 assert(target_el > 0); 7523 7524 return target_el; 7525 } 7526 7527 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value, 7528 ARMMMUIdx mmu_idx, bool ignfault) 7529 { 7530 CPUState *cs = CPU(cpu); 7531 CPUARMState *env = &cpu->env; 7532 MemTxAttrs attrs = {}; 7533 MemTxResult txres; 7534 target_ulong page_size; 7535 hwaddr physaddr; 7536 int prot; 7537 ARMMMUFaultInfo fi = {}; 7538 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 7539 int exc; 7540 bool exc_secure; 7541 7542 if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr, 7543 &attrs, &prot, &page_size, &fi, NULL)) { 7544 /* MPU/SAU lookup failed */ 7545 if (fi.type == ARMFault_QEMU_SFault) { 7546 qemu_log_mask(CPU_LOG_INT, 7547 "...SecureFault with SFSR.AUVIOL during stacking\n"); 7548 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 7549 env->v7m.sfar = addr; 7550 exc = ARMV7M_EXCP_SECURE; 7551 exc_secure = false; 7552 } else { 7553 qemu_log_mask(CPU_LOG_INT, "...MemManageFault with CFSR.MSTKERR\n"); 7554 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK; 7555 exc = ARMV7M_EXCP_MEM; 7556 exc_secure = secure; 7557 } 7558 goto pend_fault; 7559 } 7560 address_space_stl_le(arm_addressspace(cs, attrs), physaddr, value, 7561 attrs, &txres); 7562 if (txres != MEMTX_OK) { 7563 /* BusFault trying to write the data */ 7564 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n"); 7565 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK; 7566 exc = ARMV7M_EXCP_BUS; 7567 exc_secure = false; 7568 goto pend_fault; 7569 } 7570 return true; 7571 7572 pend_fault: 7573 /* By pending the exception at this point we are making 7574 * the IMPDEF choice "overridden exceptions pended" (see the 7575 * MergeExcInfo() pseudocode). The other choice would be to not 7576 * pend them now and then make a choice about which to throw away 7577 * later if we have two derived exceptions. 7578 * The only case when we must not pend the exception but instead 7579 * throw it away is if we are doing the push of the callee registers 7580 * and we've already generated a derived exception. Even in this 7581 * case we will still update the fault status registers. 
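     * (The ignfault argument is how the caller requests the "throw it
     * away" behaviour for that case.)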
7582 */ 7583 if (!ignfault) { 7584 armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure); 7585 } 7586 return false; 7587 } 7588 7589 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr, 7590 ARMMMUIdx mmu_idx) 7591 { 7592 CPUState *cs = CPU(cpu); 7593 CPUARMState *env = &cpu->env; 7594 MemTxAttrs attrs = {}; 7595 MemTxResult txres; 7596 target_ulong page_size; 7597 hwaddr physaddr; 7598 int prot; 7599 ARMMMUFaultInfo fi = {}; 7600 bool secure = mmu_idx & ARM_MMU_IDX_M_S; 7601 int exc; 7602 bool exc_secure; 7603 uint32_t value; 7604 7605 if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr, 7606 &attrs, &prot, &page_size, &fi, NULL)) { 7607 /* MPU/SAU lookup failed */ 7608 if (fi.type == ARMFault_QEMU_SFault) { 7609 qemu_log_mask(CPU_LOG_INT, 7610 "...SecureFault with SFSR.AUVIOL during unstack\n"); 7611 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK; 7612 env->v7m.sfar = addr; 7613 exc = ARMV7M_EXCP_SECURE; 7614 exc_secure = false; 7615 } else { 7616 qemu_log_mask(CPU_LOG_INT, 7617 "...MemManageFault with CFSR.MUNSTKERR\n"); 7618 env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK; 7619 exc = ARMV7M_EXCP_MEM; 7620 exc_secure = secure; 7621 } 7622 goto pend_fault; 7623 } 7624 7625 value = address_space_ldl(arm_addressspace(cs, attrs), physaddr, 7626 attrs, &txres); 7627 if (txres != MEMTX_OK) { 7628 /* BusFault trying to read the data */ 7629 qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n"); 7630 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK; 7631 exc = ARMV7M_EXCP_BUS; 7632 exc_secure = false; 7633 goto pend_fault; 7634 } 7635 7636 *dest = value; 7637 return true; 7638 7639 pend_fault: 7640 /* By pending the exception at this point we are making 7641 * the IMPDEF choice "overridden exceptions pended" (see the 7642 * MergeExcInfo() pseudocode). The other choice would be to not 7643 * pend them now and then make a choice about which to throw away 7644 * later if we have two derived exceptions. 7645 */ 7646 armv7m_nvic_set_pending(env->nvic, exc, exc_secure); 7647 return false; 7648 } 7649 7650 /* Write to v7M CONTROL.SPSEL bit for the specified security bank. 7651 * This may change the current stack pointer between Main and Process 7652 * stack pointers if it is done for the CONTROL register for the current 7653 * security state. 7654 */ 7655 static void write_v7m_control_spsel_for_secstate(CPUARMState *env, 7656 bool new_spsel, 7657 bool secstate) 7658 { 7659 bool old_is_psp = v7m_using_psp(env); 7660 7661 env->v7m.control[secstate] = 7662 deposit32(env->v7m.control[secstate], 7663 R_V7M_CONTROL_SPSEL_SHIFT, 7664 R_V7M_CONTROL_SPSEL_LENGTH, new_spsel); 7665 7666 if (secstate == env->v7m.secure) { 7667 bool new_is_psp = v7m_using_psp(env); 7668 uint32_t tmp; 7669 7670 if (old_is_psp != new_is_psp) { 7671 tmp = env->v7m.other_sp; 7672 env->v7m.other_sp = env->regs[13]; 7673 env->regs[13] = tmp; 7674 } 7675 } 7676 } 7677 7678 /* Write to v7M CONTROL.SPSEL bit. This may change the current 7679 * stack pointer between Main and Process stack pointers. 7680 */ 7681 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel) 7682 { 7683 write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure); 7684 } 7685 7686 void write_v7m_exception(CPUARMState *env, uint32_t new_exc) 7687 { 7688 /* Write a new value to v7m.exception, thus transitioning into or out 7689 * of Handler mode; this may result in a change of active stack pointer. 
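     * (Handler mode always runs on the Main stack, so entering or leaving
     * exception number 0 may require swapping regs[13] with other_sp.)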
7690 */ 7691 bool new_is_psp, old_is_psp = v7m_using_psp(env); 7692 uint32_t tmp; 7693 7694 env->v7m.exception = new_exc; 7695 7696 new_is_psp = v7m_using_psp(env); 7697 7698 if (old_is_psp != new_is_psp) { 7699 tmp = env->v7m.other_sp; 7700 env->v7m.other_sp = env->regs[13]; 7701 env->regs[13] = tmp; 7702 } 7703 } 7704 7705 /* Switch M profile security state between NS and S */ 7706 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate) 7707 { 7708 uint32_t new_ss_msp, new_ss_psp; 7709 7710 if (env->v7m.secure == new_secstate) { 7711 return; 7712 } 7713 7714 /* All the banked state is accessed by looking at env->v7m.secure 7715 * except for the stack pointer; rearrange the SP appropriately. 7716 */ 7717 new_ss_msp = env->v7m.other_ss_msp; 7718 new_ss_psp = env->v7m.other_ss_psp; 7719 7720 if (v7m_using_psp(env)) { 7721 env->v7m.other_ss_psp = env->regs[13]; 7722 env->v7m.other_ss_msp = env->v7m.other_sp; 7723 } else { 7724 env->v7m.other_ss_msp = env->regs[13]; 7725 env->v7m.other_ss_psp = env->v7m.other_sp; 7726 } 7727 7728 env->v7m.secure = new_secstate; 7729 7730 if (v7m_using_psp(env)) { 7731 env->regs[13] = new_ss_psp; 7732 env->v7m.other_sp = new_ss_msp; 7733 } else { 7734 env->regs[13] = new_ss_msp; 7735 env->v7m.other_sp = new_ss_psp; 7736 } 7737 } 7738 7739 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest) 7740 { 7741 /* Handle v7M BXNS: 7742 * - if the return value is a magic value, do exception return (like BX) 7743 * - otherwise bit 0 of the return value is the target security state 7744 */ 7745 uint32_t min_magic; 7746 7747 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7748 /* Covers FNC_RETURN and EXC_RETURN magic */ 7749 min_magic = FNC_RETURN_MIN_MAGIC; 7750 } else { 7751 /* EXC_RETURN magic only */ 7752 min_magic = EXC_RETURN_MIN_MAGIC; 7753 } 7754 7755 if (dest >= min_magic) { 7756 /* This is an exception return magic value; put it where 7757 * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT. 7758 * Note that if we ever add gen_ss_advance() singlestep support to 7759 * M profile this should count as an "instruction execution complete" 7760 * event (compare gen_bx_excret_final_code()). 7761 */ 7762 env->regs[15] = dest & ~1; 7763 env->thumb = dest & 1; 7764 HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT); 7765 /* notreached */ 7766 } 7767 7768 /* translate.c should have made BXNS UNDEF unless we're secure */ 7769 assert(env->v7m.secure); 7770 7771 switch_v7m_security_state(env, dest & 1); 7772 env->thumb = 1; 7773 env->regs[15] = dest & ~1; 7774 } 7775 7776 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest) 7777 { 7778 /* Handle v7M BLXNS: 7779 * - bit 0 of the destination address is the target security state 7780 */ 7781 7782 /* At this point regs[15] is the address just after the BLXNS */ 7783 uint32_t nextinst = env->regs[15] | 1; 7784 uint32_t sp = env->regs[13] - 8; 7785 uint32_t saved_psr; 7786 7787 /* translate.c will have made BLXNS UNDEF unless we're secure */ 7788 assert(env->v7m.secure); 7789 7790 if (dest & 1) { 7791 /* target is Secure, so this is just a normal BLX, 7792 * except that the low bit doesn't indicate Thumb/not. 
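         * (M profile is Thumb-only, so env->thumb is set unconditionally.)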
7793 */ 7794 env->regs[14] = nextinst; 7795 env->thumb = 1; 7796 env->regs[15] = dest & ~1; 7797 return; 7798 } 7799 7800 /* Target is non-secure: first push a stack frame */ 7801 if (!QEMU_IS_ALIGNED(sp, 8)) { 7802 qemu_log_mask(LOG_GUEST_ERROR, 7803 "BLXNS with misaligned SP is UNPREDICTABLE\n"); 7804 } 7805 7806 if (sp < v7m_sp_limit(env)) { 7807 raise_exception(env, EXCP_STKOF, 0, 1); 7808 } 7809 7810 saved_psr = env->v7m.exception; 7811 if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) { 7812 saved_psr |= XPSR_SFPA; 7813 } 7814 7815 /* Note that these stores can throw exceptions on MPU faults */ 7816 cpu_stl_data(env, sp, nextinst); 7817 cpu_stl_data(env, sp + 4, saved_psr); 7818 7819 env->regs[13] = sp; 7820 env->regs[14] = 0xfeffffff; 7821 if (arm_v7m_is_handler_mode(env)) { 7822 /* Write a dummy value to IPSR, to avoid leaking the current secure 7823 * exception number to non-secure code. This is guaranteed not 7824 * to cause write_v7m_exception() to actually change stacks. 7825 */ 7826 write_v7m_exception(env, 1); 7827 } 7828 switch_v7m_security_state(env, 0); 7829 env->thumb = 1; 7830 env->regs[15] = dest; 7831 } 7832 7833 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode, 7834 bool spsel) 7835 { 7836 /* Return a pointer to the location where we currently store the 7837 * stack pointer for the requested security state and thread mode. 7838 * This pointer will become invalid if the CPU state is updated 7839 * such that the stack pointers are switched around (eg changing 7840 * the SPSEL control bit). 7841 * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode(). 7842 * Unlike that pseudocode, we require the caller to pass us in the 7843 * SPSEL control bit value; this is because we also use this 7844 * function in handling of pushing of the callee-saves registers 7845 * part of the v8M stack frame (pseudocode PushCalleeStack()), 7846 * and in the tailchain codepath the SPSEL bit comes from the exception 7847 * return magic LR value from the previous exception. The pseudocode 7848 * opencodes the stack-selection in PushCalleeStack(), but we prefer 7849 * to make this utility function generic enough to do the job. 7850 */ 7851 bool want_psp = threadmode && spsel; 7852 7853 if (secure == env->v7m.secure) { 7854 if (want_psp == v7m_using_psp(env)) { 7855 return &env->regs[13]; 7856 } else { 7857 return &env->v7m.other_sp; 7858 } 7859 } else { 7860 if (want_psp) { 7861 return &env->v7m.other_ss_psp; 7862 } else { 7863 return &env->v7m.other_ss_msp; 7864 } 7865 } 7866 } 7867 7868 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure, 7869 uint32_t *pvec) 7870 { 7871 CPUState *cs = CPU(cpu); 7872 CPUARMState *env = &cpu->env; 7873 MemTxResult result; 7874 uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4; 7875 uint32_t vector_entry; 7876 MemTxAttrs attrs = {}; 7877 ARMMMUIdx mmu_idx; 7878 bool exc_secure; 7879 7880 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true); 7881 7882 /* We don't do a get_phys_addr() here because the rules for vector 7883 * loads are special: they always use the default memory map, and 7884 * the default memory map permits reads from all addresses. 7885 * Since there's no easy way to pass through to pmsav8_mpu_lookup() 7886 * that we want this special case which would always say "yes", 7887 * we just do the SAU lookup here followed by a direct physical load. 
7888 */ 7889 attrs.secure = targets_secure; 7890 attrs.user = false; 7891 7892 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 7893 V8M_SAttributes sattrs = {}; 7894 7895 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 7896 if (sattrs.ns) { 7897 attrs.secure = false; 7898 } else if (!targets_secure) { 7899 /* NS access to S memory */ 7900 goto load_fail; 7901 } 7902 } 7903 7904 vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr, 7905 attrs, &result); 7906 if (result != MEMTX_OK) { 7907 goto load_fail; 7908 } 7909 *pvec = vector_entry; 7910 return true; 7911 7912 load_fail: 7913 /* All vector table fetch fails are reported as HardFault, with 7914 * HFSR.VECTTBL and .FORCED set. (FORCED is set because 7915 * technically the underlying exception is a MemManage or BusFault 7916 * that is escalated to HardFault.) This is a terminal exception, 7917 * so we will either take the HardFault immediately or else enter 7918 * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()). 7919 */ 7920 exc_secure = targets_secure || 7921 !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK); 7922 env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK | R_V7M_HFSR_FORCED_MASK; 7923 armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure); 7924 return false; 7925 } 7926 7927 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain, 7928 bool ignore_faults) 7929 { 7930 /* For v8M, push the callee-saves register part of the stack frame. 7931 * Compare the v8M pseudocode PushCalleeStack(). 7932 * In the tailchaining case this may not be the current stack. 7933 */ 7934 CPUARMState *env = &cpu->env; 7935 uint32_t *frame_sp_p; 7936 uint32_t frameptr; 7937 ARMMMUIdx mmu_idx; 7938 bool stacked_ok; 7939 uint32_t limit; 7940 bool want_psp; 7941 7942 if (dotailchain) { 7943 bool mode = lr & R_V7M_EXCRET_MODE_MASK; 7944 bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) || 7945 !mode; 7946 7947 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv); 7948 frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode, 7949 lr & R_V7M_EXCRET_SPSEL_MASK); 7950 want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK); 7951 if (want_psp) { 7952 limit = env->v7m.psplim[M_REG_S]; 7953 } else { 7954 limit = env->v7m.msplim[M_REG_S]; 7955 } 7956 } else { 7957 mmu_idx = arm_mmu_idx(env); 7958 frame_sp_p = &env->regs[13]; 7959 limit = v7m_sp_limit(env); 7960 } 7961 7962 frameptr = *frame_sp_p - 0x28; 7963 if (frameptr < limit) { 7964 /* 7965 * Stack limit failure: set SP to the limit value, and generate 7966 * STKOF UsageFault. Stack pushes below the limit must not be 7967 * performed. It is IMPDEF whether pushes above the limit are 7968 * performed; we choose not to. 7969 */ 7970 qemu_log_mask(CPU_LOG_INT, 7971 "...STKOF during callee-saves register stacking\n"); 7972 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; 7973 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 7974 env->v7m.secure); 7975 *frame_sp_p = limit; 7976 return true; 7977 } 7978 7979 /* Write as much of the stack frame as we can. A write failure may 7980 * cause us to pend a derived exception. 
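     * The 0xfefa125b value pushed first is the v8M stack frame integrity
     * signature; compare the PushCalleeStack() pseudocode.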
7981 */ 7982 stacked_ok = 7983 v7m_stack_write(cpu, frameptr, 0xfefa125b, mmu_idx, ignore_faults) && 7984 v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, 7985 ignore_faults) && 7986 v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, 7987 ignore_faults) && 7988 v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, 7989 ignore_faults) && 7990 v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, 7991 ignore_faults) && 7992 v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, 7993 ignore_faults) && 7994 v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, 7995 ignore_faults) && 7996 v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, 7997 ignore_faults) && 7998 v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, 7999 ignore_faults); 8000 8001 /* Update SP regardless of whether any of the stack accesses failed. */ 8002 *frame_sp_p = frameptr; 8003 8004 return !stacked_ok; 8005 } 8006 8007 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain, 8008 bool ignore_stackfaults) 8009 { 8010 /* Do the "take the exception" parts of exception entry, 8011 * but not the pushing of state to the stack. This is 8012 * similar to the pseudocode ExceptionTaken() function. 8013 */ 8014 CPUARMState *env = &cpu->env; 8015 uint32_t addr; 8016 bool targets_secure; 8017 int exc; 8018 bool push_failed = false; 8019 8020 armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure); 8021 qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n", 8022 targets_secure ? "secure" : "nonsecure", exc); 8023 8024 if (arm_feature(env, ARM_FEATURE_V8)) { 8025 if (arm_feature(env, ARM_FEATURE_M_SECURITY) && 8026 (lr & R_V7M_EXCRET_S_MASK)) { 8027 /* The background code (the owner of the registers in the 8028 * exception frame) is Secure. This means it may either already 8029 * have or now needs to push callee-saves registers. 8030 */ 8031 if (targets_secure) { 8032 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) { 8033 /* We took an exception from Secure to NonSecure 8034 * (which means the callee-saved registers got stacked) 8035 * and are now tailchaining to a Secure exception. 8036 * Clear DCRS so eventual return from this Secure 8037 * exception unstacks the callee-saved registers. 8038 */ 8039 lr &= ~R_V7M_EXCRET_DCRS_MASK; 8040 } 8041 } else { 8042 /* We're going to a non-secure exception; push the 8043 * callee-saves registers to the stack now, if they're 8044 * not already saved. 8045 */ 8046 if (lr & R_V7M_EXCRET_DCRS_MASK && 8047 !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) { 8048 push_failed = v7m_push_callee_stack(cpu, lr, dotailchain, 8049 ignore_stackfaults); 8050 } 8051 lr |= R_V7M_EXCRET_DCRS_MASK; 8052 } 8053 } 8054 8055 lr &= ~R_V7M_EXCRET_ES_MASK; 8056 if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) { 8057 lr |= R_V7M_EXCRET_ES_MASK; 8058 } 8059 lr &= ~R_V7M_EXCRET_SPSEL_MASK; 8060 if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) { 8061 lr |= R_V7M_EXCRET_SPSEL_MASK; 8062 } 8063 8064 /* Clear registers if necessary to prevent non-secure exception 8065 * code being able to see register values from secure code. 8066 * Where register values become architecturally UNKNOWN we leave 8067 * them with their previous values. 8068 */ 8069 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 8070 if (!targets_secure) { 8071 /* Always clear the caller-saved registers (they have been 8072 * pushed to the stack earlier in v7m_push_stack()). 
8073 * Clear callee-saved registers if the background code is 8074 * Secure (in which case these regs were saved in 8075 * v7m_push_callee_stack()). 8076 */ 8077 int i; 8078 8079 for (i = 0; i < 13; i++) { 8080 /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */ 8081 if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) { 8082 env->regs[i] = 0; 8083 } 8084 } 8085 /* Clear EAPSR */ 8086 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT); 8087 } 8088 } 8089 } 8090 8091 if (push_failed && !ignore_stackfaults) { 8092 /* Derived exception on callee-saves register stacking: 8093 * we might now want to take a different exception which 8094 * targets a different security state, so try again from the top. 8095 */ 8096 qemu_log_mask(CPU_LOG_INT, 8097 "...derived exception on callee-saves register stacking"); 8098 v7m_exception_taken(cpu, lr, true, true); 8099 return; 8100 } 8101 8102 if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) { 8103 /* Vector load failed: derived exception */ 8104 qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load"); 8105 v7m_exception_taken(cpu, lr, true, true); 8106 return; 8107 } 8108 8109 /* Now we've done everything that might cause a derived exception 8110 * we can go ahead and activate whichever exception we're going to 8111 * take (which might now be the derived exception). 8112 */ 8113 armv7m_nvic_acknowledge_irq(env->nvic); 8114 8115 /* Switch to target security state -- must do this before writing SPSEL */ 8116 switch_v7m_security_state(env, targets_secure); 8117 write_v7m_control_spsel(env, 0); 8118 arm_clear_exclusive(env); 8119 /* Clear IT bits */ 8120 env->condexec_bits = 0; 8121 env->regs[14] = lr; 8122 env->regs[15] = addr & 0xfffffffe; 8123 env->thumb = addr & 1; 8124 } 8125 8126 static bool v7m_push_stack(ARMCPU *cpu) 8127 { 8128 /* Do the "set up stack frame" part of exception entry, 8129 * similar to pseudocode PushStack(). 8130 * Return true if we generate a derived exception (and so 8131 * should ignore further stack faults trying to process 8132 * that derived exception.) 8133 */ 8134 bool stacked_ok; 8135 CPUARMState *env = &cpu->env; 8136 uint32_t xpsr = xpsr_read(env); 8137 uint32_t frameptr = env->regs[13]; 8138 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 8139 8140 /* Align stack pointer if the guest wants that */ 8141 if ((frameptr & 4) && 8142 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) { 8143 frameptr -= 4; 8144 xpsr |= XPSR_SPREALIGN; 8145 } 8146 8147 frameptr -= 0x20; 8148 8149 if (arm_feature(env, ARM_FEATURE_V8)) { 8150 uint32_t limit = v7m_sp_limit(env); 8151 8152 if (frameptr < limit) { 8153 /* 8154 * Stack limit failure: set SP to the limit value, and generate 8155 * STKOF UsageFault. Stack pushes below the limit must not be 8156 * performed. It is IMPDEF whether pushes above the limit are 8157 * performed; we choose not to. 8158 */ 8159 qemu_log_mask(CPU_LOG_INT, 8160 "...STKOF during stacking\n"); 8161 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; 8162 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 8163 env->v7m.secure); 8164 env->regs[13] = limit; 8165 return true; 8166 } 8167 } 8168 8169 /* Write as much of the stack frame as we can. If we fail a stack 8170 * write this will result in a derived exception being pended 8171 * (which may be taken in preference to the one we started with 8172 * if it has higher priority). 
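 *
 * For reference, the eight words stored below form the basic 0x20-byte
 * M-profile exception frame:
 *     [frameptr + 0x00] r0    [frameptr + 0x10] r12
 *     [frameptr + 0x04] r1    [frameptr + 0x14] lr (r14)
 *     [frameptr + 0x08] r2    [frameptr + 0x18] return address (r15)
 *     [frameptr + 0x0c] r3    [frameptr + 0x1c] xPSR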
8173 */ 8174 stacked_ok = 8175 v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, false) && 8176 v7m_stack_write(cpu, frameptr + 4, env->regs[1], mmu_idx, false) && 8177 v7m_stack_write(cpu, frameptr + 8, env->regs[2], mmu_idx, false) && 8178 v7m_stack_write(cpu, frameptr + 12, env->regs[3], mmu_idx, false) && 8179 v7m_stack_write(cpu, frameptr + 16, env->regs[12], mmu_idx, false) && 8180 v7m_stack_write(cpu, frameptr + 20, env->regs[14], mmu_idx, false) && 8181 v7m_stack_write(cpu, frameptr + 24, env->regs[15], mmu_idx, false) && 8182 v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, false); 8183 8184 /* Update SP regardless of whether any of the stack accesses failed. */ 8185 env->regs[13] = frameptr; 8186 8187 return !stacked_ok; 8188 } 8189 8190 static void do_v7m_exception_exit(ARMCPU *cpu) 8191 { 8192 CPUARMState *env = &cpu->env; 8193 uint32_t excret; 8194 uint32_t xpsr; 8195 bool ufault = false; 8196 bool sfault = false; 8197 bool return_to_sp_process; 8198 bool return_to_handler; 8199 bool rettobase = false; 8200 bool exc_secure = false; 8201 bool return_to_secure; 8202 8203 /* If we're not in Handler mode then jumps to magic exception-exit 8204 * addresses don't have magic behaviour. However for the v8M 8205 * security extensions the magic secure-function-return has to 8206 * work in thread mode too, so to avoid doing an extra check in 8207 * the generated code we allow exception-exit magic to also cause the 8208 * internal exception and bring us here in thread mode. Correct code 8209 * will never try to do this (the following insn fetch will always 8210 * fault) so the overhead of having taken an unnecessary exception 8211 * doesn't matter. 8212 */ 8213 if (!arm_v7m_is_handler_mode(env)) { 8214 return; 8215 } 8216 8217 /* In the spec pseudocode ExceptionReturn() is called directly 8218 * from BXWritePC() and gets the full target PC value including 8219 * bit zero. In QEMU's implementation we treat it as a normal 8220 * jump-to-register (which is then caught later on), and so split 8221 * the target value up between env->regs[15] and env->thumb in 8222 * gen_bx(). Reconstitute it. 8223 */ 8224 excret = env->regs[15]; 8225 if (env->thumb) { 8226 excret |= 1; 8227 } 8228 8229 qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32 8230 " previous exception %d\n", 8231 excret, env->v7m.exception); 8232 8233 if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) { 8234 qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception " 8235 "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", 8236 excret); 8237 } 8238 8239 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 8240 /* EXC_RETURN.ES validation check (R_SMFL). We must do this before 8241 * we pick which FAULTMASK to clear. 8242 */ 8243 if (!env->v7m.secure && 8244 ((excret & R_V7M_EXCRET_ES_MASK) || 8245 !(excret & R_V7M_EXCRET_DCRS_MASK))) { 8246 sfault = 1; 8247 /* For all other purposes, treat ES as 0 (R_HXSR) */ 8248 excret &= ~R_V7M_EXCRET_ES_MASK; 8249 } 8250 exc_secure = excret & R_V7M_EXCRET_ES_MASK; 8251 } 8252 8253 if (env->v7m.exception != ARMV7M_EXCP_NMI) { 8254 /* Auto-clear FAULTMASK on return from other than NMI. 8255 * If the security extension is implemented then this only 8256 * happens if the raw execution priority is >= 0; the 8257 * value of the ES bit in the exception return value indicates 8258 * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
8259 */ 8260 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 8261 if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) { 8262 env->v7m.faultmask[exc_secure] = 0; 8263 } 8264 } else { 8265 env->v7m.faultmask[M_REG_NS] = 0; 8266 } 8267 } 8268 8269 switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception, 8270 exc_secure)) { 8271 case -1: 8272 /* attempt to exit an exception that isn't active */ 8273 ufault = true; 8274 break; 8275 case 0: 8276 /* still an irq active now */ 8277 break; 8278 case 1: 8279 /* we returned to base exception level, no nesting. 8280 * (In the pseudocode this is written using "NestedActivation != 1" 8281 * where we have 'rettobase == false'.) 8282 */ 8283 rettobase = true; 8284 break; 8285 default: 8286 g_assert_not_reached(); 8287 } 8288 8289 return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK); 8290 return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK; 8291 return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) && 8292 (excret & R_V7M_EXCRET_S_MASK); 8293 8294 if (arm_feature(env, ARM_FEATURE_V8)) { 8295 if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) { 8296 /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP); 8297 * we choose to take the UsageFault. 8298 */ 8299 if ((excret & R_V7M_EXCRET_S_MASK) || 8300 (excret & R_V7M_EXCRET_ES_MASK) || 8301 !(excret & R_V7M_EXCRET_DCRS_MASK)) { 8302 ufault = true; 8303 } 8304 } 8305 if (excret & R_V7M_EXCRET_RES0_MASK) { 8306 ufault = true; 8307 } 8308 } else { 8309 /* For v7M we only recognize certain combinations of the low bits */ 8310 switch (excret & 0xf) { 8311 case 1: /* Return to Handler */ 8312 break; 8313 case 13: /* Return to Thread using Process stack */ 8314 case 9: /* Return to Thread using Main stack */ 8315 /* We only need to check NONBASETHRDENA for v7M, because in 8316 * v8M this bit does not exist (it is RES1). 8317 */ 8318 if (!rettobase && 8319 !(env->v7m.ccr[env->v7m.secure] & 8320 R_V7M_CCR_NONBASETHRDENA_MASK)) { 8321 ufault = true; 8322 } 8323 break; 8324 default: 8325 ufault = true; 8326 } 8327 } 8328 8329 /* 8330 * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in 8331 * Handler mode (and will be until we write the new XPSR.Interrupt 8332 * field) this does not switch around the current stack pointer. 8333 * We must do this before we do any kind of tailchaining, including 8334 * for the derived exceptions on integrity check failures, or we will 8335 * give the guest an incorrect EXCRET.SPSEL value on exception entry. 8336 */ 8337 write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure); 8338 8339 if (sfault) { 8340 env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK; 8341 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 8342 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 8343 "stackframe: failed EXC_RETURN.ES validity check\n"); 8344 v7m_exception_taken(cpu, excret, true, false); 8345 return; 8346 } 8347 8348 if (ufault) { 8349 /* Bad exception return: instead of popping the exception 8350 * stack, directly take a usage fault on the current stack. 
8351 */ 8352 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 8353 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 8354 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 8355 "stackframe: failed exception return integrity check\n"); 8356 v7m_exception_taken(cpu, excret, true, false); 8357 return; 8358 } 8359 8360 /* 8361 * Tailchaining: if there is currently a pending exception that 8362 * is high enough priority to preempt execution at the level we're 8363 * about to return to, then just directly take that exception now, 8364 * avoiding an unstack-and-then-stack. Note that now we have 8365 * deactivated the previous exception by calling armv7m_nvic_complete_irq() 8366 * our current execution priority is already the execution priority we are 8367 * returning to -- none of the state we would unstack or set based on 8368 * the EXCRET value affects it. 8369 */ 8370 if (armv7m_nvic_can_take_pending_exception(env->nvic)) { 8371 qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n"); 8372 v7m_exception_taken(cpu, excret, true, false); 8373 return; 8374 } 8375 8376 switch_v7m_security_state(env, return_to_secure); 8377 8378 { 8379 /* The stack pointer we should be reading the exception frame from 8380 * depends on bits in the magic exception return type value (and 8381 * for v8M isn't necessarily the stack pointer we will eventually 8382 * end up resuming execution with). Get a pointer to the location 8383 * in the CPU state struct where the SP we need is currently being 8384 * stored; we will use and modify it in place. 8385 * We use this limited C variable scope so we don't accidentally 8386 * use 'frame_sp_p' after we do something that makes it invalid. 8387 */ 8388 uint32_t *frame_sp_p = get_v7m_sp_ptr(env, 8389 return_to_secure, 8390 !return_to_handler, 8391 return_to_sp_process); 8392 uint32_t frameptr = *frame_sp_p; 8393 bool pop_ok = true; 8394 ARMMMUIdx mmu_idx; 8395 bool return_to_priv = return_to_handler || 8396 !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK); 8397 8398 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure, 8399 return_to_priv); 8400 8401 if (!QEMU_IS_ALIGNED(frameptr, 8) && 8402 arm_feature(env, ARM_FEATURE_V8)) { 8403 qemu_log_mask(LOG_GUEST_ERROR, 8404 "M profile exception return with non-8-aligned SP " 8405 "for destination state is UNPREDICTABLE\n"); 8406 } 8407 8408 /* Do we need to pop callee-saved registers? 
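 * Only if we are returning to the Secure state and the EXCRET value
 * indicates they were stacked (ES clear or DCRS clear). In that case the
 * first word unstacked below is checked against the 0xfefa125b integrity
 * signature written by v7m_push_callee_stack() before r4-r11 are restored
 * and the frame pointer is advanced by 0x28.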
*/ 8409 if (return_to_secure && 8410 ((excret & R_V7M_EXCRET_ES_MASK) == 0 || 8411 (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) { 8412 uint32_t expected_sig = 0xfefa125b; 8413 uint32_t actual_sig; 8414 8415 pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx); 8416 8417 if (pop_ok && expected_sig != actual_sig) { 8418 /* Take a SecureFault on the current stack */ 8419 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK; 8420 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 8421 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing " 8422 "stackframe: failed exception return integrity " 8423 "signature check\n"); 8424 v7m_exception_taken(cpu, excret, true, false); 8425 return; 8426 } 8427 8428 pop_ok = pop_ok && 8429 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) && 8430 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) && 8431 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) && 8432 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) && 8433 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) && 8434 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) && 8435 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) && 8436 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx); 8437 8438 frameptr += 0x28; 8439 } 8440 8441 /* Pop registers */ 8442 pop_ok = pop_ok && 8443 v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) && 8444 v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) && 8445 v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) && 8446 v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) && 8447 v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) && 8448 v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) && 8449 v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) && 8450 v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx); 8451 8452 if (!pop_ok) { 8453 /* v7m_stack_read() pended a fault, so take it (as a tail 8454 * chained exception on the same stack frame) 8455 */ 8456 qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n"); 8457 v7m_exception_taken(cpu, excret, true, false); 8458 return; 8459 } 8460 8461 /* Returning from an exception with a PC with bit 0 set is defined 8462 * behaviour on v8M (bit 0 is ignored), but for v7M it was specified 8463 * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore 8464 * the lsbit, and there are several RTOSes out there which incorrectly 8465 * assume the r15 in the stack frame should be a Thumb-style "lsbit 8466 * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but 8467 * complain about the badly behaved guest. 8468 */ 8469 if (env->regs[15] & 1) { 8470 env->regs[15] &= ~1U; 8471 if (!arm_feature(env, ARM_FEATURE_V8)) { 8472 qemu_log_mask(LOG_GUEST_ERROR, 8473 "M profile return from interrupt with misaligned " 8474 "PC is UNPREDICTABLE on v7M\n"); 8475 } 8476 } 8477 8478 if (arm_feature(env, ARM_FEATURE_V8)) { 8479 /* For v8M we have to check whether the xPSR exception field 8480 * matches the EXCRET value for return to handler/thread 8481 * before we commit to changing the SP and xPSR. 8482 */ 8483 bool will_be_handler = (xpsr & XPSR_EXCP) != 0; 8484 if (return_to_handler != will_be_handler) { 8485 /* Take an INVPC UsageFault on the current stack. 8486 * By this point we will have switched to the security state 8487 * for the background state, so this UsageFault will target 8488 * that state. 
8489 */ 8490 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 8491 env->v7m.secure); 8492 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 8493 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing " 8494 "stackframe: failed exception return integrity " 8495 "check\n"); 8496 v7m_exception_taken(cpu, excret, true, false); 8497 return; 8498 } 8499 } 8500 8501 /* Commit to consuming the stack frame */ 8502 frameptr += 0x20; 8503 /* Undo stack alignment (the SPREALIGN bit indicates that the original 8504 * pre-exception SP was not 8-aligned and we added a padding word to 8505 * align it, so we undo this by ORing in the bit that increases it 8506 * from the current 8-aligned value to the 8-unaligned value. (Adding 4 8507 * would work too but a logical OR is how the pseudocode specifies it.) 8508 */ 8509 if (xpsr & XPSR_SPREALIGN) { 8510 frameptr |= 4; 8511 } 8512 *frame_sp_p = frameptr; 8513 } 8514 /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */ 8515 xpsr_write(env, xpsr, ~XPSR_SPREALIGN); 8516 8517 /* The restored xPSR exception field will be zero if we're 8518 * resuming in Thread mode. If that doesn't match what the 8519 * exception return excret specified then this is a UsageFault. 8520 * v7M requires we make this check here; v8M did it earlier. 8521 */ 8522 if (return_to_handler != arm_v7m_is_handler_mode(env)) { 8523 /* Take an INVPC UsageFault by pushing the stack again; 8524 * we know we're v7M so this is never a Secure UsageFault. 8525 */ 8526 bool ignore_stackfaults; 8527 8528 assert(!arm_feature(env, ARM_FEATURE_V8)); 8529 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false); 8530 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 8531 ignore_stackfaults = v7m_push_stack(cpu); 8532 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: " 8533 "failed exception return integrity check\n"); 8534 v7m_exception_taken(cpu, excret, false, ignore_stackfaults); 8535 return; 8536 } 8537 8538 /* Otherwise, we have a successful exception exit. */ 8539 arm_clear_exclusive(env); 8540 qemu_log_mask(CPU_LOG_INT, "...successful exception return\n"); 8541 } 8542 8543 static bool do_v7m_function_return(ARMCPU *cpu) 8544 { 8545 /* v8M security extensions magic function return. 8546 * We may either: 8547 * (1) throw an exception (longjump) 8548 * (2) return true if we successfully handled the function return 8549 * (3) return false if we failed a consistency check and have 8550 * pended a UsageFault that needs to be taken now 8551 * 8552 * At this point the magic return value is split between env->regs[15] 8553 * and env->thumb. We don't bother to reconstitute it because we don't 8554 * need it (all values are handled the same way). 8555 */ 8556 CPUARMState *env = &cpu->env; 8557 uint32_t newpc, newpsr, newpsr_exc; 8558 8559 qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n"); 8560 8561 { 8562 bool threadmode, spsel; 8563 TCGMemOpIdx oi; 8564 ARMMMUIdx mmu_idx; 8565 uint32_t *frame_sp_p; 8566 uint32_t frameptr; 8567 8568 /* Pull the return address and IPSR from the Secure stack */ 8569 threadmode = !arm_v7m_is_handler_mode(env); 8570 spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK; 8571 8572 frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel); 8573 frameptr = *frame_sp_p; 8574 8575 /* These loads may throw an exception (for MPU faults). We want to 8576 * do them as secure, so work out what MMU index that is. 
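 *
 * For reference, the frame read below is just the two words that the
 * BLXNS helper pushed on the Secure stack: the return address (whose
 * bit 0 becomes the Thumb bit) at [frameptr], and the saved partial xPSR
 * (exception number plus the SFPA flag) at [frameptr + 4]. The Secure SP
 * is then advanced past them once the consistency check passes.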
8577 */ 8578 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 8579 oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); 8580 newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); 8581 newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); 8582 8583 /* Consistency checks on new IPSR */ 8584 newpsr_exc = newpsr & XPSR_EXCP; 8585 if (!((env->v7m.exception == 0 && newpsr_exc == 0) || 8586 (env->v7m.exception == 1 && newpsr_exc != 0))) { 8587 /* Pend the fault and tell our caller to take it */ 8588 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK; 8589 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, 8590 env->v7m.secure); 8591 qemu_log_mask(CPU_LOG_INT, 8592 "...taking INVPC UsageFault: " 8593 "IPSR consistency check failed\n"); 8594 return false; 8595 } 8596 8597 *frame_sp_p = frameptr + 8; 8598 } 8599 8600 /* This invalidates frame_sp_p */ 8601 switch_v7m_security_state(env, true); 8602 env->v7m.exception = newpsr_exc; 8603 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK; 8604 if (newpsr & XPSR_SFPA) { 8605 env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK; 8606 } 8607 xpsr_write(env, 0, XPSR_IT); 8608 env->thumb = newpc & 1; 8609 env->regs[15] = newpc & ~1; 8610 8611 qemu_log_mask(CPU_LOG_INT, "...function return successful\n"); 8612 return true; 8613 } 8614 8615 static void arm_log_exception(int idx) 8616 { 8617 if (qemu_loglevel_mask(CPU_LOG_INT)) { 8618 const char *exc = NULL; 8619 static const char * const excnames[] = { 8620 [EXCP_UDEF] = "Undefined Instruction", 8621 [EXCP_SWI] = "SVC", 8622 [EXCP_PREFETCH_ABORT] = "Prefetch Abort", 8623 [EXCP_DATA_ABORT] = "Data Abort", 8624 [EXCP_IRQ] = "IRQ", 8625 [EXCP_FIQ] = "FIQ", 8626 [EXCP_BKPT] = "Breakpoint", 8627 [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", 8628 [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", 8629 [EXCP_HVC] = "Hypervisor Call", 8630 [EXCP_HYP_TRAP] = "Hypervisor Trap", 8631 [EXCP_SMC] = "Secure Monitor Call", 8632 [EXCP_VIRQ] = "Virtual IRQ", 8633 [EXCP_VFIQ] = "Virtual FIQ", 8634 [EXCP_SEMIHOST] = "Semihosting call", 8635 [EXCP_NOCP] = "v7M NOCP UsageFault", 8636 [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", 8637 [EXCP_STKOF] = "v8M STKOF UsageFault", 8638 }; 8639 8640 if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { 8641 exc = excnames[idx]; 8642 } 8643 if (!exc) { 8644 exc = "unknown"; 8645 } 8646 qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc); 8647 } 8648 } 8649 8650 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, 8651 uint32_t addr, uint16_t *insn) 8652 { 8653 /* Load a 16-bit portion of a v7M instruction, returning true on success, 8654 * or false on failure (in which case we will have pended the appropriate 8655 * exception). 8656 * We need to do the instruction fetch's MPU and SAU checks 8657 * like this because there is no MMU index that would allow 8658 * doing the load with a single function call. Instead we must 8659 * first check that the security attributes permit the load 8660 * and that they don't mismatch on the two halves of the instruction, 8661 * and then we do the load as a secure load (ie using the security 8662 * attributes of the address, not the CPU, as architecturally required). 
8663 */ 8664 CPUState *cs = CPU(cpu); 8665 CPUARMState *env = &cpu->env; 8666 V8M_SAttributes sattrs = {}; 8667 MemTxAttrs attrs = {}; 8668 ARMMMUFaultInfo fi = {}; 8669 MemTxResult txres; 8670 target_ulong page_size; 8671 hwaddr physaddr; 8672 int prot; 8673 8674 v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs); 8675 if (!sattrs.nsc || sattrs.ns) { 8676 /* This must be the second half of the insn, and it straddles a 8677 * region boundary with the second half not being S&NSC. 8678 */ 8679 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 8680 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 8681 qemu_log_mask(CPU_LOG_INT, 8682 "...really SecureFault with SFSR.INVEP\n"); 8683 return false; 8684 } 8685 if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, 8686 &physaddr, &attrs, &prot, &page_size, &fi, NULL)) { 8687 /* the MPU lookup failed */ 8688 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 8689 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure); 8690 qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n"); 8691 return false; 8692 } 8693 *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr, 8694 attrs, &txres); 8695 if (txres != MEMTX_OK) { 8696 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 8697 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 8698 qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n"); 8699 return false; 8700 } 8701 return true; 8702 } 8703 8704 static bool v7m_handle_execute_nsc(ARMCPU *cpu) 8705 { 8706 /* Check whether this attempt to execute code in a Secure & NS-Callable 8707 * memory region is for an SG instruction; if so, then emulate the 8708 * effect of the SG instruction and return true. Otherwise pend 8709 * the correct kind of exception and return false. 8710 */ 8711 CPUARMState *env = &cpu->env; 8712 ARMMMUIdx mmu_idx; 8713 uint16_t insn; 8714 8715 /* We should never get here unless get_phys_addr_pmsav8() caused 8716 * an exception for NS executing in S&NSC memory. 8717 */ 8718 assert(!env->v7m.secure); 8719 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 8720 8721 /* We want to do the MPU lookup as secure; work out what mmu_idx that is */ 8722 mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); 8723 8724 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) { 8725 return false; 8726 } 8727 8728 if (!env->thumb) { 8729 goto gen_invep; 8730 } 8731 8732 if (insn != 0xe97f) { 8733 /* Not an SG instruction first half (we choose the IMPDEF 8734 * early-SG-check option). 8735 */ 8736 goto gen_invep; 8737 } 8738 8739 if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) { 8740 return false; 8741 } 8742 8743 if (insn != 0xe97f) { 8744 /* Not an SG instruction second half (yes, both halves of the SG 8745 * insn have the same hex value) 8746 */ 8747 goto gen_invep; 8748 } 8749 8750 /* OK, we have confirmed that we really have an SG instruction. 8751 * We know we're NS in S memory so don't need to repeat those checks. 
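 * The emulation below applies the architectural effects of SG: clear
 * bit 0 of LR (recording that the caller was Non-secure), switch to the
 * Secure state, clear the IT bits and advance the PC past the 4-byte
 * instruction.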
8752 */ 8753 qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32 8754 ", executing it\n", env->regs[15]); 8755 env->regs[14] &= ~1; 8756 switch_v7m_security_state(env, true); 8757 xpsr_write(env, 0, XPSR_IT); 8758 env->regs[15] += 4; 8759 return true; 8760 8761 gen_invep: 8762 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 8763 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 8764 qemu_log_mask(CPU_LOG_INT, 8765 "...really SecureFault with SFSR.INVEP\n"); 8766 return false; 8767 } 8768 8769 void arm_v7m_cpu_do_interrupt(CPUState *cs) 8770 { 8771 ARMCPU *cpu = ARM_CPU(cs); 8772 CPUARMState *env = &cpu->env; 8773 uint32_t lr; 8774 bool ignore_stackfaults; 8775 8776 arm_log_exception(cs->exception_index); 8777 8778 /* For exceptions we just mark as pending on the NVIC, and let that 8779 handle it. */ 8780 switch (cs->exception_index) { 8781 case EXCP_UDEF: 8782 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 8783 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK; 8784 break; 8785 case EXCP_NOCP: 8786 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 8787 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK; 8788 break; 8789 case EXCP_INVSTATE: 8790 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 8791 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK; 8792 break; 8793 case EXCP_STKOF: 8794 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure); 8795 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK; 8796 break; 8797 case EXCP_SWI: 8798 /* The PC already points to the next instruction. */ 8799 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure); 8800 break; 8801 case EXCP_PREFETCH_ABORT: 8802 case EXCP_DATA_ABORT: 8803 /* Note that for M profile we don't have a guest facing FSR, but 8804 * the env->exception.fsr will be populated by the code that 8805 * raises the fault, in the A profile short-descriptor format. 8806 */ 8807 switch (env->exception.fsr & 0xf) { 8808 case M_FAKE_FSR_NSC_EXEC: 8809 /* Exception generated when we try to execute code at an address 8810 * which is marked as Secure & Non-Secure Callable and the CPU 8811 * is in the Non-Secure state. The only instruction which can 8812 * be executed like this is SG (and that only if both halves of 8813 * the SG instruction have the same security attributes.) 8814 * Everything else must generate an INVEP SecureFault, so we 8815 * emulate the SG instruction here. 8816 */ 8817 if (v7m_handle_execute_nsc(cpu)) { 8818 return; 8819 } 8820 break; 8821 case M_FAKE_FSR_SFAULT: 8822 /* Various flavours of SecureFault for attempts to execute or 8823 * access data in the wrong security state. 
8824 */ 8825 switch (cs->exception_index) { 8826 case EXCP_PREFETCH_ABORT: 8827 if (env->v7m.secure) { 8828 env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK; 8829 qemu_log_mask(CPU_LOG_INT, 8830 "...really SecureFault with SFSR.INVTRAN\n"); 8831 } else { 8832 env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK; 8833 qemu_log_mask(CPU_LOG_INT, 8834 "...really SecureFault with SFSR.INVEP\n"); 8835 } 8836 break; 8837 case EXCP_DATA_ABORT: 8838 /* This must be an NS access to S memory */ 8839 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK; 8840 qemu_log_mask(CPU_LOG_INT, 8841 "...really SecureFault with SFSR.AUVIOL\n"); 8842 break; 8843 } 8844 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false); 8845 break; 8846 case 0x8: /* External Abort */ 8847 switch (cs->exception_index) { 8848 case EXCP_PREFETCH_ABORT: 8849 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK; 8850 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n"); 8851 break; 8852 case EXCP_DATA_ABORT: 8853 env->v7m.cfsr[M_REG_NS] |= 8854 (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK); 8855 env->v7m.bfar = env->exception.vaddress; 8856 qemu_log_mask(CPU_LOG_INT, 8857 "...with CFSR.PRECISERR and BFAR 0x%x\n", 8858 env->v7m.bfar); 8859 break; 8860 } 8861 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false); 8862 break; 8863 default: 8864 /* All other FSR values are either MPU faults or "can't happen 8865 * for M profile" cases. 8866 */ 8867 switch (cs->exception_index) { 8868 case EXCP_PREFETCH_ABORT: 8869 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK; 8870 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n"); 8871 break; 8872 case EXCP_DATA_ABORT: 8873 env->v7m.cfsr[env->v7m.secure] |= 8874 (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK); 8875 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress; 8876 qemu_log_mask(CPU_LOG_INT, 8877 "...with CFSR.DACCVIOL and MMFAR 0x%x\n", 8878 env->v7m.mmfar[env->v7m.secure]); 8879 break; 8880 } 8881 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, 8882 env->v7m.secure); 8883 break; 8884 } 8885 break; 8886 case EXCP_BKPT: 8887 if (semihosting_enabled()) { 8888 int nr; 8889 nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff; 8890 if (nr == 0xab) { 8891 env->regs[15] += 2; 8892 qemu_log_mask(CPU_LOG_INT, 8893 "...handling as semihosting call 0x%x\n", 8894 env->regs[0]); 8895 env->regs[0] = do_arm_semihosting(env); 8896 return; 8897 } 8898 } 8899 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false); 8900 break; 8901 case EXCP_IRQ: 8902 break; 8903 case EXCP_EXCEPTION_EXIT: 8904 if (env->regs[15] < EXC_RETURN_MIN_MAGIC) { 8905 /* Must be v8M security extension function return */ 8906 assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC); 8907 assert(arm_feature(env, ARM_FEATURE_M_SECURITY)); 8908 if (do_v7m_function_return(cpu)) { 8909 return; 8910 } 8911 } else { 8912 do_v7m_exception_exit(cpu); 8913 return; 8914 } 8915 break; 8916 default: 8917 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 8918 return; /* Never happens. Keep compiler happy. */ 8919 } 8920 8921 if (arm_feature(env, ARM_FEATURE_V8)) { 8922 lr = R_V7M_EXCRET_RES1_MASK | 8923 R_V7M_EXCRET_DCRS_MASK | 8924 R_V7M_EXCRET_FTYPE_MASK; 8925 /* The S bit indicates whether we should return to Secure 8926 * or NonSecure (ie our current state). 8927 * The ES bit indicates whether we're taking this exception 8928 * to Secure or NonSecure (ie our target state). We set it 8929 * later, in v7m_exception_taken(). 8930 * The SPSEL bit is also set in v7m_exception_taken() for v8M. 
8931 * This corresponds to the ARM ARM pseudocode for v8M setting 8932 * some LR bits in PushStack() and some in ExceptionTaken(); 8933 * the distinction matters for the tailchain cases where we 8934 * can take an exception without pushing the stack. 8935 */ 8936 if (env->v7m.secure) { 8937 lr |= R_V7M_EXCRET_S_MASK; 8938 } 8939 } else { 8940 lr = R_V7M_EXCRET_RES1_MASK | 8941 R_V7M_EXCRET_S_MASK | 8942 R_V7M_EXCRET_DCRS_MASK | 8943 R_V7M_EXCRET_FTYPE_MASK | 8944 R_V7M_EXCRET_ES_MASK; 8945 if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) { 8946 lr |= R_V7M_EXCRET_SPSEL_MASK; 8947 } 8948 } 8949 if (!arm_v7m_is_handler_mode(env)) { 8950 lr |= R_V7M_EXCRET_MODE_MASK; 8951 } 8952 8953 ignore_stackfaults = v7m_push_stack(cpu); 8954 v7m_exception_taken(cpu, lr, false, ignore_stackfaults); 8955 } 8956 8957 /* Function used to synchronize QEMU's AArch64 register set with AArch32 8958 * register set. This is necessary when switching between AArch32 and AArch64 8959 * execution state. 8960 */ 8961 void aarch64_sync_32_to_64(CPUARMState *env) 8962 { 8963 int i; 8964 uint32_t mode = env->uncached_cpsr & CPSR_M; 8965 8966 /* We can blanket copy R[0:7] to X[0:7] */ 8967 for (i = 0; i < 8; i++) { 8968 env->xregs[i] = env->regs[i]; 8969 } 8970 8971 /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. 8972 * Otherwise, they come from the banked user regs. 8973 */ 8974 if (mode == ARM_CPU_MODE_FIQ) { 8975 for (i = 8; i < 13; i++) { 8976 env->xregs[i] = env->usr_regs[i - 8]; 8977 } 8978 } else { 8979 for (i = 8; i < 13; i++) { 8980 env->xregs[i] = env->regs[i]; 8981 } 8982 } 8983 8984 /* Registers x13-x23 are the various mode SP and FP registers. Registers 8985 * r13 and r14 are only copied if we are in that mode, otherwise we copy 8986 * from the mode banked register. 
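 *
 * Concretely, the assignments below use the mapping
 *     x13 = USR sp    x14 = USR lr    x15 = HYP sp
 *     x16 = IRQ lr    x17 = IRQ sp
 *     x18 = SVC lr    x19 = SVC sp
 *     x20 = ABT lr    x21 = ABT sp
 *     x22 = UND lr    x23 = UND sp
 * taking the live r13/r14 when we are currently in that mode and the
 * banked copy otherwise.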
8987 */ 8988 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 8989 env->xregs[13] = env->regs[13]; 8990 env->xregs[14] = env->regs[14]; 8991 } else { 8992 env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; 8993 /* HYP is an exception in that it is copied from r14 */ 8994 if (mode == ARM_CPU_MODE_HYP) { 8995 env->xregs[14] = env->regs[14]; 8996 } else { 8997 env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; 8998 } 8999 } 9000 9001 if (mode == ARM_CPU_MODE_HYP) { 9002 env->xregs[15] = env->regs[13]; 9003 } else { 9004 env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; 9005 } 9006 9007 if (mode == ARM_CPU_MODE_IRQ) { 9008 env->xregs[16] = env->regs[14]; 9009 env->xregs[17] = env->regs[13]; 9010 } else { 9011 env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; 9012 env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; 9013 } 9014 9015 if (mode == ARM_CPU_MODE_SVC) { 9016 env->xregs[18] = env->regs[14]; 9017 env->xregs[19] = env->regs[13]; 9018 } else { 9019 env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; 9020 env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; 9021 } 9022 9023 if (mode == ARM_CPU_MODE_ABT) { 9024 env->xregs[20] = env->regs[14]; 9025 env->xregs[21] = env->regs[13]; 9026 } else { 9027 env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; 9028 env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; 9029 } 9030 9031 if (mode == ARM_CPU_MODE_UND) { 9032 env->xregs[22] = env->regs[14]; 9033 env->xregs[23] = env->regs[13]; 9034 } else { 9035 env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; 9036 env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; 9037 } 9038 9039 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9040 * mode, then we can copy from r8-r14. Otherwise, we copy from the 9041 * FIQ bank for r8-r14. 9042 */ 9043 if (mode == ARM_CPU_MODE_FIQ) { 9044 for (i = 24; i < 31; i++) { 9045 env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ 9046 } 9047 } else { 9048 for (i = 24; i < 29; i++) { 9049 env->xregs[i] = env->fiq_regs[i - 24]; 9050 } 9051 env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; 9052 env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; 9053 } 9054 9055 env->pc = env->regs[15]; 9056 } 9057 9058 /* Function used to synchronize QEMU's AArch32 register set with AArch64 9059 * register set. This is necessary when switching between AArch32 and AArch64 9060 * execution state. 9061 */ 9062 void aarch64_sync_64_to_32(CPUARMState *env) 9063 { 9064 int i; 9065 uint32_t mode = env->uncached_cpsr & CPSR_M; 9066 9067 /* We can blanket copy X[0:7] to R[0:7] */ 9068 for (i = 0; i < 8; i++) { 9069 env->regs[i] = env->xregs[i]; 9070 } 9071 9072 /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. 9073 * Otherwise, we copy x8-x12 into the banked user regs. 9074 */ 9075 if (mode == ARM_CPU_MODE_FIQ) { 9076 for (i = 8; i < 13; i++) { 9077 env->usr_regs[i - 8] = env->xregs[i]; 9078 } 9079 } else { 9080 for (i = 8; i < 13; i++) { 9081 env->regs[i] = env->xregs[i]; 9082 } 9083 } 9084 9085 /* Registers r13 & r14 depend on the current mode. 9086 * If we are in a given mode, we copy the corresponding x registers to r13 9087 * and r14. Otherwise, we copy the x register to the banked r13 and r14 9088 * for the mode. 
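 * (This is the inverse of the x13..x23 to banked r13/r14 mapping
 * summarised in aarch64_sync_32_to_64() above.)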
9089 */ 9090 if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { 9091 env->regs[13] = env->xregs[13]; 9092 env->regs[14] = env->xregs[14]; 9093 } else { 9094 env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; 9095 9096 /* HYP is an exception in that it does not have its own banked r14 but 9097 * shares the USR r14 9098 */ 9099 if (mode == ARM_CPU_MODE_HYP) { 9100 env->regs[14] = env->xregs[14]; 9101 } else { 9102 env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; 9103 } 9104 } 9105 9106 if (mode == ARM_CPU_MODE_HYP) { 9107 env->regs[13] = env->xregs[15]; 9108 } else { 9109 env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; 9110 } 9111 9112 if (mode == ARM_CPU_MODE_IRQ) { 9113 env->regs[14] = env->xregs[16]; 9114 env->regs[13] = env->xregs[17]; 9115 } else { 9116 env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; 9117 env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; 9118 } 9119 9120 if (mode == ARM_CPU_MODE_SVC) { 9121 env->regs[14] = env->xregs[18]; 9122 env->regs[13] = env->xregs[19]; 9123 } else { 9124 env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; 9125 env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; 9126 } 9127 9128 if (mode == ARM_CPU_MODE_ABT) { 9129 env->regs[14] = env->xregs[20]; 9130 env->regs[13] = env->xregs[21]; 9131 } else { 9132 env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; 9133 env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; 9134 } 9135 9136 if (mode == ARM_CPU_MODE_UND) { 9137 env->regs[14] = env->xregs[22]; 9138 env->regs[13] = env->xregs[23]; 9139 } else { 9140 env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; 9141 env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; 9142 } 9143 9144 /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ 9145 * mode, then we can copy to r8-r14. Otherwise, we copy to the 9146 * FIQ bank for r8-r14. 9147 */ 9148 if (mode == ARM_CPU_MODE_FIQ) { 9149 for (i = 24; i < 31; i++) { 9150 env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ 9151 } 9152 } else { 9153 for (i = 24; i < 29; i++) { 9154 env->fiq_regs[i - 24] = env->xregs[i]; 9155 } 9156 env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; 9157 env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; 9158 } 9159 9160 env->regs[15] = env->pc; 9161 } 9162 9163 static void take_aarch32_exception(CPUARMState *env, int new_mode, 9164 uint32_t mask, uint32_t offset, 9165 uint32_t newpc) 9166 { 9167 /* Change the CPU state so as to actually take the exception. */ 9168 switch_mode(env, new_mode); 9169 /* 9170 * For exceptions taken to AArch32 we must clear the SS bit in both 9171 * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. 9172 */ 9173 env->uncached_cpsr &= ~PSTATE_SS; 9174 env->spsr = cpsr_read(env); 9175 /* Clear IT bits. */ 9176 env->condexec_bits = 0; 9177 /* Switch to the new mode, and to the correct instruction set. 
*/ 9178 env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; 9179 /* Set new mode endianness */ 9180 env->uncached_cpsr &= ~CPSR_E; 9181 if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) { 9182 env->uncached_cpsr |= CPSR_E; 9183 } 9184 /* J and IL must always be cleared for exception entry */ 9185 env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); 9186 env->daif |= mask; 9187 9188 if (new_mode == ARM_CPU_MODE_HYP) { 9189 env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; 9190 env->elr_el[2] = env->regs[15]; 9191 } else { 9192 /* 9193 * this is a lie, as there was no c1_sys on V4T/V5, but who cares 9194 * and we should just guard the thumb mode on V4 9195 */ 9196 if (arm_feature(env, ARM_FEATURE_V4T)) { 9197 env->thumb = 9198 (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; 9199 } 9200 env->regs[14] = env->regs[15] + offset; 9201 } 9202 env->regs[15] = newpc; 9203 } 9204 9205 static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) 9206 { 9207 /* 9208 * Handle exception entry to Hyp mode; this is sufficiently 9209 * different to entry to other AArch32 modes that we handle it 9210 * separately here. 9211 * 9212 * The vector table entry used is always the 0x14 Hyp mode entry point, 9213 * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp. 9214 * The offset applied to the preferred return address is always zero 9215 * (see DDI0487C.a section G1.12.3). 9216 * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. 9217 */ 9218 uint32_t addr, mask; 9219 ARMCPU *cpu = ARM_CPU(cs); 9220 CPUARMState *env = &cpu->env; 9221 9222 switch (cs->exception_index) { 9223 case EXCP_UDEF: 9224 addr = 0x04; 9225 break; 9226 case EXCP_SWI: 9227 addr = 0x14; 9228 break; 9229 case EXCP_BKPT: 9230 /* Fall through to prefetch abort. */ 9231 case EXCP_PREFETCH_ABORT: 9232 env->cp15.ifar_s = env->exception.vaddress; 9233 qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", 9234 (uint32_t)env->exception.vaddress); 9235 addr = 0x0c; 9236 break; 9237 case EXCP_DATA_ABORT: 9238 env->cp15.dfar_s = env->exception.vaddress; 9239 qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", 9240 (uint32_t)env->exception.vaddress); 9241 addr = 0x10; 9242 break; 9243 case EXCP_IRQ: 9244 addr = 0x18; 9245 break; 9246 case EXCP_FIQ: 9247 addr = 0x1c; 9248 break; 9249 case EXCP_HVC: 9250 addr = 0x08; 9251 break; 9252 case EXCP_HYP_TRAP: 9253 addr = 0x14; break; 9254 default: 9255 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9256 } 9257 9258 if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { 9259 if (!arm_feature(env, ARM_FEATURE_V8)) { 9260 /* 9261 * QEMU syndrome values are v8-style. v7 has the IL bit 9262 * UNK/SBZP for "field not valid" cases, where v8 uses RES1. 9263 * If this is a v7 CPU, squash the IL bit in those cases.
9264 */ 9265 if (cs->exception_index == EXCP_PREFETCH_ABORT || 9266 (cs->exception_index == EXCP_DATA_ABORT && 9267 !(env->exception.syndrome & ARM_EL_ISV)) || 9268 syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { 9269 env->exception.syndrome &= ~ARM_EL_IL; 9270 } 9271 } 9272 env->cp15.esr_el[2] = env->exception.syndrome; 9273 } 9274 9275 if (arm_current_el(env) != 2 && addr < 0x14) { 9276 addr = 0x14; 9277 } 9278 9279 mask = 0; 9280 if (!(env->cp15.scr_el3 & SCR_EA)) { 9281 mask |= CPSR_A; 9282 } 9283 if (!(env->cp15.scr_el3 & SCR_IRQ)) { 9284 mask |= CPSR_I; 9285 } 9286 if (!(env->cp15.scr_el3 & SCR_FIQ)) { 9287 mask |= CPSR_F; 9288 } 9289 9290 addr += env->cp15.hvbar; 9291 9292 take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); 9293 } 9294 9295 static void arm_cpu_do_interrupt_aarch32(CPUState *cs) 9296 { 9297 ARMCPU *cpu = ARM_CPU(cs); 9298 CPUARMState *env = &cpu->env; 9299 uint32_t addr; 9300 uint32_t mask; 9301 int new_mode; 9302 uint32_t offset; 9303 uint32_t moe; 9304 9305 /* If this is a debug exception we must update the DBGDSCR.MOE bits */ 9306 switch (syn_get_ec(env->exception.syndrome)) { 9307 case EC_BREAKPOINT: 9308 case EC_BREAKPOINT_SAME_EL: 9309 moe = 1; 9310 break; 9311 case EC_WATCHPOINT: 9312 case EC_WATCHPOINT_SAME_EL: 9313 moe = 10; 9314 break; 9315 case EC_AA32_BKPT: 9316 moe = 3; 9317 break; 9318 case EC_VECTORCATCH: 9319 moe = 5; 9320 break; 9321 default: 9322 moe = 0; 9323 break; 9324 } 9325 9326 if (moe) { 9327 env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); 9328 } 9329 9330 if (env->exception.target_el == 2) { 9331 arm_cpu_do_interrupt_aarch32_hyp(cs); 9332 return; 9333 } 9334 9335 switch (cs->exception_index) { 9336 case EXCP_UDEF: 9337 new_mode = ARM_CPU_MODE_UND; 9338 addr = 0x04; 9339 mask = CPSR_I; 9340 if (env->thumb) 9341 offset = 2; 9342 else 9343 offset = 4; 9344 break; 9345 case EXCP_SWI: 9346 new_mode = ARM_CPU_MODE_SVC; 9347 addr = 0x08; 9348 mask = CPSR_I; 9349 /* The PC already points to the next instruction. */ 9350 offset = 0; 9351 break; 9352 case EXCP_BKPT: 9353 /* Fall through to prefetch abort. */ 9354 case EXCP_PREFETCH_ABORT: 9355 A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); 9356 A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); 9357 qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", 9358 env->exception.fsr, (uint32_t)env->exception.vaddress); 9359 new_mode = ARM_CPU_MODE_ABT; 9360 addr = 0x0c; 9361 mask = CPSR_A | CPSR_I; 9362 offset = 4; 9363 break; 9364 case EXCP_DATA_ABORT: 9365 A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); 9366 A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); 9367 qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", 9368 env->exception.fsr, 9369 (uint32_t)env->exception.vaddress); 9370 new_mode = ARM_CPU_MODE_ABT; 9371 addr = 0x10; 9372 mask = CPSR_A | CPSR_I; 9373 offset = 8; 9374 break; 9375 case EXCP_IRQ: 9376 new_mode = ARM_CPU_MODE_IRQ; 9377 addr = 0x18; 9378 /* Disable IRQ and imprecise data aborts. */ 9379 mask = CPSR_A | CPSR_I; 9380 offset = 4; 9381 if (env->cp15.scr_el3 & SCR_IRQ) { 9382 /* IRQ routed to monitor mode */ 9383 new_mode = ARM_CPU_MODE_MON; 9384 mask |= CPSR_F; 9385 } 9386 break; 9387 case EXCP_FIQ: 9388 new_mode = ARM_CPU_MODE_FIQ; 9389 addr = 0x1c; 9390 /* Disable FIQ, IRQ and imprecise data aborts. 
*/ 9391 mask = CPSR_A | CPSR_I | CPSR_F; 9392 if (env->cp15.scr_el3 & SCR_FIQ) { 9393 /* FIQ routed to monitor mode */ 9394 new_mode = ARM_CPU_MODE_MON; 9395 } 9396 offset = 4; 9397 break; 9398 case EXCP_VIRQ: 9399 new_mode = ARM_CPU_MODE_IRQ; 9400 addr = 0x18; 9401 /* Disable IRQ and imprecise data aborts. */ 9402 mask = CPSR_A | CPSR_I; 9403 offset = 4; 9404 break; 9405 case EXCP_VFIQ: 9406 new_mode = ARM_CPU_MODE_FIQ; 9407 addr = 0x1c; 9408 /* Disable FIQ, IRQ and imprecise data aborts. */ 9409 mask = CPSR_A | CPSR_I | CPSR_F; 9410 offset = 4; 9411 break; 9412 case EXCP_SMC: 9413 new_mode = ARM_CPU_MODE_MON; 9414 addr = 0x08; 9415 mask = CPSR_A | CPSR_I | CPSR_F; 9416 offset = 0; 9417 break; 9418 default: 9419 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9420 return; /* Never happens. Keep compiler happy. */ 9421 } 9422 9423 if (new_mode == ARM_CPU_MODE_MON) { 9424 addr += env->cp15.mvbar; 9425 } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { 9426 /* High vectors. When enabled, base address cannot be remapped. */ 9427 addr += 0xffff0000; 9428 } else { 9429 /* ARM v7 architectures provide a vector base address register to remap 9430 * the interrupt vector table. 9431 * This register is only followed in non-monitor mode, and is banked. 9432 * Note: only bits 31:5 are valid. 9433 */ 9434 addr += A32_BANKED_CURRENT_REG_GET(env, vbar); 9435 } 9436 9437 if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { 9438 env->cp15.scr_el3 &= ~SCR_NS; 9439 } 9440 9441 take_aarch32_exception(env, new_mode, mask, offset, addr); 9442 } 9443 9444 /* Handle exception entry to a target EL which is using AArch64 */ 9445 static void arm_cpu_do_interrupt_aarch64(CPUState *cs) 9446 { 9447 ARMCPU *cpu = ARM_CPU(cs); 9448 CPUARMState *env = &cpu->env; 9449 unsigned int new_el = env->exception.target_el; 9450 target_ulong addr = env->cp15.vbar_el[new_el]; 9451 unsigned int new_mode = aarch64_pstate_mode(new_el, true); 9452 unsigned int cur_el = arm_current_el(env); 9453 9454 /* 9455 * Note that new_el can never be 0. If cur_el is 0, then 9456 * el0_a64 is is_a64(), else el0_a64 is ignored. 9457 */ 9458 aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); 9459 9460 if (cur_el < new_el) { 9461 /* Entry vector offset depends on whether the implemented EL 9462 * immediately lower than the target level is using AArch32 or AArch64 9463 */ 9464 bool is_aa64; 9465 9466 switch (new_el) { 9467 case 3: 9468 is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; 9469 break; 9470 case 2: 9471 is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0; 9472 break; 9473 case 1: 9474 is_aa64 = is_a64(env); 9475 break; 9476 default: 9477 g_assert_not_reached(); 9478 } 9479 9480 if (is_aa64) { 9481 addr += 0x400; 9482 } else { 9483 addr += 0x600; 9484 } 9485 } else if (pstate_read(env) & PSTATE_SP) { 9486 addr += 0x200; 9487 } 9488 9489 switch (cs->exception_index) { 9490 case EXCP_PREFETCH_ABORT: 9491 case EXCP_DATA_ABORT: 9492 env->cp15.far_el[new_el] = env->exception.vaddress; 9493 qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", 9494 env->cp15.far_el[new_el]); 9495 /* fall through */ 9496 case EXCP_BKPT: 9497 case EXCP_UDEF: 9498 case EXCP_SWI: 9499 case EXCP_HVC: 9500 case EXCP_HYP_TRAP: 9501 case EXCP_SMC: 9502 if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) { 9503 /* 9504 * QEMU internal FP/SIMD syndromes from AArch32 include the 9505 * TA and coproc fields which are only exposed if the exception 9506 * is taken to AArch32 Hyp mode. 
Mask them out to get a valid 9507 * AArch64 format syndrome. 9508 */ 9509 env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); 9510 } 9511 env->cp15.esr_el[new_el] = env->exception.syndrome; 9512 break; 9513 case EXCP_IRQ: 9514 case EXCP_VIRQ: 9515 addr += 0x80; 9516 break; 9517 case EXCP_FIQ: 9518 case EXCP_VFIQ: 9519 addr += 0x100; 9520 break; 9521 case EXCP_SEMIHOST: 9522 qemu_log_mask(CPU_LOG_INT, 9523 "...handling as semihosting call 0x%" PRIx64 "\n", 9524 env->xregs[0]); 9525 env->xregs[0] = do_arm_semihosting(env); 9526 return; 9527 default: 9528 cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); 9529 } 9530 9531 if (is_a64(env)) { 9532 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env); 9533 aarch64_save_sp(env, arm_current_el(env)); 9534 env->elr_el[new_el] = env->pc; 9535 } else { 9536 env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env); 9537 env->elr_el[new_el] = env->regs[15]; 9538 9539 aarch64_sync_32_to_64(env); 9540 9541 env->condexec_bits = 0; 9542 } 9543 qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", 9544 env->elr_el[new_el]); 9545 9546 pstate_write(env, PSTATE_DAIF | new_mode); 9547 env->aarch64 = 1; 9548 aarch64_restore_sp(env, new_el); 9549 9550 env->pc = addr; 9551 9552 qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", 9553 new_el, env->pc, pstate_read(env)); 9554 } 9555 9556 static inline bool check_for_semihosting(CPUState *cs) 9557 { 9558 /* Check whether this exception is a semihosting call; if so 9559 * then handle it and return true; otherwise return false. 9560 */ 9561 ARMCPU *cpu = ARM_CPU(cs); 9562 CPUARMState *env = &cpu->env; 9563 9564 if (is_a64(env)) { 9565 if (cs->exception_index == EXCP_SEMIHOST) { 9566 /* This is always the 64-bit semihosting exception. 9567 * The "is this usermode" and "is semihosting enabled" 9568 * checks have been done at translate time. 9569 */ 9570 qemu_log_mask(CPU_LOG_INT, 9571 "...handling as semihosting call 0x%" PRIx64 "\n", 9572 env->xregs[0]); 9573 env->xregs[0] = do_arm_semihosting(env); 9574 return true; 9575 } 9576 return false; 9577 } else { 9578 uint32_t imm; 9579 9580 /* Only intercept calls from privileged modes, to provide some 9581 * semblance of security. 9582 */ 9583 if (cs->exception_index != EXCP_SEMIHOST && 9584 (!semihosting_enabled() || 9585 ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) { 9586 return false; 9587 } 9588 9589 switch (cs->exception_index) { 9590 case EXCP_SEMIHOST: 9591 /* This is always a semihosting call; the "is this usermode" 9592 * and "is semihosting enabled" checks have been done at 9593 * translate time. 9594 */ 9595 break; 9596 case EXCP_SWI: 9597 /* Check for semihosting interrupt. */ 9598 if (env->thumb) { 9599 imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env)) 9600 & 0xff; 9601 if (imm == 0xab) { 9602 break; 9603 } 9604 } else { 9605 imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env)) 9606 & 0xffffff; 9607 if (imm == 0x123456) { 9608 break; 9609 } 9610 } 9611 return false; 9612 case EXCP_BKPT: 9613 /* See if this is a semihosting syscall. 
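 * As in the EXCP_SWI case above, a Thumb immediate of 0xab marks a
 * semihosting request; for BKPT the PC still points at the instruction,
 * so we also step past it before handling the call.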
*/ 9614 if (env->thumb) { 9615 imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) 9616 & 0xff; 9617 if (imm == 0xab) { 9618 env->regs[15] += 2; 9619 break; 9620 } 9621 } 9622 return false; 9623 default: 9624 return false; 9625 } 9626 9627 qemu_log_mask(CPU_LOG_INT, 9628 "...handling as semihosting call 0x%x\n", 9629 env->regs[0]); 9630 env->regs[0] = do_arm_semihosting(env); 9631 return true; 9632 } 9633 } 9634 9635 /* Handle a CPU exception for A and R profile CPUs. 9636 * Do any appropriate logging, handle PSCI calls, and then hand off 9637 * to the AArch64-entry or AArch32-entry function depending on the 9638 * target exception level's register width. 9639 */ 9640 void arm_cpu_do_interrupt(CPUState *cs) 9641 { 9642 ARMCPU *cpu = ARM_CPU(cs); 9643 CPUARMState *env = &cpu->env; 9644 unsigned int new_el = env->exception.target_el; 9645 9646 assert(!arm_feature(env, ARM_FEATURE_M)); 9647 9648 arm_log_exception(cs->exception_index); 9649 qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), 9650 new_el); 9651 if (qemu_loglevel_mask(CPU_LOG_INT) 9652 && !excp_is_internal(cs->exception_index)) { 9653 qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", 9654 syn_get_ec(env->exception.syndrome), 9655 env->exception.syndrome); 9656 } 9657 9658 if (arm_is_psci_call(cpu, cs->exception_index)) { 9659 arm_handle_psci_call(cpu); 9660 qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); 9661 return; 9662 } 9663 9664 /* Semihosting semantics depend on the register width of the 9665 * code that caused the exception, not the target exception level, 9666 * so must be handled here. 9667 */ 9668 if (check_for_semihosting(cs)) { 9669 return; 9670 } 9671 9672 /* Hooks may change global state so BQL should be held, also the 9673 * BQL needs to be held for any modification of 9674 * cs->interrupt_request. 9675 */ 9676 g_assert(qemu_mutex_iothread_locked()); 9677 9678 arm_call_pre_el_change_hook(cpu); 9679 9680 assert(!excp_is_internal(cs->exception_index)); 9681 if (arm_el_is_aa64(env, new_el)) { 9682 arm_cpu_do_interrupt_aarch64(cs); 9683 } else { 9684 arm_cpu_do_interrupt_aarch32(cs); 9685 } 9686 9687 arm_call_el_change_hook(cpu); 9688 9689 if (!kvm_enabled()) { 9690 cs->interrupt_request |= CPU_INTERRUPT_EXITTB; 9691 } 9692 } 9693 #endif /* !CONFIG_USER_ONLY */ 9694 9695 /* Return the exception level which controls this address translation regime */ 9696 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) 9697 { 9698 switch (mmu_idx) { 9699 case ARMMMUIdx_S2NS: 9700 case ARMMMUIdx_S1E2: 9701 return 2; 9702 case ARMMMUIdx_S1E3: 9703 return 3; 9704 case ARMMMUIdx_S1SE0: 9705 return arm_el_is_aa64(env, 3) ? 
1 : 3; 9706 case ARMMMUIdx_S1SE1: 9707 case ARMMMUIdx_S1NSE0: 9708 case ARMMMUIdx_S1NSE1: 9709 case ARMMMUIdx_MPrivNegPri: 9710 case ARMMMUIdx_MUserNegPri: 9711 case ARMMMUIdx_MPriv: 9712 case ARMMMUIdx_MUser: 9713 case ARMMMUIdx_MSPrivNegPri: 9714 case ARMMMUIdx_MSUserNegPri: 9715 case ARMMMUIdx_MSPriv: 9716 case ARMMMUIdx_MSUser: 9717 return 1; 9718 default: 9719 g_assert_not_reached(); 9720 } 9721 } 9722 9723 #ifndef CONFIG_USER_ONLY 9724 9725 /* Return the SCTLR value which controls this address translation regime */ 9726 static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) 9727 { 9728 return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; 9729 } 9730 9731 /* Return true if the specified stage of address translation is disabled */ 9732 static inline bool regime_translation_disabled(CPUARMState *env, 9733 ARMMMUIdx mmu_idx) 9734 { 9735 if (arm_feature(env, ARM_FEATURE_M)) { 9736 switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] & 9737 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) { 9738 case R_V7M_MPU_CTRL_ENABLE_MASK: 9739 /* Enabled, but not for HardFault and NMI */ 9740 return mmu_idx & ARM_MMU_IDX_M_NEGPRI; 9741 case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK: 9742 /* Enabled for all cases */ 9743 return false; 9744 case 0: 9745 default: 9746 /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but 9747 * we warned about that in armv7m_nvic.c when the guest set it. 9748 */ 9749 return true; 9750 } 9751 } 9752 9753 if (mmu_idx == ARMMMUIdx_S2NS) { 9754 /* HCR.DC means HCR.VM behaves as 1 */ 9755 return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0; 9756 } 9757 9758 if (env->cp15.hcr_el2 & HCR_TGE) { 9759 /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */ 9760 if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) { 9761 return true; 9762 } 9763 } 9764 9765 if ((env->cp15.hcr_el2 & HCR_DC) && 9766 (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) { 9767 /* HCR.DC means SCTLR_EL1.M behaves as 0 */ 9768 return true; 9769 } 9770 9771 return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0; 9772 } 9773 9774 static inline bool regime_translation_big_endian(CPUARMState *env, 9775 ARMMMUIdx mmu_idx) 9776 { 9777 return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0; 9778 } 9779 9780 /* Return the TTBR associated with this translation regime */ 9781 static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, 9782 int ttbrn) 9783 { 9784 if (mmu_idx == ARMMMUIdx_S2NS) { 9785 return env->cp15.vttbr_el2; 9786 } 9787 if (ttbrn == 0) { 9788 return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; 9789 } else { 9790 return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; 9791 } 9792 } 9793 9794 #endif /* !CONFIG_USER_ONLY */ 9795 9796 /* Return the TCR controlling this translation regime */ 9797 static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) 9798 { 9799 if (mmu_idx == ARMMMUIdx_S2NS) { 9800 return &env->cp15.vtcr_el2; 9801 } 9802 return &env->cp15.tcr_el[regime_el(env, mmu_idx)]; 9803 } 9804 9805 /* Convert a possible stage1+2 MMU index into the appropriate 9806 * stage 1 MMU index 9807 */ 9808 static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) 9809 { 9810 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 9811 mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0); 9812 } 9813 return mmu_idx; 9814 } 9815 9816 /* Return true if the translation regime is using LPAE format page tables */ 9817 static inline bool regime_using_lpae_format(CPUARMState *env, 9818 ARMMMUIdx mmu_idx) 9819 
{ 9820 int el = regime_el(env, mmu_idx); 9821 if (el == 2 || arm_el_is_aa64(env, el)) { 9822 return true; 9823 } 9824 if (arm_feature(env, ARM_FEATURE_LPAE) 9825 && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) { 9826 return true; 9827 } 9828 return false; 9829 } 9830 9831 /* Returns true if the stage 1 translation regime is using LPAE format page 9832 * tables. Used when raising alignment exceptions, whose FSR changes depending 9833 * on whether the long or short descriptor format is in use. */ 9834 bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) 9835 { 9836 mmu_idx = stage_1_mmu_idx(mmu_idx); 9837 9838 return regime_using_lpae_format(env, mmu_idx); 9839 } 9840 9841 #ifndef CONFIG_USER_ONLY 9842 static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) 9843 { 9844 switch (mmu_idx) { 9845 case ARMMMUIdx_S1SE0: 9846 case ARMMMUIdx_S1NSE0: 9847 case ARMMMUIdx_MUser: 9848 case ARMMMUIdx_MSUser: 9849 case ARMMMUIdx_MUserNegPri: 9850 case ARMMMUIdx_MSUserNegPri: 9851 return true; 9852 default: 9853 return false; 9854 case ARMMMUIdx_S12NSE0: 9855 case ARMMMUIdx_S12NSE1: 9856 g_assert_not_reached(); 9857 } 9858 } 9859 9860 /* Translate section/page access permissions to page 9861 * R/W protection flags 9862 * 9863 * @env: CPUARMState 9864 * @mmu_idx: MMU index indicating required translation regime 9865 * @ap: The 3-bit access permissions (AP[2:0]) 9866 * @domain_prot: The 2-bit domain access permissions 9867 */ 9868 static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, 9869 int ap, int domain_prot) 9870 { 9871 bool is_user = regime_is_user(env, mmu_idx); 9872 9873 if (domain_prot == 3) { 9874 return PAGE_READ | PAGE_WRITE; 9875 } 9876 9877 switch (ap) { 9878 case 0: 9879 if (arm_feature(env, ARM_FEATURE_V7)) { 9880 return 0; 9881 } 9882 switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) { 9883 case SCTLR_S: 9884 return is_user ? 0 : PAGE_READ; 9885 case SCTLR_R: 9886 return PAGE_READ; 9887 default: 9888 return 0; 9889 } 9890 case 1: 9891 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9892 case 2: 9893 if (is_user) { 9894 return PAGE_READ; 9895 } else { 9896 return PAGE_READ | PAGE_WRITE; 9897 } 9898 case 3: 9899 return PAGE_READ | PAGE_WRITE; 9900 case 4: /* Reserved. */ 9901 return 0; 9902 case 5: 9903 return is_user ? 0 : PAGE_READ; 9904 case 6: 9905 return PAGE_READ; 9906 case 7: 9907 if (!arm_feature(env, ARM_FEATURE_V6K)) { 9908 return 0; 9909 } 9910 return PAGE_READ; 9911 default: 9912 g_assert_not_reached(); 9913 } 9914 } 9915 9916 /* Translate section/page access permissions to page 9917 * R/W protection flags. 9918 * 9919 * @ap: The 2-bit simple AP (AP[2:1]) 9920 * @is_user: TRUE if accessing from PL0 9921 */ 9922 static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user) 9923 { 9924 switch (ap) { 9925 case 0: 9926 return is_user ? 0 : PAGE_READ | PAGE_WRITE; 9927 case 1: 9928 return PAGE_READ | PAGE_WRITE; 9929 case 2: 9930 return is_user ? 
0 : PAGE_READ; 9931 case 3: 9932 return PAGE_READ; 9933 default: 9934 g_assert_not_reached(); 9935 } 9936 } 9937 9938 static inline int 9939 simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) 9940 { 9941 return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); 9942 } 9943 9944 /* Translate S2 section/page access permissions to protection flags 9945 * 9946 * @env: CPUARMState 9947 * @s2ap: The 2-bit stage2 access permissions (S2AP) 9948 * @xn: XN (execute-never) bit 9949 */ 9950 static int get_S2prot(CPUARMState *env, int s2ap, int xn) 9951 { 9952 int prot = 0; 9953 9954 if (s2ap & 1) { 9955 prot |= PAGE_READ; 9956 } 9957 if (s2ap & 2) { 9958 prot |= PAGE_WRITE; 9959 } 9960 if (!xn) { 9961 if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) { 9962 prot |= PAGE_EXEC; 9963 } 9964 } 9965 return prot; 9966 } 9967 9968 /* Translate section/page access permissions to protection flags 9969 * 9970 * @env: CPUARMState 9971 * @mmu_idx: MMU index indicating required translation regime 9972 * @is_aa64: TRUE if AArch64 9973 * @ap: The 2-bit simple AP (AP[2:1]) 9974 * @ns: NS (non-secure) bit 9975 * @xn: XN (execute-never) bit 9976 * @pxn: PXN (privileged execute-never) bit 9977 */ 9978 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, 9979 int ap, int ns, int xn, int pxn) 9980 { 9981 bool is_user = regime_is_user(env, mmu_idx); 9982 int prot_rw, user_rw; 9983 bool have_wxn; 9984 int wxn = 0; 9985 9986 assert(mmu_idx != ARMMMUIdx_S2NS); 9987 9988 user_rw = simple_ap_to_rw_prot_is_user(ap, true); 9989 if (is_user) { 9990 prot_rw = user_rw; 9991 } else { 9992 prot_rw = simple_ap_to_rw_prot_is_user(ap, false); 9993 } 9994 9995 if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) { 9996 return prot_rw; 9997 } 9998 9999 /* TODO have_wxn should be replaced with 10000 * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2) 10001 * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE 10002 * compatible processors have EL2, which is required for [U]WXN. 
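 * (For reference: SCTLR.WXN treats every writable region as execute-never,
 * and SCTLR.UWXN treats every EL0-writable region as execute-never for
 * privileged execution; both are applied further down in this function.)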
10003 */ 10004 have_wxn = arm_feature(env, ARM_FEATURE_LPAE); 10005 10006 if (have_wxn) { 10007 wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN; 10008 } 10009 10010 if (is_aa64) { 10011 switch (regime_el(env, mmu_idx)) { 10012 case 1: 10013 if (!is_user) { 10014 xn = pxn || (user_rw & PAGE_WRITE); 10015 } 10016 break; 10017 case 2: 10018 case 3: 10019 break; 10020 } 10021 } else if (arm_feature(env, ARM_FEATURE_V7)) { 10022 switch (regime_el(env, mmu_idx)) { 10023 case 1: 10024 case 3: 10025 if (is_user) { 10026 xn = xn || !(user_rw & PAGE_READ); 10027 } else { 10028 int uwxn = 0; 10029 if (have_wxn) { 10030 uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN; 10031 } 10032 xn = xn || !(prot_rw & PAGE_READ) || pxn || 10033 (uwxn && (user_rw & PAGE_WRITE)); 10034 } 10035 break; 10036 case 2: 10037 break; 10038 } 10039 } else { 10040 xn = wxn = 0; 10041 } 10042 10043 if (xn || (wxn && (prot_rw & PAGE_WRITE))) { 10044 return prot_rw; 10045 } 10046 return prot_rw | PAGE_EXEC; 10047 } 10048 10049 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx, 10050 uint32_t *table, uint32_t address) 10051 { 10052 /* Note that we can only get here for an AArch32 PL0/PL1 lookup */ 10053 TCR *tcr = regime_tcr(env, mmu_idx); 10054 10055 if (address & tcr->mask) { 10056 if (tcr->raw_tcr & TTBCR_PD1) { 10057 /* Translation table walk disabled for TTBR1 */ 10058 return false; 10059 } 10060 *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000; 10061 } else { 10062 if (tcr->raw_tcr & TTBCR_PD0) { 10063 /* Translation table walk disabled for TTBR0 */ 10064 return false; 10065 } 10066 *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask; 10067 } 10068 *table |= (address >> 18) & 0x3ffc; 10069 return true; 10070 } 10071 10072 /* Translate a S1 pagetable walk through S2 if needed. */ 10073 static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx, 10074 hwaddr addr, MemTxAttrs txattrs, 10075 ARMMMUFaultInfo *fi) 10076 { 10077 if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) && 10078 !regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 10079 target_ulong s2size; 10080 hwaddr s2pa; 10081 int s2prot; 10082 int ret; 10083 ARMCacheAttrs cacheattrs = {}; 10084 ARMCacheAttrs *pcacheattrs = NULL; 10085 10086 if (env->cp15.hcr_el2 & HCR_PTW) { 10087 /* 10088 * PTW means we must fault if this S1 walk touches S2 Device 10089 * memory; otherwise we don't care about the attributes and can 10090 * save the S2 translation the effort of computing them. 10091 */ 10092 pcacheattrs = &cacheattrs; 10093 } 10094 10095 ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa, 10096 &txattrs, &s2prot, &s2size, fi, pcacheattrs); 10097 if (ret) { 10098 assert(fi->type != ARMFault_None); 10099 fi->s2addr = addr; 10100 fi->stage2 = true; 10101 fi->s1ptw = true; 10102 return ~0; 10103 } 10104 if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) { 10105 /* Access was to Device memory: generate Permission fault */ 10106 fi->type = ARMFault_Permission; 10107 fi->s2addr = addr; 10108 fi->stage2 = true; 10109 fi->s1ptw = true; 10110 return ~0; 10111 } 10112 addr = s2pa; 10113 } 10114 return addr; 10115 } 10116 10117 /* All loads done in the course of a page table walk go through here. 
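 * They honour the regime's SCTLR.EE bit for descriptor endianness and run
 * the descriptor address through S1_ptw_translate() first, so a stage 1
 * walk can itself fault at stage 2. Illustrative caller pattern (this is
 * how the short-descriptor walkers below use it):
 *     desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
 *                        mmu_idx, fi);
 *     if (fi->type != ARMFault_None) {
 *         goto do_fault;
 *     }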
*/ 10118 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10119 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10120 { 10121 ARMCPU *cpu = ARM_CPU(cs); 10122 CPUARMState *env = &cpu->env; 10123 MemTxAttrs attrs = {}; 10124 MemTxResult result = MEMTX_OK; 10125 AddressSpace *as; 10126 uint32_t data; 10127 10128 attrs.secure = is_secure; 10129 as = arm_addressspace(cs, attrs); 10130 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 10131 if (fi->s1ptw) { 10132 return 0; 10133 } 10134 if (regime_translation_big_endian(env, mmu_idx)) { 10135 data = address_space_ldl_be(as, addr, attrs, &result); 10136 } else { 10137 data = address_space_ldl_le(as, addr, attrs, &result); 10138 } 10139 if (result == MEMTX_OK) { 10140 return data; 10141 } 10142 fi->type = ARMFault_SyncExternalOnWalk; 10143 fi->ea = arm_extabort_type(result); 10144 return 0; 10145 } 10146 10147 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure, 10148 ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi) 10149 { 10150 ARMCPU *cpu = ARM_CPU(cs); 10151 CPUARMState *env = &cpu->env; 10152 MemTxAttrs attrs = {}; 10153 MemTxResult result = MEMTX_OK; 10154 AddressSpace *as; 10155 uint64_t data; 10156 10157 attrs.secure = is_secure; 10158 as = arm_addressspace(cs, attrs); 10159 addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi); 10160 if (fi->s1ptw) { 10161 return 0; 10162 } 10163 if (regime_translation_big_endian(env, mmu_idx)) { 10164 data = address_space_ldq_be(as, addr, attrs, &result); 10165 } else { 10166 data = address_space_ldq_le(as, addr, attrs, &result); 10167 } 10168 if (result == MEMTX_OK) { 10169 return data; 10170 } 10171 fi->type = ARMFault_SyncExternalOnWalk; 10172 fi->ea = arm_extabort_type(result); 10173 return 0; 10174 } 10175 10176 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address, 10177 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10178 hwaddr *phys_ptr, int *prot, 10179 target_ulong *page_size, 10180 ARMMMUFaultInfo *fi) 10181 { 10182 CPUState *cs = CPU(arm_env_get_cpu(env)); 10183 int level = 1; 10184 uint32_t table; 10185 uint32_t desc; 10186 int type; 10187 int ap; 10188 int domain = 0; 10189 int domain_prot; 10190 hwaddr phys_addr; 10191 uint32_t dacr; 10192 10193 /* Pagetable walk. */ 10194 /* Lookup l1 descriptor. */ 10195 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10196 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10197 fi->type = ARMFault_Translation; 10198 goto do_fault; 10199 } 10200 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10201 mmu_idx, fi); 10202 if (fi->type != ARMFault_None) { 10203 goto do_fault; 10204 } 10205 type = (desc & 3); 10206 domain = (desc >> 5) & 0x0f; 10207 if (regime_el(env, mmu_idx) == 1) { 10208 dacr = env->cp15.dacr_ns; 10209 } else { 10210 dacr = env->cp15.dacr_s; 10211 } 10212 domain_prot = (dacr >> (domain * 2)) & 3; 10213 if (type == 0) { 10214 /* Section translation fault. */ 10215 fi->type = ARMFault_Translation; 10216 goto do_fault; 10217 } 10218 if (type != 2) { 10219 level = 2; 10220 } 10221 if (domain_prot == 0 || domain_prot == 2) { 10222 fi->type = ARMFault_Domain; 10223 goto do_fault; 10224 } 10225 if (type == 2) { 10226 /* 1Mb section. */ 10227 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10228 ap = (desc >> 10) & 3; 10229 *page_size = 1024 * 1024; 10230 } else { 10231 /* Lookup l2 entry. */ 10232 if (type == 1) { 10233 /* Coarse pagetable. */ 10234 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10235 } else { 10236 /* Fine pagetable. 
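 * A fine second-level table has 1024 4-byte entries, one per 1KB, indexed
 * by VA[19:10]; a coarse table has 256 entries, one per 4KB, indexed by
 * VA[19:12]. Worked example with an arbitrary address: for VA 0x00123456
 * the fine-table index is (0x00123456 >> 10) & 0x3ff = 0x08d, so the
 * descriptor is fetched from table base + 0x08d * 4 = base + 0x234, which
 * is exactly what (address >> 8) & 0xffc computes below.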
*/ 10237 table = (desc & 0xfffff000) | ((address >> 8) & 0xffc); 10238 } 10239 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10240 mmu_idx, fi); 10241 if (fi->type != ARMFault_None) { 10242 goto do_fault; 10243 } 10244 switch (desc & 3) { 10245 case 0: /* Page translation fault. */ 10246 fi->type = ARMFault_Translation; 10247 goto do_fault; 10248 case 1: /* 64k page. */ 10249 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10250 ap = (desc >> (4 + ((address >> 13) & 6))) & 3; 10251 *page_size = 0x10000; 10252 break; 10253 case 2: /* 4k page. */ 10254 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10255 ap = (desc >> (4 + ((address >> 9) & 6))) & 3; 10256 *page_size = 0x1000; 10257 break; 10258 case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ 10259 if (type == 1) { 10260 /* ARMv6/XScale extended small page format */ 10261 if (arm_feature(env, ARM_FEATURE_XSCALE) 10262 || arm_feature(env, ARM_FEATURE_V6)) { 10263 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10264 *page_size = 0x1000; 10265 } else { 10266 /* UNPREDICTABLE in ARMv5; we choose to take a 10267 * page translation fault. 10268 */ 10269 fi->type = ARMFault_Translation; 10270 goto do_fault; 10271 } 10272 } else { 10273 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff); 10274 *page_size = 0x400; 10275 } 10276 ap = (desc >> 4) & 3; 10277 break; 10278 default: 10279 /* Never happens, but compiler isn't smart enough to tell. */ 10280 abort(); 10281 } 10282 } 10283 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10284 *prot |= *prot ? PAGE_EXEC : 0; 10285 if (!(*prot & (1 << access_type))) { 10286 /* Access permission fault. */ 10287 fi->type = ARMFault_Permission; 10288 goto do_fault; 10289 } 10290 *phys_ptr = phys_addr; 10291 return false; 10292 do_fault: 10293 fi->domain = domain; 10294 fi->level = level; 10295 return true; 10296 } 10297 10298 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address, 10299 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10300 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 10301 target_ulong *page_size, ARMMMUFaultInfo *fi) 10302 { 10303 CPUState *cs = CPU(arm_env_get_cpu(env)); 10304 int level = 1; 10305 uint32_t table; 10306 uint32_t desc; 10307 uint32_t xn; 10308 uint32_t pxn = 0; 10309 int type; 10310 int ap; 10311 int domain = 0; 10312 int domain_prot; 10313 hwaddr phys_addr; 10314 uint32_t dacr; 10315 bool ns; 10316 10317 /* Pagetable walk. */ 10318 /* Lookup l1 descriptor. */ 10319 if (!get_level1_table_address(env, mmu_idx, &table, address)) { 10320 /* Section translation fault if page walk is disabled by PD0 or PD1 */ 10321 fi->type = ARMFault_Translation; 10322 goto do_fault; 10323 } 10324 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10325 mmu_idx, fi); 10326 if (fi->type != ARMFault_None) { 10327 goto do_fault; 10328 } 10329 type = (desc & 3); 10330 if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) { 10331 /* Section translation fault, or attempt to use the encoding 10332 * which is Reserved on implementations without PXN. 10333 */ 10334 fi->type = ARMFault_Translation; 10335 goto do_fault; 10336 } 10337 if ((type == 1) || !(desc & (1 << 18))) { 10338 /* Page or Section. 
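 * Supersection descriptors (bit 18 set) do not carry a domain field, which
 * is why the domain is only extracted here for page-table and ordinary
 * section descriptors.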
*/ 10339 domain = (desc >> 5) & 0x0f; 10340 } 10341 if (regime_el(env, mmu_idx) == 1) { 10342 dacr = env->cp15.dacr_ns; 10343 } else { 10344 dacr = env->cp15.dacr_s; 10345 } 10346 if (type == 1) { 10347 level = 2; 10348 } 10349 domain_prot = (dacr >> (domain * 2)) & 3; 10350 if (domain_prot == 0 || domain_prot == 2) { 10351 /* Section or Page domain fault */ 10352 fi->type = ARMFault_Domain; 10353 goto do_fault; 10354 } 10355 if (type != 1) { 10356 if (desc & (1 << 18)) { 10357 /* Supersection. */ 10358 phys_addr = (desc & 0xff000000) | (address & 0x00ffffff); 10359 phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32; 10360 phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36; 10361 *page_size = 0x1000000; 10362 } else { 10363 /* Section. */ 10364 phys_addr = (desc & 0xfff00000) | (address & 0x000fffff); 10365 *page_size = 0x100000; 10366 } 10367 ap = ((desc >> 10) & 3) | ((desc >> 13) & 4); 10368 xn = desc & (1 << 4); 10369 pxn = desc & 1; 10370 ns = extract32(desc, 19, 1); 10371 } else { 10372 if (arm_feature(env, ARM_FEATURE_PXN)) { 10373 pxn = (desc >> 2) & 1; 10374 } 10375 ns = extract32(desc, 3, 1); 10376 /* Lookup l2 entry. */ 10377 table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc); 10378 desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx), 10379 mmu_idx, fi); 10380 if (fi->type != ARMFault_None) { 10381 goto do_fault; 10382 } 10383 ap = ((desc >> 4) & 3) | ((desc >> 7) & 4); 10384 switch (desc & 3) { 10385 case 0: /* Page translation fault. */ 10386 fi->type = ARMFault_Translation; 10387 goto do_fault; 10388 case 1: /* 64k page. */ 10389 phys_addr = (desc & 0xffff0000) | (address & 0xffff); 10390 xn = desc & (1 << 15); 10391 *page_size = 0x10000; 10392 break; 10393 case 2: case 3: /* 4k page. */ 10394 phys_addr = (desc & 0xfffff000) | (address & 0xfff); 10395 xn = desc & 1; 10396 *page_size = 0x1000; 10397 break; 10398 default: 10399 /* Never happens, but compiler isn't smart enough to tell. */ 10400 abort(); 10401 } 10402 } 10403 if (domain_prot == 3) { 10404 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10405 } else { 10406 if (pxn && !regime_is_user(env, mmu_idx)) { 10407 xn = 1; 10408 } 10409 if (xn && access_type == MMU_INST_FETCH) { 10410 fi->type = ARMFault_Permission; 10411 goto do_fault; 10412 } 10413 10414 if (arm_feature(env, ARM_FEATURE_V6K) && 10415 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) { 10416 /* The simplified model uses AP[0] as an access control bit. */ 10417 if ((ap & 1) == 0) { 10418 /* Access flag fault. */ 10419 fi->type = ARMFault_AccessFlag; 10420 goto do_fault; 10421 } 10422 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1); 10423 } else { 10424 *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot); 10425 } 10426 if (*prot && !xn) { 10427 *prot |= PAGE_EXEC; 10428 } 10429 if (!(*prot & (1 << access_type))) { 10430 /* Access permission fault. */ 10431 fi->type = ARMFault_Permission; 10432 goto do_fault; 10433 } 10434 } 10435 if (ns) { 10436 /* The NS bit will (as required by the architecture) have no effect if 10437 * the CPU doesn't support TZ or this is a non-secure translation 10438 * regime, because the attribute will already be non-secure. 
10439 */ 10440 attrs->secure = false; 10441 } 10442 *phys_ptr = phys_addr; 10443 return false; 10444 do_fault: 10445 fi->domain = domain; 10446 fi->level = level; 10447 return true; 10448 } 10449 10450 /* 10451 * check_s2_mmu_setup 10452 * @cpu: ARMCPU 10453 * @is_aa64: True if the translation regime is in AArch64 state 10454 * @startlevel: Suggested starting level 10455 * @inputsize: Bitsize of IPAs 10456 * @stride: Page-table stride (See the ARM ARM) 10457 * 10458 * Returns true if the suggested S2 translation parameters are OK and 10459 * false otherwise. 10460 */ 10461 static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, 10462 int inputsize, int stride) 10463 { 10464 const int grainsize = stride + 3; 10465 int startsizecheck; 10466 10467 /* Negative levels are never allowed. */ 10468 if (level < 0) { 10469 return false; 10470 } 10471 10472 startsizecheck = inputsize - ((3 - level) * stride + grainsize); 10473 if (startsizecheck < 1 || startsizecheck > stride + 4) { 10474 return false; 10475 } 10476 10477 if (is_aa64) { 10478 CPUARMState *env = &cpu->env; 10479 unsigned int pamax = arm_pamax(cpu); 10480 10481 switch (stride) { 10482 case 13: /* 64KB Pages. */ 10483 if (level == 0 || (level == 1 && pamax <= 42)) { 10484 return false; 10485 } 10486 break; 10487 case 11: /* 16KB Pages. */ 10488 if (level == 0 || (level == 1 && pamax <= 40)) { 10489 return false; 10490 } 10491 break; 10492 case 9: /* 4KB Pages. */ 10493 if (level == 0 && pamax <= 42) { 10494 return false; 10495 } 10496 break; 10497 default: 10498 g_assert_not_reached(); 10499 } 10500 10501 /* Inputsize checks. */ 10502 if (inputsize > pamax && 10503 (arm_el_is_aa64(env, 1) || inputsize > 40)) { 10504 /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */ 10505 return false; 10506 } 10507 } else { 10508 /* AArch32 only supports 4KB pages. Assert on that. */ 10509 assert(stride == 9); 10510 10511 if (level == 0) { 10512 return false; 10513 } 10514 } 10515 return true; 10516 } 10517 10518 /* Translate from the 4-bit stage 2 representation of 10519 * memory attributes (without cache-allocation hints) to 10520 * the 8-bit representation of the stage 1 MAIR registers 10521 * (which includes allocation hints). 10522 * 10523 * ref: shared/translation/attrs/S2AttrDecode() 10524 * .../S2ConvertAttrsHints() 10525 */ 10526 static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) 10527 { 10528 uint8_t hiattr = extract32(s2attrs, 2, 2); 10529 uint8_t loattr = extract32(s2attrs, 0, 2); 10530 uint8_t hihint = 0, lohint = 0; 10531 10532 if (hiattr != 0) { /* normal memory */ 10533 if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */ 10534 hiattr = loattr = 1; /* non-cacheable */ 10535 } else { 10536 if (hiattr != 1) { /* Write-through or write-back */ 10537 hihint = 3; /* RW allocate */ 10538 } 10539 if (loattr != 1) { /* Write-through or write-back */ 10540 lohint = 3; /* RW allocate */ 10541 } 10542 } 10543 } 10544 10545 return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint; 10546 } 10547 #endif /* !CONFIG_USER_ONLY */ 10548 10549 ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va, 10550 ARMMMUIdx mmu_idx) 10551 { 10552 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10553 uint32_t el = regime_el(env, mmu_idx); 10554 bool tbi, tbid, epd, hpd, using16k, using64k; 10555 int select, tsz; 10556 10557 /* 10558 * Bit 55 is always between the two regions, and is canonical for 10559 * determining if address tagging is enabled. 
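 * Illustrative example (addresses chosen arbitrarily): with 48-bit regions,
 * 0x00007ffffffff000 has bit 55 clear and takes the TTBR0/T0SZ parameters,
 * while 0xffff800000000000 has bit 55 set and takes the TTBR1/T1SZ
 * parameters extracted below.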
10560 */ 10561 select = extract64(va, 55, 1); 10562 10563 if (el > 1) { 10564 tsz = extract32(tcr, 0, 6); 10565 using64k = extract32(tcr, 14, 1); 10566 using16k = extract32(tcr, 15, 1); 10567 if (mmu_idx == ARMMMUIdx_S2NS) { 10568 /* VTCR_EL2 */ 10569 tbi = tbid = hpd = false; 10570 } else { 10571 tbi = extract32(tcr, 20, 1); 10572 hpd = extract32(tcr, 24, 1); 10573 tbid = extract32(tcr, 29, 1); 10574 } 10575 epd = false; 10576 } else if (!select) { 10577 tsz = extract32(tcr, 0, 6); 10578 epd = extract32(tcr, 7, 1); 10579 using64k = extract32(tcr, 14, 1); 10580 using16k = extract32(tcr, 15, 1); 10581 tbi = extract64(tcr, 37, 1); 10582 hpd = extract64(tcr, 41, 1); 10583 tbid = extract64(tcr, 51, 1); 10584 } else { 10585 int tg = extract32(tcr, 30, 2); 10586 using16k = tg == 1; 10587 using64k = tg == 3; 10588 tsz = extract32(tcr, 16, 6); 10589 epd = extract32(tcr, 23, 1); 10590 tbi = extract64(tcr, 38, 1); 10591 hpd = extract64(tcr, 42, 1); 10592 tbid = extract64(tcr, 52, 1); 10593 } 10594 tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ 10595 tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ 10596 10597 return (ARMVAParameters) { 10598 .tsz = tsz, 10599 .select = select, 10600 .tbi = tbi, 10601 .tbid = tbid, 10602 .epd = epd, 10603 .hpd = hpd, 10604 .using16k = using16k, 10605 .using64k = using64k, 10606 }; 10607 } 10608 10609 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, 10610 ARMMMUIdx mmu_idx, bool data) 10611 { 10612 ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx); 10613 10614 /* Present TBI as a composite with TBID. */ 10615 ret.tbi &= (data || !ret.tbid); 10616 return ret; 10617 } 10618 10619 #ifndef CONFIG_USER_ONLY 10620 static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, 10621 ARMMMUIdx mmu_idx) 10622 { 10623 uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; 10624 uint32_t el = regime_el(env, mmu_idx); 10625 int select, tsz; 10626 bool epd, hpd; 10627 10628 if (mmu_idx == ARMMMUIdx_S2NS) { 10629 /* VTCR */ 10630 bool sext = extract32(tcr, 4, 1); 10631 bool sign = extract32(tcr, 3, 1); 10632 10633 /* 10634 * If the sign-extend bit is not the same as t0sz[3], the result 10635 * is unpredictable. Flag this as a guest error. 10636 */ 10637 if (sign != sext) { 10638 qemu_log_mask(LOG_GUEST_ERROR, 10639 "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n"); 10640 } 10641 tsz = sextract32(tcr, 0, 4) + 8; 10642 select = 0; 10643 hpd = false; 10644 epd = false; 10645 } else if (el == 2) { 10646 /* HTCR */ 10647 tsz = extract32(tcr, 0, 3); 10648 select = 0; 10649 hpd = extract64(tcr, 24, 1); 10650 epd = false; 10651 } else { 10652 int t0sz = extract32(tcr, 0, 3); 10653 int t1sz = extract32(tcr, 16, 3); 10654 10655 if (t1sz == 0) { 10656 select = va > (0xffffffffu >> t0sz); 10657 } else { 10658 /* Note that we will detect errors later. */ 10659 select = va >= ~(0xffffffffu >> t1sz); 10660 } 10661 if (!select) { 10662 tsz = t0sz; 10663 epd = extract32(tcr, 7, 1); 10664 hpd = extract64(tcr, 41, 1); 10665 } else { 10666 tsz = t1sz; 10667 epd = extract32(tcr, 23, 1); 10668 hpd = extract64(tcr, 42, 1); 10669 } 10670 /* For aarch32, hpd0 is not enabled without t2e as well. 
*/ 10671 hpd &= extract32(tcr, 6, 1); 10672 } 10673 10674 return (ARMVAParameters) { 10675 .tsz = tsz, 10676 .select = select, 10677 .epd = epd, 10678 .hpd = hpd, 10679 }; 10680 } 10681 10682 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, 10683 MMUAccessType access_type, ARMMMUIdx mmu_idx, 10684 hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot, 10685 target_ulong *page_size_ptr, 10686 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 10687 { 10688 ARMCPU *cpu = arm_env_get_cpu(env); 10689 CPUState *cs = CPU(cpu); 10690 /* Read an LPAE long-descriptor translation table. */ 10691 ARMFaultType fault_type = ARMFault_Translation; 10692 uint32_t level; 10693 ARMVAParameters param; 10694 uint64_t ttbr; 10695 hwaddr descaddr, indexmask, indexmask_grainsize; 10696 uint32_t tableattrs; 10697 target_ulong page_size; 10698 uint32_t attrs; 10699 int32_t stride; 10700 int addrsize, inputsize; 10701 TCR *tcr = regime_tcr(env, mmu_idx); 10702 int ap, ns, xn, pxn; 10703 uint32_t el = regime_el(env, mmu_idx); 10704 bool ttbr1_valid; 10705 uint64_t descaddrmask; 10706 bool aarch64 = arm_el_is_aa64(env, el); 10707 bool guarded = false; 10708 10709 /* TODO: 10710 * This code does not handle the different format TCR for VTCR_EL2. 10711 * This code also does not support shareability levels. 10712 * Attribute and permission bit handling should also be checked when adding 10713 * support for those page table walks. 10714 */ 10715 if (aarch64) { 10716 param = aa64_va_parameters(env, address, mmu_idx, 10717 access_type != MMU_INST_FETCH); 10718 level = 0; 10719 /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it 10720 * invalid. 10721 */ 10722 ttbr1_valid = (el < 2); 10723 addrsize = 64 - 8 * param.tbi; 10724 inputsize = 64 - param.tsz; 10725 } else { 10726 param = aa32_va_parameters(env, address, mmu_idx); 10727 level = 1; 10728 /* There is no TTBR1 for EL2 */ 10729 ttbr1_valid = (el != 2); 10730 addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32); 10731 inputsize = addrsize - param.tsz; 10732 } 10733 10734 /* 10735 * We determined the region when collecting the parameters, but we 10736 * have not yet validated that the address is valid for the region. 10737 * Extract the top bits and verify that they all match select. 10738 * 10739 * For aa32, if inputsize == addrsize, then we have selected the 10740 * region by exclusion in aa32_va_parameters and there is no more 10741 * validation to do here. 10742 */ 10743 if (inputsize < addrsize) { 10744 target_ulong top_bits = sextract64(address, inputsize, 10745 addrsize - inputsize); 10746 if (-top_bits != param.select || (param.select && !ttbr1_valid)) { 10747 /* The gap between the two regions is a Translation fault */ 10748 fault_type = ARMFault_Translation; 10749 goto do_fault; 10750 } 10751 } 10752 10753 if (param.using64k) { 10754 stride = 13; 10755 } else if (param.using16k) { 10756 stride = 11; 10757 } else { 10758 stride = 9; 10759 } 10760 10761 /* Note that QEMU ignores shareability and cacheability attributes, 10762 * so we don't need to do anything with the SH, ORGN, IRGN fields 10763 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the 10764 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently 10765 * implement any ASID-like capability so we can ignore it (instead 10766 * we will always flush the TLB any time the ASID is changed). 
10767 */ 10768 ttbr = regime_ttbr(env, mmu_idx, param.select); 10769 10770 /* Here we should have set up all the parameters for the translation: 10771 * inputsize, ttbr, epd, stride, tbi 10772 */ 10773 10774 if (param.epd) { 10775 /* Translation table walk disabled => Translation fault on TLB miss 10776 * Note: This is always 0 on 64-bit EL2 and EL3. 10777 */ 10778 goto do_fault; 10779 } 10780 10781 if (mmu_idx != ARMMMUIdx_S2NS) { 10782 /* The starting level depends on the virtual address size (which can 10783 * be up to 48 bits) and the translation granule size. It indicates 10784 * the number of strides (stride bits at a time) needed to 10785 * consume the bits of the input address. In the pseudocode this is: 10786 * level = 4 - RoundUp((inputsize - grainsize) / stride) 10787 * where their 'inputsize' is our 'inputsize', 'grainsize' is 10788 * our 'stride + 3' and 'stride' is our 'stride'. 10789 * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying: 10790 * = 4 - (inputsize - stride - 3 + stride - 1) / stride 10791 * = 4 - (inputsize - 4) / stride; 10792 */ 10793 level = 4 - (inputsize - 4) / stride; 10794 } else { 10795 /* For stage 2 translations the starting level is specified by the 10796 * VTCR_EL2.SL0 field (whose interpretation depends on the page size) 10797 */ 10798 uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2); 10799 uint32_t startlevel; 10800 bool ok; 10801 10802 if (!aarch64 || stride == 9) { 10803 /* AArch32 or 4KB pages */ 10804 startlevel = 2 - sl0; 10805 } else { 10806 /* 16KB or 64KB pages */ 10807 startlevel = 3 - sl0; 10808 } 10809 10810 /* Check that the starting level is valid. */ 10811 ok = check_s2_mmu_setup(cpu, aarch64, startlevel, 10812 inputsize, stride); 10813 if (!ok) { 10814 fault_type = ARMFault_Translation; 10815 goto do_fault; 10816 } 10817 level = startlevel; 10818 } 10819 10820 indexmask_grainsize = (1ULL << (stride + 3)) - 1; 10821 indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1; 10822 10823 /* Now we can extract the actual base address from the TTBR */ 10824 descaddr = extract64(ttbr, 0, 48); 10825 descaddr &= ~indexmask; 10826 10827 /* The address field in the descriptor goes up to bit 39 for ARMv7 10828 * but up to bit 47 for ARMv8, but we use the descaddrmask 10829 * up to bit 39 for AArch32, because we don't need other bits in that case 10830 * to construct next descriptor address (anyway they should be all zeroes). 10831 */ 10832 descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) & 10833 ~indexmask_grainsize; 10834 10835 /* Secure accesses start with the page table in secure memory and 10836 * can be downgraded to non-secure at any step. Non-secure accesses 10837 * remain non-secure. We implement this by just ORing in the NSTable/NS 10838 * bits at each step. 10839 */ 10840 tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4); 10841 for (;;) { 10842 uint64_t descriptor; 10843 bool nstable; 10844 10845 descaddr |= (address >> (stride * (4 - level))) & indexmask; 10846 descaddr &= ~7ULL; 10847 nstable = extract32(tableattrs, 4, 1); 10848 descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi); 10849 if (fi->type != ARMFault_None) { 10850 goto do_fault; 10851 } 10852 10853 if (!(descriptor & 1) || 10854 (!(descriptor & 2) && (level == 3))) { 10855 /* Invalid, or the Reserved level 3 encoding */ 10856 goto do_fault; 10857 } 10858 descaddr = descriptor & descaddrmask; 10859 10860 if ((descriptor & 2) && (level < 3)) { 10861 /* Table entry. 
The top five bits are attributes which may 10862 * propagate down through lower levels of the table (and 10863 * which are all arranged so that 0 means "no effect", so 10864 * we can gather them up by ORing in the bits at each level). 10865 */ 10866 tableattrs |= extract64(descriptor, 59, 5); 10867 level++; 10868 indexmask = indexmask_grainsize; 10869 continue; 10870 } 10871 /* Block entry at level 1 or 2, or page entry at level 3. 10872 * These are basically the same thing, although the number 10873 * of bits we pull in from the vaddr varies. 10874 */ 10875 page_size = (1ULL << ((stride * (4 - level)) + 3)); 10876 descaddr |= (address & (page_size - 1)); 10877 /* Extract attributes from the descriptor */ 10878 attrs = extract64(descriptor, 2, 10) 10879 | (extract64(descriptor, 52, 12) << 10); 10880 10881 if (mmu_idx == ARMMMUIdx_S2NS) { 10882 /* Stage 2 table descriptors do not include any attribute fields */ 10883 break; 10884 } 10885 /* Merge in attributes from table descriptors */ 10886 attrs |= nstable << 3; /* NS */ 10887 guarded = extract64(descriptor, 50, 1); /* GP */ 10888 if (param.hpd) { 10889 /* HPD disables all the table attributes except NSTable. */ 10890 break; 10891 } 10892 attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */ 10893 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 10894 * means "force PL1 access only", which means forcing AP[1] to 0. 10895 */ 10896 attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */ 10897 attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */ 10898 break; 10899 } 10900 /* Here descaddr is the final physical address, and attributes 10901 * are all in attrs. 10902 */ 10903 fault_type = ARMFault_AccessFlag; 10904 if ((attrs & (1 << 8)) == 0) { 10905 /* Access flag */ 10906 goto do_fault; 10907 } 10908 10909 ap = extract32(attrs, 4, 2); 10910 xn = extract32(attrs, 12, 1); 10911 10912 if (mmu_idx == ARMMMUIdx_S2NS) { 10913 ns = true; 10914 *prot = get_S2prot(env, ap, xn); 10915 } else { 10916 ns = extract32(attrs, 3, 1); 10917 pxn = extract32(attrs, 11, 1); 10918 *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn); 10919 } 10920 10921 fault_type = ARMFault_Permission; 10922 if (!(*prot & (1 << access_type))) { 10923 goto do_fault; 10924 } 10925 10926 if (ns) { 10927 /* The NS bit will (as required by the architecture) have no effect if 10928 * the CPU doesn't support TZ or this is a non-secure translation 10929 * regime, because the attribute will already be non-secure. 10930 */ 10931 txattrs->secure = false; 10932 } 10933 /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */ 10934 if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) { 10935 txattrs->target_tlb_bit0 = true; 10936 } 10937 10938 if (cacheattrs != NULL) { 10939 if (mmu_idx == ARMMMUIdx_S2NS) { 10940 cacheattrs->attrs = convert_stage2_attrs(env, 10941 extract32(attrs, 0, 4)); 10942 } else { 10943 /* Index into MAIR registers for cache attributes */ 10944 uint8_t attrindx = extract32(attrs, 0, 3); 10945 uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; 10946 assert(attrindx <= 7); 10947 cacheattrs->attrs = extract64(mair, attrindx * 8, 8); 10948 } 10949 cacheattrs->shareability = extract32(attrs, 6, 2); 10950 } 10951 10952 *phys_ptr = descaddr; 10953 *page_size_ptr = page_size; 10954 return false; 10955 10956 do_fault: 10957 fi->type = fault_type; 10958 fi->level = level; 10959 /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. 
*/ 10960 fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS); 10961 return true; 10962 } 10963 10964 static inline void get_phys_addr_pmsav7_default(CPUARMState *env, 10965 ARMMMUIdx mmu_idx, 10966 int32_t address, int *prot) 10967 { 10968 if (!arm_feature(env, ARM_FEATURE_M)) { 10969 *prot = PAGE_READ | PAGE_WRITE; 10970 switch (address) { 10971 case 0xF0000000 ... 0xFFFFFFFF: 10972 if (regime_sctlr(env, mmu_idx) & SCTLR_V) { 10973 /* hivecs execing is ok */ 10974 *prot |= PAGE_EXEC; 10975 } 10976 break; 10977 case 0x00000000 ... 0x7FFFFFFF: 10978 *prot |= PAGE_EXEC; 10979 break; 10980 } 10981 } else { 10982 /* Default system address map for M profile cores. 10983 * The architecture specifies which regions are execute-never; 10984 * at the MPU level no other checks are defined. 10985 */ 10986 switch (address) { 10987 case 0x00000000 ... 0x1fffffff: /* ROM */ 10988 case 0x20000000 ... 0x3fffffff: /* SRAM */ 10989 case 0x60000000 ... 0x7fffffff: /* RAM */ 10990 case 0x80000000 ... 0x9fffffff: /* RAM */ 10991 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 10992 break; 10993 case 0x40000000 ... 0x5fffffff: /* Peripheral */ 10994 case 0xa0000000 ... 0xbfffffff: /* Device */ 10995 case 0xc0000000 ... 0xdfffffff: /* Device */ 10996 case 0xe0000000 ... 0xffffffff: /* System */ 10997 *prot = PAGE_READ | PAGE_WRITE; 10998 break; 10999 default: 11000 g_assert_not_reached(); 11001 } 11002 } 11003 } 11004 11005 static bool pmsav7_use_background_region(ARMCPU *cpu, 11006 ARMMMUIdx mmu_idx, bool is_user) 11007 { 11008 /* Return true if we should use the default memory map as a 11009 * "background" region if there are no hits against any MPU regions. 11010 */ 11011 CPUARMState *env = &cpu->env; 11012 11013 if (is_user) { 11014 return false; 11015 } 11016 11017 if (arm_feature(env, ARM_FEATURE_M)) { 11018 return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] 11019 & R_V7M_MPU_CTRL_PRIVDEFENA_MASK; 11020 } else { 11021 return regime_sctlr(env, mmu_idx) & SCTLR_BR; 11022 } 11023 } 11024 11025 static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address) 11026 { 11027 /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */ 11028 return arm_feature(env, ARM_FEATURE_M) && 11029 extract32(address, 20, 12) == 0xe00; 11030 } 11031 11032 static inline bool m_is_system_region(CPUARMState *env, uint32_t address) 11033 { 11034 /* True if address is in the M profile system region 11035 * 0xe0000000 - 0xffffffff 11036 */ 11037 return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7; 11038 } 11039 11040 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address, 11041 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11042 hwaddr *phys_ptr, int *prot, 11043 target_ulong *page_size, 11044 ARMMMUFaultInfo *fi) 11045 { 11046 ARMCPU *cpu = arm_env_get_cpu(env); 11047 int n; 11048 bool is_user = regime_is_user(env, mmu_idx); 11049 11050 *phys_ptr = address; 11051 *page_size = TARGET_PAGE_SIZE; 11052 *prot = 0; 11053 11054 if (regime_translation_disabled(env, mmu_idx) || 11055 m_is_ppb_region(env, address)) { 11056 /* MPU disabled or M profile PPB access: use default memory map. 11057 * The other case which uses the default memory map in the 11058 * v7M ARM ARM pseudocode is exception vector reads from the vector 11059 * table. In QEMU those accesses are done in arm_v7m_load_vector(), 11060 * which always does a direct read using address_space_ldl(), rather 11061 * than going via this function, so we don't need to check that here. 
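 * As an illustration of the default map applied below: on an M-profile
 * core with the MPU disabled, an SRAM address such as 0x20001000 comes
 * back read/write/execute, while a Peripheral address such as 0x40002000
 * comes back read/write but execute-never.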
11062 */ 11063 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11064 } else { /* MPU enabled */ 11065 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11066 /* region search */ 11067 uint32_t base = env->pmsav7.drbar[n]; 11068 uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5); 11069 uint32_t rmask; 11070 bool srdis = false; 11071 11072 if (!(env->pmsav7.drsr[n] & 0x1)) { 11073 continue; 11074 } 11075 11076 if (!rsize) { 11077 qemu_log_mask(LOG_GUEST_ERROR, 11078 "DRSR[%d]: Rsize field cannot be 0\n", n); 11079 continue; 11080 } 11081 rsize++; 11082 rmask = (1ull << rsize) - 1; 11083 11084 if (base & rmask) { 11085 qemu_log_mask(LOG_GUEST_ERROR, 11086 "DRBAR[%d]: 0x%" PRIx32 " misaligned " 11087 "to DRSR region size, mask = 0x%" PRIx32 "\n", 11088 n, base, rmask); 11089 continue; 11090 } 11091 11092 if (address < base || address > base + rmask) { 11093 /* 11094 * Address not in this region. We must check whether the 11095 * region covers addresses in the same page as our address. 11096 * In that case we must not report a size that covers the 11097 * whole page for a subsequent hit against a different MPU 11098 * region or the background region, because it would result in 11099 * incorrect TLB hits for subsequent accesses to addresses that 11100 * are in this MPU region. 11101 */ 11102 if (ranges_overlap(base, rmask, 11103 address & TARGET_PAGE_MASK, 11104 TARGET_PAGE_SIZE)) { 11105 *page_size = 1; 11106 } 11107 continue; 11108 } 11109 11110 /* Region matched */ 11111 11112 if (rsize >= 8) { /* no subregions for regions < 256 bytes */ 11113 int i, snd; 11114 uint32_t srdis_mask; 11115 11116 rsize -= 3; /* sub region size (power of 2) */ 11117 snd = ((address - base) >> rsize) & 0x7; 11118 srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1); 11119 11120 srdis_mask = srdis ? 0x3 : 0x0; 11121 for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) { 11122 /* This will check in groups of 2, 4 and then 8, whether 11123 * the subregion bits are consistent. rsize is incremented 11124 * back up to give the region size, considering consistent 11125 * adjacent subregions as one region. Stop testing if rsize 11126 * is already big enough for an entire QEMU page. 11127 */ 11128 int snd_rounded = snd & ~(i - 1); 11129 uint32_t srdis_multi = extract32(env->pmsav7.drsr[n], 11130 snd_rounded + 8, i); 11131 if (srdis_mask ^ srdis_multi) { 11132 break; 11133 } 11134 srdis_mask = (srdis_mask << i) | srdis_mask; 11135 rsize++; 11136 } 11137 } 11138 if (srdis) { 11139 continue; 11140 } 11141 if (rsize < TARGET_PAGE_BITS) { 11142 *page_size = 1 << rsize; 11143 } 11144 break; 11145 } 11146 11147 if (n == -1) { /* no hits */ 11148 if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11149 /* background fault */ 11150 fi->type = ARMFault_Background; 11151 return true; 11152 } 11153 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11154 } else { /* a MPU hit! 
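 * Decode the DRACR access permissions. For reference, the AP encodings
 * handled below are:
 *   0b000 no access          0b011 priv RW, user RW
 *   0b001 priv RW only       0b101 priv RO only
 *   0b010 priv RW, user RO   0b110 priv RO, user RO
 * (0b111 behaves like 0b110 on v7M and is a reserved value for R profile.)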
*/ 11155 uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3); 11156 uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1); 11157 11158 if (m_is_system_region(env, address)) { 11159 /* System space is always execute never */ 11160 xn = 1; 11161 } 11162 11163 if (is_user) { /* User mode AP bit decoding */ 11164 switch (ap) { 11165 case 0: 11166 case 1: 11167 case 5: 11168 break; /* no access */ 11169 case 3: 11170 *prot |= PAGE_WRITE; 11171 /* fall through */ 11172 case 2: 11173 case 6: 11174 *prot |= PAGE_READ | PAGE_EXEC; 11175 break; 11176 case 7: 11177 /* for v7M, same as 6; for R profile a reserved value */ 11178 if (arm_feature(env, ARM_FEATURE_M)) { 11179 *prot |= PAGE_READ | PAGE_EXEC; 11180 break; 11181 } 11182 /* fall through */ 11183 default: 11184 qemu_log_mask(LOG_GUEST_ERROR, 11185 "DRACR[%d]: Bad value for AP bits: 0x%" 11186 PRIx32 "\n", n, ap); 11187 } 11188 } else { /* Priv. mode AP bits decoding */ 11189 switch (ap) { 11190 case 0: 11191 break; /* no access */ 11192 case 1: 11193 case 2: 11194 case 3: 11195 *prot |= PAGE_WRITE; 11196 /* fall through */ 11197 case 5: 11198 case 6: 11199 *prot |= PAGE_READ | PAGE_EXEC; 11200 break; 11201 case 7: 11202 /* for v7M, same as 6; for R profile a reserved value */ 11203 if (arm_feature(env, ARM_FEATURE_M)) { 11204 *prot |= PAGE_READ | PAGE_EXEC; 11205 break; 11206 } 11207 /* fall through */ 11208 default: 11209 qemu_log_mask(LOG_GUEST_ERROR, 11210 "DRACR[%d]: Bad value for AP bits: 0x%" 11211 PRIx32 "\n", n, ap); 11212 } 11213 } 11214 11215 /* execute never */ 11216 if (xn) { 11217 *prot &= ~PAGE_EXEC; 11218 } 11219 } 11220 } 11221 11222 fi->type = ARMFault_Permission; 11223 fi->level = 1; 11224 return !(*prot & (1 << access_type)); 11225 } 11226 11227 static bool v8m_is_sau_exempt(CPUARMState *env, 11228 uint32_t address, MMUAccessType access_type) 11229 { 11230 /* The architecture specifies that certain address ranges are 11231 * exempt from v8M SAU/IDAU checks. 11232 */ 11233 return 11234 (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) || 11235 (address >= 0xe0000000 && address <= 0xe0002fff) || 11236 (address >= 0xe000e000 && address <= 0xe000efff) || 11237 (address >= 0xe002e000 && address <= 0xe002efff) || 11238 (address >= 0xe0040000 && address <= 0xe0041fff) || 11239 (address >= 0xe00ff000 && address <= 0xe00fffff); 11240 } 11241 11242 static void v8m_security_lookup(CPUARMState *env, uint32_t address, 11243 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11244 V8M_SAttributes *sattrs) 11245 { 11246 /* Look up the security attributes for this address. Compare the 11247 * pseudocode SecurityCheck() function. 11248 * We assume the caller has zero-initialized *sattrs. 
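 * Illustrative caller pattern (this is what get_phys_addr_pmsav8() below
 * does for data accesses):
 *     V8M_SAttributes sattrs = {};
 *     v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
 *     if (sattrs.ns) {
 *         txattrs->secure = false;
 *     }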
11249 */ 11250 ARMCPU *cpu = arm_env_get_cpu(env); 11251 int r; 11252 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 11253 int idau_region = IREGION_NOTVALID; 11254 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11255 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11256 11257 if (cpu->idau) { 11258 IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau); 11259 IDAUInterface *ii = IDAU_INTERFACE(cpu->idau); 11260 11261 iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns, 11262 &idau_nsc); 11263 } 11264 11265 if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) { 11266 /* 0xf0000000..0xffffffff is always S for insn fetches */ 11267 return; 11268 } 11269 11270 if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) { 11271 sattrs->ns = !regime_is_secure(env, mmu_idx); 11272 return; 11273 } 11274 11275 if (idau_region != IREGION_NOTVALID) { 11276 sattrs->irvalid = true; 11277 sattrs->iregion = idau_region; 11278 } 11279 11280 switch (env->sau.ctrl & 3) { 11281 case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */ 11282 break; 11283 case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */ 11284 sattrs->ns = true; 11285 break; 11286 default: /* SAU.ENABLE == 1 */ 11287 for (r = 0; r < cpu->sau_sregion; r++) { 11288 if (env->sau.rlar[r] & 1) { 11289 uint32_t base = env->sau.rbar[r] & ~0x1f; 11290 uint32_t limit = env->sau.rlar[r] | 0x1f; 11291 11292 if (base <= address && limit >= address) { 11293 if (base > addr_page_base || limit < addr_page_limit) { 11294 sattrs->subpage = true; 11295 } 11296 if (sattrs->srvalid) { 11297 /* If we hit in more than one region then we must report 11298 * as Secure, not NS-Callable, with no valid region 11299 * number info. 11300 */ 11301 sattrs->ns = false; 11302 sattrs->nsc = false; 11303 sattrs->sregion = 0; 11304 sattrs->srvalid = false; 11305 break; 11306 } else { 11307 if (env->sau.rlar[r] & 2) { 11308 sattrs->nsc = true; 11309 } else { 11310 sattrs->ns = true; 11311 } 11312 sattrs->srvalid = true; 11313 sattrs->sregion = r; 11314 } 11315 } else { 11316 /* 11317 * Address not in this region. We must check whether the 11318 * region covers addresses in the same page as our address. 11319 * In that case we must not report a size that covers the 11320 * whole page for a subsequent hit against a different MPU 11321 * region or the background region, because it would result 11322 * in incorrect TLB hits for subsequent accesses to 11323 * addresses that are in this MPU region. 11324 */ 11325 if (limit >= base && 11326 ranges_overlap(base, limit - base + 1, 11327 addr_page_base, 11328 TARGET_PAGE_SIZE)) { 11329 sattrs->subpage = true; 11330 } 11331 } 11332 } 11333 } 11334 break; 11335 } 11336 11337 /* 11338 * The IDAU will override the SAU lookup results if it specifies 11339 * higher security than the SAU does. 11340 */ 11341 if (!idau_ns) { 11342 if (sattrs->ns || (!idau_nsc && sattrs->nsc)) { 11343 sattrs->ns = false; 11344 sattrs->nsc = idau_nsc; 11345 } 11346 } 11347 } 11348 11349 static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, 11350 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11351 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11352 int *prot, bool *is_subpage, 11353 ARMMMUFaultInfo *fi, uint32_t *mregion) 11354 { 11355 /* Perform a PMSAv8 MPU lookup (without also doing the SAU check 11356 * that a full phys-to-virt translation does). 
11357 * mregion is (if not NULL) set to the region number which matched, 11358 * or -1 if no region number is returned (MPU off, address did not 11359 * hit a region, address hit in multiple regions). 11360 * We set is_subpage to true if the region hit doesn't cover the 11361 * entire TARGET_PAGE the address is within. 11362 */ 11363 ARMCPU *cpu = arm_env_get_cpu(env); 11364 bool is_user = regime_is_user(env, mmu_idx); 11365 uint32_t secure = regime_is_secure(env, mmu_idx); 11366 int n; 11367 int matchregion = -1; 11368 bool hit = false; 11369 uint32_t addr_page_base = address & TARGET_PAGE_MASK; 11370 uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); 11371 11372 *is_subpage = false; 11373 *phys_ptr = address; 11374 *prot = 0; 11375 if (mregion) { 11376 *mregion = -1; 11377 } 11378 11379 /* Unlike the ARM ARM pseudocode, we don't need to check whether this 11380 * was an exception vector read from the vector table (which is always 11381 * done using the default system address map), because those accesses 11382 * are done in arm_v7m_load_vector(), which always does a direct 11383 * read using address_space_ldl(), rather than going via this function. 11384 */ 11385 if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */ 11386 hit = true; 11387 } else if (m_is_ppb_region(env, address)) { 11388 hit = true; 11389 } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) { 11390 hit = true; 11391 } else { 11392 for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) { 11393 /* region search */ 11394 /* Note that the base address is bits [31:5] from the register 11395 * with bits [4:0] all zeroes, but the limit address is bits 11396 * [31:5] from the register with bits [4:0] all ones. 11397 */ 11398 uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f; 11399 uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f; 11400 11401 if (!(env->pmsav8.rlar[secure][n] & 0x1)) { 11402 /* Region disabled */ 11403 continue; 11404 } 11405 11406 if (address < base || address > limit) { 11407 /* 11408 * Address not in this region. We must check whether the 11409 * region covers addresses in the same page as our address. 11410 * In that case we must not report a size that covers the 11411 * whole page for a subsequent hit against a different MPU 11412 * region or the background region, because it would result in 11413 * incorrect TLB hits for subsequent accesses to addresses that 11414 * are in this MPU region. 
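 * For example, an enabled region that misses the address but overlaps
 * the target page containing it must still set *is_subpage, so that the
 * caller reports *page_size = 1 rather than TARGET_PAGE_SIZE.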
11415 */ 11416 if (limit >= base && 11417 ranges_overlap(base, limit - base + 1, 11418 addr_page_base, 11419 TARGET_PAGE_SIZE)) { 11420 *is_subpage = true; 11421 } 11422 continue; 11423 } 11424 11425 if (base > addr_page_base || limit < addr_page_limit) { 11426 *is_subpage = true; 11427 } 11428 11429 if (hit) { 11430 /* Multiple regions match -- always a failure (unlike 11431 * PMSAv7 where highest-numbered-region wins) 11432 */ 11433 fi->type = ARMFault_Permission; 11434 fi->level = 1; 11435 return true; 11436 } 11437 11438 matchregion = n; 11439 hit = true; 11440 } 11441 } 11442 11443 if (!hit) { 11444 /* background fault */ 11445 fi->type = ARMFault_Background; 11446 return true; 11447 } 11448 11449 if (matchregion == -1) { 11450 /* hit using the background region */ 11451 get_phys_addr_pmsav7_default(env, mmu_idx, address, prot); 11452 } else { 11453 uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2); 11454 uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1); 11455 11456 if (m_is_system_region(env, address)) { 11457 /* System space is always execute never */ 11458 xn = 1; 11459 } 11460 11461 *prot = simple_ap_to_rw_prot(env, mmu_idx, ap); 11462 if (*prot && !xn) { 11463 *prot |= PAGE_EXEC; 11464 } 11465 /* We don't need to look the attribute up in the MAIR0/MAIR1 11466 * registers because that only tells us about cacheability. 11467 */ 11468 if (mregion) { 11469 *mregion = matchregion; 11470 } 11471 } 11472 11473 fi->type = ARMFault_Permission; 11474 fi->level = 1; 11475 return !(*prot & (1 << access_type)); 11476 } 11477 11478 11479 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address, 11480 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11481 hwaddr *phys_ptr, MemTxAttrs *txattrs, 11482 int *prot, target_ulong *page_size, 11483 ARMMMUFaultInfo *fi) 11484 { 11485 uint32_t secure = regime_is_secure(env, mmu_idx); 11486 V8M_SAttributes sattrs = {}; 11487 bool ret; 11488 bool mpu_is_subpage; 11489 11490 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 11491 v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs); 11492 if (access_type == MMU_INST_FETCH) { 11493 /* Instruction fetches always use the MMU bank and the 11494 * transaction attribute determined by the fetch address, 11495 * regardless of CPU state. This is painful for QEMU 11496 * to handle, because it would mean we need to encode 11497 * into the mmu_idx not just the (user, negpri) information 11498 * for the current security state but also that for the 11499 * other security state, which would balloon the number 11500 * of mmu_idx values needed alarmingly. 11501 * Fortunately we can avoid this because it's not actually 11502 * possible to arbitrarily execute code from memory with 11503 * the wrong security attribute: it will always generate 11504 * an exception of some kind or another, apart from the 11505 * special case of an NS CPU executing an SG instruction 11506 * in S&NSC memory. So we always just fail the translation 11507 * here and sort things out in the exception handler 11508 * (including possibly emulating an SG instruction). 11509 */ 11510 if (sattrs.ns != !secure) { 11511 if (sattrs.nsc) { 11512 fi->type = ARMFault_QEMU_NSCExec; 11513 } else { 11514 fi->type = ARMFault_QEMU_SFault; 11515 } 11516 *page_size = sattrs.subpage ? 
1 : TARGET_PAGE_SIZE; 11517 *phys_ptr = address; 11518 *prot = 0; 11519 return true; 11520 } 11521 } else { 11522 /* For data accesses we always use the MMU bank indicated 11523 * by the current CPU state, but the security attributes 11524 * might downgrade a secure access to nonsecure. 11525 */ 11526 if (sattrs.ns) { 11527 txattrs->secure = false; 11528 } else if (!secure) { 11529 /* NS access to S memory must fault. 11530 * Architecturally we should first check whether the 11531 * MPU information for this address indicates that we 11532 * are doing an unaligned access to Device memory, which 11533 * should generate a UsageFault instead. QEMU does not 11534 * currently check for that kind of unaligned access though. 11535 * If we added it we would need to do so as a special case 11536 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt(). 11537 */ 11538 fi->type = ARMFault_QEMU_SFault; 11539 *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE; 11540 *phys_ptr = address; 11541 *prot = 0; 11542 return true; 11543 } 11544 } 11545 } 11546 11547 ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr, 11548 txattrs, prot, &mpu_is_subpage, fi, NULL); 11549 *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE; 11550 return ret; 11551 } 11552 11553 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address, 11554 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11555 hwaddr *phys_ptr, int *prot, 11556 ARMMMUFaultInfo *fi) 11557 { 11558 int n; 11559 uint32_t mask; 11560 uint32_t base; 11561 bool is_user = regime_is_user(env, mmu_idx); 11562 11563 if (regime_translation_disabled(env, mmu_idx)) { 11564 /* MPU disabled. */ 11565 *phys_ptr = address; 11566 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11567 return false; 11568 } 11569 11570 *phys_ptr = address; 11571 for (n = 7; n >= 0; n--) { 11572 base = env->cp15.c6_region[n]; 11573 if ((base & 1) == 0) { 11574 continue; 11575 } 11576 mask = 1 << ((base >> 1) & 0x1f); 11577 /* Keep this shift separate from the above to avoid an 11578 (undefined) << 32. */ 11579 mask = (mask << 1) - 1; 11580 if (((base ^ address) & ~mask) == 0) { 11581 break; 11582 } 11583 } 11584 if (n < 0) { 11585 fi->type = ARMFault_Background; 11586 return true; 11587 } 11588 11589 if (access_type == MMU_INST_FETCH) { 11590 mask = env->cp15.pmsav5_insn_ap; 11591 } else { 11592 mask = env->cp15.pmsav5_data_ap; 11593 } 11594 mask = (mask >> (n * 4)) & 0xf; 11595 switch (mask) { 11596 case 0: 11597 fi->type = ARMFault_Permission; 11598 fi->level = 1; 11599 return true; 11600 case 1: 11601 if (is_user) { 11602 fi->type = ARMFault_Permission; 11603 fi->level = 1; 11604 return true; 11605 } 11606 *prot = PAGE_READ | PAGE_WRITE; 11607 break; 11608 case 2: 11609 *prot = PAGE_READ; 11610 if (!is_user) { 11611 *prot |= PAGE_WRITE; 11612 } 11613 break; 11614 case 3: 11615 *prot = PAGE_READ | PAGE_WRITE; 11616 break; 11617 case 5: 11618 if (is_user) { 11619 fi->type = ARMFault_Permission; 11620 fi->level = 1; 11621 return true; 11622 } 11623 *prot = PAGE_READ; 11624 break; 11625 case 6: 11626 *prot = PAGE_READ; 11627 break; 11628 default: 11629 /* Bad permission. */ 11630 fi->type = ARMFault_Permission; 11631 fi->level = 1; 11632 return true; 11633 } 11634 *prot |= PAGE_EXEC; 11635 return false; 11636 } 11637 11638 /* Combine either inner or outer cacheability attributes for normal 11639 * memory, according to table D4-42 and pseudocode procedure 11640 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM). 
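 *
 * As an illustration only (nibble values assumed, not taken from real
 * translation tables): a stage 1 attribute of 0xf (Write-Back,
 * read/write-allocate) combined with a stage 2 attribute of 0xa
 * (Write-Through) yields 0xb below, i.e. Write-Through while keeping
 * the stage 1 allocation hints in bits [1:0].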
11641 * 11642 * NB: only stage 1 includes allocation hints (RW bits), leading to 11643 * some asymmetry. 11644 */ 11645 static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2) 11646 { 11647 if (s1 == 4 || s2 == 4) { 11648 /* non-cacheable has precedence */ 11649 return 4; 11650 } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) { 11651 /* stage 1 write-through takes precedence */ 11652 return s1; 11653 } else if (extract32(s2, 2, 2) == 2) { 11654 /* stage 2 write-through takes precedence, but the allocation hint 11655 * is still taken from stage 1 11656 */ 11657 return (2 << 2) | extract32(s1, 0, 2); 11658 } else { /* write-back */ 11659 return s1; 11660 } 11661 } 11662 11663 /* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4 11664 * and CombineS1S2Desc() 11665 * 11666 * @s1: Attributes from stage 1 walk 11667 * @s2: Attributes from stage 2 walk 11668 */ 11669 static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2) 11670 { 11671 uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4); 11672 uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4); 11673 ARMCacheAttrs ret; 11674 11675 /* Combine shareability attributes (table D4-43) */ 11676 if (s1.shareability == 2 || s2.shareability == 2) { 11677 /* if either are outer-shareable, the result is outer-shareable */ 11678 ret.shareability = 2; 11679 } else if (s1.shareability == 3 || s2.shareability == 3) { 11680 /* if either are inner-shareable, the result is inner-shareable */ 11681 ret.shareability = 3; 11682 } else { 11683 /* both non-shareable */ 11684 ret.shareability = 0; 11685 } 11686 11687 /* Combine memory type and cacheability attributes */ 11688 if (s1hi == 0 || s2hi == 0) { 11689 /* Device has precedence over normal */ 11690 if (s1lo == 0 || s2lo == 0) { 11691 /* nGnRnE has precedence over anything */ 11692 ret.attrs = 0; 11693 } else if (s1lo == 4 || s2lo == 4) { 11694 /* non-Reordering has precedence over Reordering */ 11695 ret.attrs = 4; /* nGnRE */ 11696 } else if (s1lo == 8 || s2lo == 8) { 11697 /* non-Gathering has precedence over Gathering */ 11698 ret.attrs = 8; /* nGRE */ 11699 } else { 11700 ret.attrs = 0xc; /* GRE */ 11701 } 11702 11703 /* Any location for which the resultant memory type is any 11704 * type of Device memory is always treated as Outer Shareable. 11705 */ 11706 ret.shareability = 2; 11707 } else { /* Normal memory */ 11708 /* Outer/inner cacheability combine independently */ 11709 ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4 11710 | combine_cacheattr_nibble(s1lo, s2lo); 11711 11712 if (ret.attrs == 0x44) { 11713 /* Any location for which the resultant memory type is Normal 11714 * Inner Non-cacheable, Outer Non-cacheable is always treated 11715 * as Outer Shareable. 11716 */ 11717 ret.shareability = 2; 11718 } 11719 } 11720 11721 return ret; 11722 } 11723 11724 11725 /* get_phys_addr - get the physical address for this virtual address 11726 * 11727 * Find the physical address corresponding to the given virtual address, 11728 * by doing a translation table walk on MMU based systems or using the 11729 * MPU state on MPU based systems. 11730 * 11731 * Returns false if the translation was successful. 
Otherwise, phys_ptr, attrs, 11732 * prot and page_size may not be filled in, and the populated fsr value provides 11733 * information on why the translation aborted, in the format of a 11734 * DFSR/IFSR fault register, with the following caveats: 11735 * * we honour the short vs long DFSR format differences. 11736 * * the WnR bit is never set (the caller must do this). 11737 * * for PSMAv5 based systems we don't bother to return a full FSR format 11738 * value. 11739 * 11740 * @env: CPUARMState 11741 * @address: virtual address to get physical address for 11742 * @access_type: 0 for read, 1 for write, 2 for execute 11743 * @mmu_idx: MMU index indicating required translation regime 11744 * @phys_ptr: set to the physical address corresponding to the virtual address 11745 * @attrs: set to the memory transaction attributes to use 11746 * @prot: set to the permissions for the page containing phys_ptr 11747 * @page_size: set to the size of the page containing phys_ptr 11748 * @fi: set to fault info if the translation fails 11749 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes 11750 */ 11751 static bool get_phys_addr(CPUARMState *env, target_ulong address, 11752 MMUAccessType access_type, ARMMMUIdx mmu_idx, 11753 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11754 target_ulong *page_size, 11755 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11756 { 11757 if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) { 11758 /* Call ourselves recursively to do the stage 1 and then stage 2 11759 * translations. 11760 */ 11761 if (arm_feature(env, ARM_FEATURE_EL2)) { 11762 hwaddr ipa; 11763 int s2_prot; 11764 int ret; 11765 ARMCacheAttrs cacheattrs2 = {}; 11766 11767 ret = get_phys_addr(env, address, access_type, 11768 stage_1_mmu_idx(mmu_idx), &ipa, attrs, 11769 prot, page_size, fi, cacheattrs); 11770 11771 /* If S1 fails or S2 is disabled, return early. */ 11772 if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) { 11773 *phys_ptr = ipa; 11774 return ret; 11775 } 11776 11777 /* S1 is done. Now do S2 translation. */ 11778 ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS, 11779 phys_ptr, attrs, &s2_prot, 11780 page_size, fi, 11781 cacheattrs != NULL ? &cacheattrs2 : NULL); 11782 fi->s2addr = ipa; 11783 /* Combine the S1 and S2 perms. */ 11784 *prot &= s2_prot; 11785 11786 /* Combine the S1 and S2 cache attributes, if needed */ 11787 if (!ret && cacheattrs != NULL) { 11788 if (env->cp15.hcr_el2 & HCR_DC) { 11789 /* 11790 * HCR.DC forces the first stage attributes to 11791 * Normal Non-Shareable, 11792 * Inner Write-Back Read-Allocate Write-Allocate, 11793 * Outer Write-Back Read-Allocate Write-Allocate. 11794 */ 11795 cacheattrs->attrs = 0xff; 11796 cacheattrs->shareability = 0; 11797 } 11798 *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2); 11799 } 11800 11801 return ret; 11802 } else { 11803 /* 11804 * For non-EL2 CPUs a stage1+stage2 translation is just stage 1. 11805 */ 11806 mmu_idx = stage_1_mmu_idx(mmu_idx); 11807 } 11808 } 11809 11810 /* The page table entries may downgrade secure to non-secure, but 11811 * cannot upgrade an non-secure translation regime's attributes 11812 * to secure. 11813 */ 11814 attrs->secure = regime_is_secure(env, mmu_idx); 11815 attrs->user = regime_is_user(env, mmu_idx); 11816 11817 /* Fast Context Switch Extension. This doesn't exist at all in v8. 11818 * In v7 and earlier it affects all stage 1 translations. 
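     *
     * A minimal sketch of the effect (PID value assumed purely for
     * illustration): with FCSEIDR holding 0x06000000 (ProcID 3), a
     * virtual address of 0x00001234 is remapped to the modified virtual
     * address 0x06001234 before the table walk or MPU lookup is done.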
11819 */ 11820 if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS 11821 && !arm_feature(env, ARM_FEATURE_V8)) { 11822 if (regime_el(env, mmu_idx) == 3) { 11823 address += env->cp15.fcseidr_s; 11824 } else { 11825 address += env->cp15.fcseidr_ns; 11826 } 11827 } 11828 11829 if (arm_feature(env, ARM_FEATURE_PMSA)) { 11830 bool ret; 11831 *page_size = TARGET_PAGE_SIZE; 11832 11833 if (arm_feature(env, ARM_FEATURE_V8)) { 11834 /* PMSAv8 */ 11835 ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx, 11836 phys_ptr, attrs, prot, page_size, fi); 11837 } else if (arm_feature(env, ARM_FEATURE_V7)) { 11838 /* PMSAv7 */ 11839 ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx, 11840 phys_ptr, prot, page_size, fi); 11841 } else { 11842 /* Pre-v7 MPU */ 11843 ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx, 11844 phys_ptr, prot, fi); 11845 } 11846 qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32 11847 " mmu_idx %u -> %s (prot %c%c%c)\n", 11848 access_type == MMU_DATA_LOAD ? "reading" : 11849 (access_type == MMU_DATA_STORE ? "writing" : "execute"), 11850 (uint32_t)address, mmu_idx, 11851 ret ? "Miss" : "Hit", 11852 *prot & PAGE_READ ? 'r' : '-', 11853 *prot & PAGE_WRITE ? 'w' : '-', 11854 *prot & PAGE_EXEC ? 'x' : '-'); 11855 11856 return ret; 11857 } 11858 11859 /* Definitely a real MMU, not an MPU */ 11860 11861 if (regime_translation_disabled(env, mmu_idx)) { 11862 /* MMU disabled. */ 11863 *phys_ptr = address; 11864 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 11865 *page_size = TARGET_PAGE_SIZE; 11866 return 0; 11867 } 11868 11869 if (regime_using_lpae_format(env, mmu_idx)) { 11870 return get_phys_addr_lpae(env, address, access_type, mmu_idx, 11871 phys_ptr, attrs, prot, page_size, 11872 fi, cacheattrs); 11873 } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) { 11874 return get_phys_addr_v6(env, address, access_type, mmu_idx, 11875 phys_ptr, attrs, prot, page_size, fi); 11876 } else { 11877 return get_phys_addr_v5(env, address, access_type, mmu_idx, 11878 phys_ptr, prot, page_size, fi); 11879 } 11880 } 11881 11882 /* Walk the page table and (if the mapping exists) add the page 11883 * to the TLB. Return false on success, or true on failure. Populate 11884 * fsr with ARM DFSR/IFSR fault register format value on failure. 11885 */ 11886 bool arm_tlb_fill(CPUState *cs, vaddr address, 11887 MMUAccessType access_type, int mmu_idx, 11888 ARMMMUFaultInfo *fi) 11889 { 11890 ARMCPU *cpu = ARM_CPU(cs); 11891 CPUARMState *env = &cpu->env; 11892 hwaddr phys_addr; 11893 target_ulong page_size; 11894 int prot; 11895 int ret; 11896 MemTxAttrs attrs = {}; 11897 11898 ret = get_phys_addr(env, address, access_type, 11899 core_to_arm_mmu_idx(env, mmu_idx), &phys_addr, 11900 &attrs, &prot, &page_size, fi, NULL); 11901 if (!ret) { 11902 /* 11903 * Map a single [sub]page. Regions smaller than our declared 11904 * target page size are handled specially, so for those we 11905 * pass in the exact addresses. 
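         *
         * For instance, an MPU lookup that reported a 1-byte page_size
         * (see get_phys_addr_pmsav8() above) is entered with the exact,
         * unmasked vaddr/paddr pair, so that other addresses in the same
         * guest page cannot incorrectly match this TLB entry.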
11906 */ 11907 if (page_size >= TARGET_PAGE_SIZE) { 11908 phys_addr &= TARGET_PAGE_MASK; 11909 address &= TARGET_PAGE_MASK; 11910 } 11911 tlb_set_page_with_attrs(cs, address, phys_addr, attrs, 11912 prot, mmu_idx, page_size); 11913 return 0; 11914 } 11915 11916 return ret; 11917 } 11918 11919 hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, 11920 MemTxAttrs *attrs) 11921 { 11922 ARMCPU *cpu = ARM_CPU(cs); 11923 CPUARMState *env = &cpu->env; 11924 hwaddr phys_addr; 11925 target_ulong page_size; 11926 int prot; 11927 bool ret; 11928 ARMMMUFaultInfo fi = {}; 11929 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 11930 11931 *attrs = (MemTxAttrs) {}; 11932 11933 ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr, 11934 attrs, &prot, &page_size, &fi, NULL); 11935 11936 if (ret) { 11937 return -1; 11938 } 11939 return phys_addr; 11940 } 11941 11942 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 11943 { 11944 uint32_t mask; 11945 unsigned el = arm_current_el(env); 11946 11947 /* First handle registers which unprivileged can read */ 11948 11949 switch (reg) { 11950 case 0 ... 7: /* xPSR sub-fields */ 11951 mask = 0; 11952 if ((reg & 1) && el) { 11953 mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */ 11954 } 11955 if (!(reg & 4)) { 11956 mask |= XPSR_NZCV | XPSR_Q; /* APSR */ 11957 } 11958 /* EPSR reads as zero */ 11959 return xpsr_read(env) & mask; 11960 break; 11961 case 20: /* CONTROL */ 11962 return env->v7m.control[env->v7m.secure]; 11963 case 0x94: /* CONTROL_NS */ 11964 /* We have to handle this here because unprivileged Secure code 11965 * can read the NS CONTROL register. 11966 */ 11967 if (!env->v7m.secure) { 11968 return 0; 11969 } 11970 return env->v7m.control[M_REG_NS]; 11971 } 11972 11973 if (el == 0) { 11974 return 0; /* unprivileged reads others as zero */ 11975 } 11976 11977 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 11978 switch (reg) { 11979 case 0x88: /* MSP_NS */ 11980 if (!env->v7m.secure) { 11981 return 0; 11982 } 11983 return env->v7m.other_ss_msp; 11984 case 0x89: /* PSP_NS */ 11985 if (!env->v7m.secure) { 11986 return 0; 11987 } 11988 return env->v7m.other_ss_psp; 11989 case 0x8a: /* MSPLIM_NS */ 11990 if (!env->v7m.secure) { 11991 return 0; 11992 } 11993 return env->v7m.msplim[M_REG_NS]; 11994 case 0x8b: /* PSPLIM_NS */ 11995 if (!env->v7m.secure) { 11996 return 0; 11997 } 11998 return env->v7m.psplim[M_REG_NS]; 11999 case 0x90: /* PRIMASK_NS */ 12000 if (!env->v7m.secure) { 12001 return 0; 12002 } 12003 return env->v7m.primask[M_REG_NS]; 12004 case 0x91: /* BASEPRI_NS */ 12005 if (!env->v7m.secure) { 12006 return 0; 12007 } 12008 return env->v7m.basepri[M_REG_NS]; 12009 case 0x93: /* FAULTMASK_NS */ 12010 if (!env->v7m.secure) { 12011 return 0; 12012 } 12013 return env->v7m.faultmask[M_REG_NS]; 12014 case 0x98: /* SP_NS */ 12015 { 12016 /* This gives the non-secure SP selected based on whether we're 12017 * currently in handler mode or not, using the NS CONTROL.SPSEL. 12018 */ 12019 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 12020 12021 if (!env->v7m.secure) { 12022 return 0; 12023 } 12024 if (!arm_v7m_is_handler_mode(env) && spsel) { 12025 return env->v7m.other_ss_psp; 12026 } else { 12027 return env->v7m.other_ss_msp; 12028 } 12029 } 12030 default: 12031 break; 12032 } 12033 } 12034 12035 switch (reg) { 12036 case 8: /* MSP */ 12037 return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13]; 12038 case 9: /* PSP */ 12039 return v7m_using_psp(env) ? 
env->regs[13] : env->v7m.other_sp; 12040 case 10: /* MSPLIM */ 12041 if (!arm_feature(env, ARM_FEATURE_V8)) { 12042 goto bad_reg; 12043 } 12044 return env->v7m.msplim[env->v7m.secure]; 12045 case 11: /* PSPLIM */ 12046 if (!arm_feature(env, ARM_FEATURE_V8)) { 12047 goto bad_reg; 12048 } 12049 return env->v7m.psplim[env->v7m.secure]; 12050 case 16: /* PRIMASK */ 12051 return env->v7m.primask[env->v7m.secure]; 12052 case 17: /* BASEPRI */ 12053 case 18: /* BASEPRI_MAX */ 12054 return env->v7m.basepri[env->v7m.secure]; 12055 case 19: /* FAULTMASK */ 12056 return env->v7m.faultmask[env->v7m.secure]; 12057 default: 12058 bad_reg: 12059 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special" 12060 " register %d\n", reg); 12061 return 0; 12062 } 12063 } 12064 12065 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val) 12066 { 12067 /* We're passed bits [11..0] of the instruction; extract 12068 * SYSm and the mask bits. 12069 * Invalid combinations of SYSm and mask are UNPREDICTABLE; 12070 * we choose to treat them as if the mask bits were valid. 12071 * NB that the pseudocode 'mask' variable is bits [11..10], 12072 * whereas ours is [11..8]. 12073 */ 12074 uint32_t mask = extract32(maskreg, 8, 4); 12075 uint32_t reg = extract32(maskreg, 0, 8); 12076 12077 if (arm_current_el(env) == 0 && reg > 7) { 12078 /* only xPSR sub-fields may be written by unprivileged */ 12079 return; 12080 } 12081 12082 if (arm_feature(env, ARM_FEATURE_M_SECURITY)) { 12083 switch (reg) { 12084 case 0x88: /* MSP_NS */ 12085 if (!env->v7m.secure) { 12086 return; 12087 } 12088 env->v7m.other_ss_msp = val; 12089 return; 12090 case 0x89: /* PSP_NS */ 12091 if (!env->v7m.secure) { 12092 return; 12093 } 12094 env->v7m.other_ss_psp = val; 12095 return; 12096 case 0x8a: /* MSPLIM_NS */ 12097 if (!env->v7m.secure) { 12098 return; 12099 } 12100 env->v7m.msplim[M_REG_NS] = val & ~7; 12101 return; 12102 case 0x8b: /* PSPLIM_NS */ 12103 if (!env->v7m.secure) { 12104 return; 12105 } 12106 env->v7m.psplim[M_REG_NS] = val & ~7; 12107 return; 12108 case 0x90: /* PRIMASK_NS */ 12109 if (!env->v7m.secure) { 12110 return; 12111 } 12112 env->v7m.primask[M_REG_NS] = val & 1; 12113 return; 12114 case 0x91: /* BASEPRI_NS */ 12115 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { 12116 return; 12117 } 12118 env->v7m.basepri[M_REG_NS] = val & 0xff; 12119 return; 12120 case 0x93: /* FAULTMASK_NS */ 12121 if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) { 12122 return; 12123 } 12124 env->v7m.faultmask[M_REG_NS] = val & 1; 12125 return; 12126 case 0x94: /* CONTROL_NS */ 12127 if (!env->v7m.secure) { 12128 return; 12129 } 12130 write_v7m_control_spsel_for_secstate(env, 12131 val & R_V7M_CONTROL_SPSEL_MASK, 12132 M_REG_NS); 12133 if (arm_feature(env, ARM_FEATURE_M_MAIN)) { 12134 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK; 12135 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK; 12136 } 12137 return; 12138 case 0x98: /* SP_NS */ 12139 { 12140 /* This gives the non-secure SP selected based on whether we're 12141 * currently in handler mode or not, using the NS CONTROL.SPSEL. 12142 */ 12143 bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK; 12144 bool is_psp = !arm_v7m_is_handler_mode(env) && spsel; 12145 uint32_t limit; 12146 12147 if (!env->v7m.secure) { 12148 return; 12149 } 12150 12151 limit = is_psp ? 
env->v7m.psplim[false] : env->v7m.msplim[false]; 12152 12153 if (val < limit) { 12154 CPUState *cs = CPU(arm_env_get_cpu(env)); 12155 12156 cpu_restore_state(cs, GETPC(), true); 12157 raise_exception(env, EXCP_STKOF, 0, 1); 12158 } 12159 12160 if (is_psp) { 12161 env->v7m.other_ss_psp = val; 12162 } else { 12163 env->v7m.other_ss_msp = val; 12164 } 12165 return; 12166 } 12167 default: 12168 break; 12169 } 12170 } 12171 12172 switch (reg) { 12173 case 0 ... 7: /* xPSR sub-fields */ 12174 /* only APSR is actually writable */ 12175 if (!(reg & 4)) { 12176 uint32_t apsrmask = 0; 12177 12178 if (mask & 8) { 12179 apsrmask |= XPSR_NZCV | XPSR_Q; 12180 } 12181 if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) { 12182 apsrmask |= XPSR_GE; 12183 } 12184 xpsr_write(env, val, apsrmask); 12185 } 12186 break; 12187 case 8: /* MSP */ 12188 if (v7m_using_psp(env)) { 12189 env->v7m.other_sp = val; 12190 } else { 12191 env->regs[13] = val; 12192 } 12193 break; 12194 case 9: /* PSP */ 12195 if (v7m_using_psp(env)) { 12196 env->regs[13] = val; 12197 } else { 12198 env->v7m.other_sp = val; 12199 } 12200 break; 12201 case 10: /* MSPLIM */ 12202 if (!arm_feature(env, ARM_FEATURE_V8)) { 12203 goto bad_reg; 12204 } 12205 env->v7m.msplim[env->v7m.secure] = val & ~7; 12206 break; 12207 case 11: /* PSPLIM */ 12208 if (!arm_feature(env, ARM_FEATURE_V8)) { 12209 goto bad_reg; 12210 } 12211 env->v7m.psplim[env->v7m.secure] = val & ~7; 12212 break; 12213 case 16: /* PRIMASK */ 12214 env->v7m.primask[env->v7m.secure] = val & 1; 12215 break; 12216 case 17: /* BASEPRI */ 12217 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 12218 goto bad_reg; 12219 } 12220 env->v7m.basepri[env->v7m.secure] = val & 0xff; 12221 break; 12222 case 18: /* BASEPRI_MAX */ 12223 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 12224 goto bad_reg; 12225 } 12226 val &= 0xff; 12227 if (val != 0 && (val < env->v7m.basepri[env->v7m.secure] 12228 || env->v7m.basepri[env->v7m.secure] == 0)) { 12229 env->v7m.basepri[env->v7m.secure] = val; 12230 } 12231 break; 12232 case 19: /* FAULTMASK */ 12233 if (!arm_feature(env, ARM_FEATURE_M_MAIN)) { 12234 goto bad_reg; 12235 } 12236 env->v7m.faultmask[env->v7m.secure] = val & 1; 12237 break; 12238 case 20: /* CONTROL */ 12239 /* Writing to the SPSEL bit only has an effect if we are in 12240 * thread mode; other bits can be updated by any privileged code. 12241 * write_v7m_control_spsel() deals with updating the SPSEL bit in 12242 * env->v7m.control, so we only need update the others. 12243 * For v7M, we must just ignore explicit writes to SPSEL in handler 12244 * mode; for v8M the write is permitted but will have no effect. 12245 */ 12246 if (arm_feature(env, ARM_FEATURE_V8) || 12247 !arm_v7m_is_handler_mode(env)) { 12248 write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0); 12249 } 12250 if (arm_feature(env, ARM_FEATURE_M_MAIN)) { 12251 env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK; 12252 env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK; 12253 } 12254 break; 12255 default: 12256 bad_reg: 12257 qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special" 12258 " register %d\n", reg); 12259 return; 12260 } 12261 } 12262 12263 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) 12264 { 12265 /* Implement the TT instruction. op is bits [7:6] of the insn. 
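     * As a reading aid for the flags below: bit 0 of op selects the
     * unprivileged form (the TTT/TTAT encodings, forceunpriv) and bit 1
     * selects the alternate security state form (TTA/TTAT, alt).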
*/ 12266 bool forceunpriv = op & 1; 12267 bool alt = op & 2; 12268 V8M_SAttributes sattrs = {}; 12269 uint32_t tt_resp; 12270 bool r, rw, nsr, nsrw, mrvalid; 12271 int prot; 12272 ARMMMUFaultInfo fi = {}; 12273 MemTxAttrs attrs = {}; 12274 hwaddr phys_addr; 12275 ARMMMUIdx mmu_idx; 12276 uint32_t mregion; 12277 bool targetpriv; 12278 bool targetsec = env->v7m.secure; 12279 bool is_subpage; 12280 12281 /* Work out what the security state and privilege level we're 12282 * interested in is... 12283 */ 12284 if (alt) { 12285 targetsec = !targetsec; 12286 } 12287 12288 if (forceunpriv) { 12289 targetpriv = false; 12290 } else { 12291 targetpriv = arm_v7m_is_handler_mode(env) || 12292 !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK); 12293 } 12294 12295 /* ...and then figure out which MMU index this is */ 12296 mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv); 12297 12298 /* We know that the MPU and SAU don't care about the access type 12299 * for our purposes beyond that we don't want to claim to be 12300 * an insn fetch, so we arbitrarily call this a read. 12301 */ 12302 12303 /* MPU region info only available for privileged or if 12304 * inspecting the other MPU state. 12305 */ 12306 if (arm_current_el(env) != 0 || alt) { 12307 /* We can ignore the return value as prot is always set */ 12308 pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, 12309 &phys_addr, &attrs, &prot, &is_subpage, 12310 &fi, &mregion); 12311 if (mregion == -1) { 12312 mrvalid = false; 12313 mregion = 0; 12314 } else { 12315 mrvalid = true; 12316 } 12317 r = prot & PAGE_READ; 12318 rw = prot & PAGE_WRITE; 12319 } else { 12320 r = false; 12321 rw = false; 12322 mrvalid = false; 12323 mregion = 0; 12324 } 12325 12326 if (env->v7m.secure) { 12327 v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs); 12328 nsr = sattrs.ns && r; 12329 nsrw = sattrs.ns && rw; 12330 } else { 12331 sattrs.ns = true; 12332 nsr = false; 12333 nsrw = false; 12334 } 12335 12336 tt_resp = (sattrs.iregion << 24) | 12337 (sattrs.irvalid << 23) | 12338 ((!sattrs.ns) << 22) | 12339 (nsrw << 21) | 12340 (nsr << 20) | 12341 (rw << 19) | 12342 (r << 18) | 12343 (sattrs.srvalid << 17) | 12344 (mrvalid << 16) | 12345 (sattrs.sregion << 8) | 12346 mregion; 12347 12348 return tt_resp; 12349 } 12350 12351 #endif 12352 12353 void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) 12354 { 12355 /* Implement DC ZVA, which zeroes a fixed-length block of memory. 12356 * Note that we do not implement the (architecturally mandated) 12357 * alignment fault for attempts to use this on Device memory 12358 * (which matches the usual QEMU behaviour of not implementing either 12359 * alignment faults or any memory attribute handling). 12360 */ 12361 12362 ARMCPU *cpu = arm_env_get_cpu(env); 12363 uint64_t blocklen = 4 << cpu->dcz_blocksize; 12364 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 12365 12366 #ifndef CONFIG_USER_ONLY 12367 { 12368 /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than 12369 * the block size so we might have to do more than one TLB lookup. 12370 * We know that in fact for any v8 CPU the page size is at least 4K 12371 * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only 12372 * 1K as an artefact of legacy v5 subpage support being present in the 12373 * same QEMU executable. 
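         *
         * As an example (block size assumed for illustration): with
         * dcz_blocksize = 4, i.e. a 64-byte block, maxidx below is 1 and
         * a single lookup suffices, whereas a hypothetical 2K block over
         * 1K pages would need two lookups per iteration.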
12374 */ 12375 int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE); 12376 void *hostaddr[maxidx]; 12377 int try, i; 12378 unsigned mmu_idx = cpu_mmu_index(env, false); 12379 TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); 12380 12381 for (try = 0; try < 2; try++) { 12382 12383 for (i = 0; i < maxidx; i++) { 12384 hostaddr[i] = tlb_vaddr_to_host(env, 12385 vaddr + TARGET_PAGE_SIZE * i, 12386 1, mmu_idx); 12387 if (!hostaddr[i]) { 12388 break; 12389 } 12390 } 12391 if (i == maxidx) { 12392 /* If it's all in the TLB it's fair game for just writing to; 12393 * we know we don't need to update dirty status, etc. 12394 */ 12395 for (i = 0; i < maxidx - 1; i++) { 12396 memset(hostaddr[i], 0, TARGET_PAGE_SIZE); 12397 } 12398 memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE)); 12399 return; 12400 } 12401 /* OK, try a store and see if we can populate the tlb. This 12402 * might cause an exception if the memory isn't writable, 12403 * in which case we will longjmp out of here. We must for 12404 * this purpose use the actual register value passed to us 12405 * so that we get the fault address right. 12406 */ 12407 helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC()); 12408 /* Now we can populate the other TLB entries, if any */ 12409 for (i = 0; i < maxidx; i++) { 12410 uint64_t va = vaddr + TARGET_PAGE_SIZE * i; 12411 if (va != (vaddr_in & TARGET_PAGE_MASK)) { 12412 helper_ret_stb_mmu(env, va, 0, oi, GETPC()); 12413 } 12414 } 12415 } 12416 12417 /* Slow path (probably attempt to do this to an I/O device or 12418 * similar, or clearing of a block of code we have translations 12419 * cached for). Just do a series of byte writes as the architecture 12420 * demands. It's not worth trying to use a cpu_physical_memory_map(), 12421 * memset(), unmap() sequence here because: 12422 * + we'd need to account for the blocksize being larger than a page 12423 * + the direct-RAM access case is almost always going to be dealt 12424 * with in the fastpath code above, so there's no speed benefit 12425 * + we would have to deal with the map returning NULL because the 12426 * bounce buffer was in use 12427 */ 12428 for (i = 0; i < blocklen; i++) { 12429 helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC()); 12430 } 12431 } 12432 #else 12433 memset(g2h(vaddr), 0, blocklen); 12434 #endif 12435 } 12436 12437 /* Note that signed overflow is undefined in C. The following routines are 12438 careful to use unsigned types where modulo arithmetic is required. 12439 Failure to do so _will_ break on newer gcc. */ 12440 12441 /* Signed saturating arithmetic. */ 12442 12443 /* Perform 16-bit signed saturating addition. */ 12444 static inline uint16_t add16_sat(uint16_t a, uint16_t b) 12445 { 12446 uint16_t res; 12447 12448 res = a + b; 12449 if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { 12450 if (a & 0x8000) 12451 res = 0x8000; 12452 else 12453 res = 0x7fff; 12454 } 12455 return res; 12456 } 12457 12458 /* Perform 8-bit signed saturating addition. */ 12459 static inline uint8_t add8_sat(uint8_t a, uint8_t b) 12460 { 12461 uint8_t res; 12462 12463 res = a + b; 12464 if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { 12465 if (a & 0x80) 12466 res = 0x80; 12467 else 12468 res = 0x7f; 12469 } 12470 return res; 12471 } 12472 12473 /* Perform 16-bit signed saturating subtraction. 
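 * For example, 0x8000 - 0x0001 saturates to 0x8000 (INT16_MIN) below
 * rather than wrapping around to 0x7fff.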
*/ 12474 static inline uint16_t sub16_sat(uint16_t a, uint16_t b) 12475 { 12476 uint16_t res; 12477 12478 res = a - b; 12479 if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { 12480 if (a & 0x8000) 12481 res = 0x8000; 12482 else 12483 res = 0x7fff; 12484 } 12485 return res; 12486 } 12487 12488 /* Perform 8-bit signed saturating subtraction. */ 12489 static inline uint8_t sub8_sat(uint8_t a, uint8_t b) 12490 { 12491 uint8_t res; 12492 12493 res = a - b; 12494 if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { 12495 if (a & 0x80) 12496 res = 0x80; 12497 else 12498 res = 0x7f; 12499 } 12500 return res; 12501 } 12502 12503 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); 12504 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); 12505 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); 12506 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); 12507 #define PFX q 12508 12509 #include "op_addsub.h" 12510 12511 /* Unsigned saturating arithmetic. */ 12512 static inline uint16_t add16_usat(uint16_t a, uint16_t b) 12513 { 12514 uint16_t res; 12515 res = a + b; 12516 if (res < a) 12517 res = 0xffff; 12518 return res; 12519 } 12520 12521 static inline uint16_t sub16_usat(uint16_t a, uint16_t b) 12522 { 12523 if (a > b) 12524 return a - b; 12525 else 12526 return 0; 12527 } 12528 12529 static inline uint8_t add8_usat(uint8_t a, uint8_t b) 12530 { 12531 uint8_t res; 12532 res = a + b; 12533 if (res < a) 12534 res = 0xff; 12535 return res; 12536 } 12537 12538 static inline uint8_t sub8_usat(uint8_t a, uint8_t b) 12539 { 12540 if (a > b) 12541 return a - b; 12542 else 12543 return 0; 12544 } 12545 12546 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); 12547 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); 12548 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); 12549 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); 12550 #define PFX uq 12551 12552 #include "op_addsub.h" 12553 12554 /* Signed modulo arithmetic. */ 12555 #define SARITH16(a, b, n, op) do { \ 12556 int32_t sum; \ 12557 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ 12558 RESULT(sum, n, 16); \ 12559 if (sum >= 0) \ 12560 ge |= 3 << (n * 2); \ 12561 } while(0) 12562 12563 #define SARITH8(a, b, n, op) do { \ 12564 int32_t sum; \ 12565 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ 12566 RESULT(sum, n, 8); \ 12567 if (sum >= 0) \ 12568 ge |= 1 << n; \ 12569 } while(0) 12570 12571 12572 #define ADD16(a, b, n) SARITH16(a, b, n, +) 12573 #define SUB16(a, b, n) SARITH16(a, b, n, -) 12574 #define ADD8(a, b, n) SARITH8(a, b, n, +) 12575 #define SUB8(a, b, n) SARITH8(a, b, n, -) 12576 #define PFX s 12577 #define ARITH_GE 12578 12579 #include "op_addsub.h" 12580 12581 /* Unsigned modulo arithmetic. 
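 * The macros below also derive the GE flags from the unsigned
 * carry/borrow; e.g. a UADD16 halfword of 0xfffe + 0x0003 wraps to
 * 0x0001 and sets the corresponding GE bits because the carry out
 * (sum >> 16) is 1 (values chosen purely for illustration).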
*/ 12582 #define ADD16(a, b, n) do { \ 12583 uint32_t sum; \ 12584 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ 12585 RESULT(sum, n, 16); \ 12586 if ((sum >> 16) == 1) \ 12587 ge |= 3 << (n * 2); \ 12588 } while(0) 12589 12590 #define ADD8(a, b, n) do { \ 12591 uint32_t sum; \ 12592 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ 12593 RESULT(sum, n, 8); \ 12594 if ((sum >> 8) == 1) \ 12595 ge |= 1 << n; \ 12596 } while(0) 12597 12598 #define SUB16(a, b, n) do { \ 12599 uint32_t sum; \ 12600 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ 12601 RESULT(sum, n, 16); \ 12602 if ((sum >> 16) == 0) \ 12603 ge |= 3 << (n * 2); \ 12604 } while(0) 12605 12606 #define SUB8(a, b, n) do { \ 12607 uint32_t sum; \ 12608 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ 12609 RESULT(sum, n, 8); \ 12610 if ((sum >> 8) == 0) \ 12611 ge |= 1 << n; \ 12612 } while(0) 12613 12614 #define PFX u 12615 #define ARITH_GE 12616 12617 #include "op_addsub.h" 12618 12619 /* Halved signed arithmetic. */ 12620 #define ADD16(a, b, n) \ 12621 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) 12622 #define SUB16(a, b, n) \ 12623 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) 12624 #define ADD8(a, b, n) \ 12625 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) 12626 #define SUB8(a, b, n) \ 12627 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) 12628 #define PFX sh 12629 12630 #include "op_addsub.h" 12631 12632 /* Halved unsigned arithmetic. */ 12633 #define ADD16(a, b, n) \ 12634 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12635 #define SUB16(a, b, n) \ 12636 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) 12637 #define ADD8(a, b, n) \ 12638 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) 12639 #define SUB8(a, b, n) \ 12640 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) 12641 #define PFX uh 12642 12643 #include "op_addsub.h" 12644 12645 static inline uint8_t do_usad(uint8_t a, uint8_t b) 12646 { 12647 if (a > b) 12648 return a - b; 12649 else 12650 return b - a; 12651 } 12652 12653 /* Unsigned sum of absolute byte differences. */ 12654 uint32_t HELPER(usad8)(uint32_t a, uint32_t b) 12655 { 12656 uint32_t sum; 12657 sum = do_usad(a, b); 12658 sum += do_usad(a >> 8, b >> 8); 12659 sum += do_usad(a >> 16, b >>16); 12660 sum += do_usad(a >> 24, b >> 24); 12661 return sum; 12662 } 12663 12664 /* For ARMv6 SEL instruction. */ 12665 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) 12666 { 12667 uint32_t mask; 12668 12669 mask = 0; 12670 if (flags & 1) 12671 mask |= 0xff; 12672 if (flags & 2) 12673 mask |= 0xff00; 12674 if (flags & 4) 12675 mask |= 0xff0000; 12676 if (flags & 8) 12677 mask |= 0xff000000; 12678 return (a & mask) | (b & ~mask); 12679 } 12680 12681 /* VFP support. We follow the convention used for VFP instructions: 12682 Single precision routines have a "s" suffix, double precision a 12683 "d" suffix. */ 12684 12685 /* Convert host exception flags to vfp form. 
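 * For example, softfloat's float_flag_divbyzero maps to the FPSCR DZC
 * bit (bit 1) and float_flag_inexact to IXC (bit 4), matching the
 * cumulative exception bit layout of the FPSCR.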
*/ 12686 static inline int vfp_exceptbits_from_host(int host_bits) 12687 { 12688 int target_bits = 0; 12689 12690 if (host_bits & float_flag_invalid) 12691 target_bits |= 1; 12692 if (host_bits & float_flag_divbyzero) 12693 target_bits |= 2; 12694 if (host_bits & float_flag_overflow) 12695 target_bits |= 4; 12696 if (host_bits & (float_flag_underflow | float_flag_output_denormal)) 12697 target_bits |= 8; 12698 if (host_bits & float_flag_inexact) 12699 target_bits |= 0x10; 12700 if (host_bits & float_flag_input_denormal) 12701 target_bits |= 0x80; 12702 return target_bits; 12703 } 12704 12705 uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env) 12706 { 12707 uint32_t i, fpscr; 12708 12709 fpscr = env->vfp.xregs[ARM_VFP_FPSCR] 12710 | (env->vfp.vec_len << 16) 12711 | (env->vfp.vec_stride << 20); 12712 12713 i = get_float_exception_flags(&env->vfp.fp_status); 12714 i |= get_float_exception_flags(&env->vfp.standard_fp_status); 12715 /* FZ16 does not generate an input denormal exception. */ 12716 i |= (get_float_exception_flags(&env->vfp.fp_status_f16) 12717 & ~float_flag_input_denormal); 12718 fpscr |= vfp_exceptbits_from_host(i); 12719 12720 i = env->vfp.qc[0] | env->vfp.qc[1] | env->vfp.qc[2] | env->vfp.qc[3]; 12721 fpscr |= i ? FPCR_QC : 0; 12722 12723 return fpscr; 12724 } 12725 12726 uint32_t vfp_get_fpscr(CPUARMState *env) 12727 { 12728 return HELPER(vfp_get_fpscr)(env); 12729 } 12730 12731 /* Convert vfp exception flags to target form. */ 12732 static inline int vfp_exceptbits_to_host(int target_bits) 12733 { 12734 int host_bits = 0; 12735 12736 if (target_bits & 1) 12737 host_bits |= float_flag_invalid; 12738 if (target_bits & 2) 12739 host_bits |= float_flag_divbyzero; 12740 if (target_bits & 4) 12741 host_bits |= float_flag_overflow; 12742 if (target_bits & 8) 12743 host_bits |= float_flag_underflow; 12744 if (target_bits & 0x10) 12745 host_bits |= float_flag_inexact; 12746 if (target_bits & 0x80) 12747 host_bits |= float_flag_input_denormal; 12748 return host_bits; 12749 } 12750 12751 void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) 12752 { 12753 int i; 12754 uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR]; 12755 12756 /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ 12757 if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) { 12758 val &= ~FPCR_FZ16; 12759 } 12760 12761 /* 12762 * We don't implement trapped exception handling, so the 12763 * trap enable bits, IDE|IXE|UFE|OFE|DZE|IOE are all RAZ/WI (not RES0!) 12764 * 12765 * If we exclude the exception flags, IOC|DZC|OFC|UFC|IXC|IDC 12766 * (which are stored in fp_status), and the other RES0 bits 12767 * in between, then we clear all of the low 16 bits. 12768 */ 12769 env->vfp.xregs[ARM_VFP_FPSCR] = val & 0xf7c80000; 12770 env->vfp.vec_len = (val >> 16) & 7; 12771 env->vfp.vec_stride = (val >> 20) & 3; 12772 12773 /* 12774 * The bit we set within fpscr_q is arbitrary; the register as a 12775 * whole being zero/non-zero is what counts. 
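     * (vfp_get_fpscr() above ORs qc[0..3] together and reports FPCR_QC
     * whenever the combined value is non-zero.)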
12776 */ 12777 env->vfp.qc[0] = val & FPCR_QC; 12778 env->vfp.qc[1] = 0; 12779 env->vfp.qc[2] = 0; 12780 env->vfp.qc[3] = 0; 12781 12782 changed ^= val; 12783 if (changed & (3 << 22)) { 12784 i = (val >> 22) & 3; 12785 switch (i) { 12786 case FPROUNDING_TIEEVEN: 12787 i = float_round_nearest_even; 12788 break; 12789 case FPROUNDING_POSINF: 12790 i = float_round_up; 12791 break; 12792 case FPROUNDING_NEGINF: 12793 i = float_round_down; 12794 break; 12795 case FPROUNDING_ZERO: 12796 i = float_round_to_zero; 12797 break; 12798 } 12799 set_float_rounding_mode(i, &env->vfp.fp_status); 12800 set_float_rounding_mode(i, &env->vfp.fp_status_f16); 12801 } 12802 if (changed & FPCR_FZ16) { 12803 bool ftz_enabled = val & FPCR_FZ16; 12804 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 12805 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status_f16); 12806 } 12807 if (changed & FPCR_FZ) { 12808 bool ftz_enabled = val & FPCR_FZ; 12809 set_flush_to_zero(ftz_enabled, &env->vfp.fp_status); 12810 set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status); 12811 } 12812 if (changed & FPCR_DN) { 12813 bool dnan_enabled = val & FPCR_DN; 12814 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status); 12815 set_default_nan_mode(dnan_enabled, &env->vfp.fp_status_f16); 12816 } 12817 12818 /* The exception flags are ORed together when we read fpscr so we 12819 * only need to preserve the current state in one of our 12820 * float_status values. 12821 */ 12822 i = vfp_exceptbits_to_host(val); 12823 set_float_exception_flags(i, &env->vfp.fp_status); 12824 set_float_exception_flags(0, &env->vfp.fp_status_f16); 12825 set_float_exception_flags(0, &env->vfp.standard_fp_status); 12826 } 12827 12828 void vfp_set_fpscr(CPUARMState *env, uint32_t val) 12829 { 12830 HELPER(vfp_set_fpscr)(env, val); 12831 } 12832 12833 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p)) 12834 12835 #define VFP_BINOP(name) \ 12836 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \ 12837 { \ 12838 float_status *fpst = fpstp; \ 12839 return float32_ ## name(a, b, fpst); \ 12840 } \ 12841 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \ 12842 { \ 12843 float_status *fpst = fpstp; \ 12844 return float64_ ## name(a, b, fpst); \ 12845 } 12846 VFP_BINOP(add) 12847 VFP_BINOP(sub) 12848 VFP_BINOP(mul) 12849 VFP_BINOP(div) 12850 VFP_BINOP(min) 12851 VFP_BINOP(max) 12852 VFP_BINOP(minnum) 12853 VFP_BINOP(maxnum) 12854 #undef VFP_BINOP 12855 12856 float32 VFP_HELPER(neg, s)(float32 a) 12857 { 12858 return float32_chs(a); 12859 } 12860 12861 float64 VFP_HELPER(neg, d)(float64 a) 12862 { 12863 return float64_chs(a); 12864 } 12865 12866 float32 VFP_HELPER(abs, s)(float32 a) 12867 { 12868 return float32_abs(a); 12869 } 12870 12871 float64 VFP_HELPER(abs, d)(float64 a) 12872 { 12873 return float64_abs(a); 12874 } 12875 12876 float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env) 12877 { 12878 return float32_sqrt(a, &env->vfp.fp_status); 12879 } 12880 12881 float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env) 12882 { 12883 return float64_sqrt(a, &env->vfp.fp_status); 12884 } 12885 12886 static void softfloat_to_vfp_compare(CPUARMState *env, int cmp) 12887 { 12888 uint32_t flags; 12889 switch (cmp) { 12890 case float_relation_equal: 12891 flags = 0x6; 12892 break; 12893 case float_relation_less: 12894 flags = 0x8; 12895 break; 12896 case float_relation_greater: 12897 flags = 0x2; 12898 break; 12899 case float_relation_unordered: 12900 flags = 0x3; 12901 break; 12902 default: 12903 g_assert_not_reached(); 12904 
} 12905 env->vfp.xregs[ARM_VFP_FPSCR] = 12906 deposit32(env->vfp.xregs[ARM_VFP_FPSCR], 28, 4, flags); 12907 } 12908 12909 /* XXX: check quiet/signaling case */ 12910 #define DO_VFP_cmp(p, type) \ 12911 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \ 12912 { \ 12913 softfloat_to_vfp_compare(env, \ 12914 type ## _compare_quiet(a, b, &env->vfp.fp_status)); \ 12915 } \ 12916 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \ 12917 { \ 12918 softfloat_to_vfp_compare(env, \ 12919 type ## _compare(a, b, &env->vfp.fp_status)); \ 12920 } 12921 DO_VFP_cmp(s, float32) 12922 DO_VFP_cmp(d, float64) 12923 #undef DO_VFP_cmp 12924 12925 /* Integer to float and float to integer conversions */ 12926 12927 #define CONV_ITOF(name, ftype, fsz, sign) \ 12928 ftype HELPER(name)(uint32_t x, void *fpstp) \ 12929 { \ 12930 float_status *fpst = fpstp; \ 12931 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \ 12932 } 12933 12934 #define CONV_FTOI(name, ftype, fsz, sign, round) \ 12935 sign##int32_t HELPER(name)(ftype x, void *fpstp) \ 12936 { \ 12937 float_status *fpst = fpstp; \ 12938 if (float##fsz##_is_any_nan(x)) { \ 12939 float_raise(float_flag_invalid, fpst); \ 12940 return 0; \ 12941 } \ 12942 return float##fsz##_to_##sign##int32##round(x, fpst); \ 12943 } 12944 12945 #define FLOAT_CONVS(name, p, ftype, fsz, sign) \ 12946 CONV_ITOF(vfp_##name##to##p, ftype, fsz, sign) \ 12947 CONV_FTOI(vfp_to##name##p, ftype, fsz, sign, ) \ 12948 CONV_FTOI(vfp_to##name##z##p, ftype, fsz, sign, _round_to_zero) 12949 12950 FLOAT_CONVS(si, h, uint32_t, 16, ) 12951 FLOAT_CONVS(si, s, float32, 32, ) 12952 FLOAT_CONVS(si, d, float64, 64, ) 12953 FLOAT_CONVS(ui, h, uint32_t, 16, u) 12954 FLOAT_CONVS(ui, s, float32, 32, u) 12955 FLOAT_CONVS(ui, d, float64, 64, u) 12956 12957 #undef CONV_ITOF 12958 #undef CONV_FTOI 12959 #undef FLOAT_CONVS 12960 12961 /* floating point conversion */ 12962 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env) 12963 { 12964 return float32_to_float64(x, &env->vfp.fp_status); 12965 } 12966 12967 float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env) 12968 { 12969 return float64_to_float32(x, &env->vfp.fp_status); 12970 } 12971 12972 /* VFP3 fixed point conversion. 
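 * The helpers expanded below convert via the softfloat *_scalbn
 * routines, i.e. a fixed-point input x with 'shift' fraction bits is
 * treated as x * 2^-shift. As a rough worked example (operands
 * assumed): the 'sltod' helper generated by VFP_CONV_FIX(sl, d, ...)
 * turns x = 0x180 with shift = 8 into 384 * 2^-8 = 1.5.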
*/ 12973 #define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 12974 float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \ 12975 void *fpstp) \ 12976 { return itype##_to_##float##fsz##_scalbn(x, -shift, fpstp); } 12977 12978 #define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, ROUND, suff) \ 12979 uint##isz##_t HELPER(vfp_to##name##p##suff)(float##fsz x, uint32_t shift, \ 12980 void *fpst) \ 12981 { \ 12982 if (unlikely(float##fsz##_is_any_nan(x))) { \ 12983 float_raise(float_flag_invalid, fpst); \ 12984 return 0; \ 12985 } \ 12986 return float##fsz##_to_##itype##_scalbn(x, ROUND, shift, fpst); \ 12987 } 12988 12989 #define VFP_CONV_FIX(name, p, fsz, isz, itype) \ 12990 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 12991 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ 12992 float_round_to_zero, _round_to_zero) \ 12993 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ 12994 get_float_rounding_mode(fpst), ) 12995 12996 #define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \ 12997 VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \ 12998 VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, \ 12999 get_float_rounding_mode(fpst), ) 13000 13001 VFP_CONV_FIX(sh, d, 64, 64, int16) 13002 VFP_CONV_FIX(sl, d, 64, 64, int32) 13003 VFP_CONV_FIX_A64(sq, d, 64, 64, int64) 13004 VFP_CONV_FIX(uh, d, 64, 64, uint16) 13005 VFP_CONV_FIX(ul, d, 64, 64, uint32) 13006 VFP_CONV_FIX_A64(uq, d, 64, 64, uint64) 13007 VFP_CONV_FIX(sh, s, 32, 32, int16) 13008 VFP_CONV_FIX(sl, s, 32, 32, int32) 13009 VFP_CONV_FIX_A64(sq, s, 32, 64, int64) 13010 VFP_CONV_FIX(uh, s, 32, 32, uint16) 13011 VFP_CONV_FIX(ul, s, 32, 32, uint32) 13012 VFP_CONV_FIX_A64(uq, s, 32, 64, uint64) 13013 13014 #undef VFP_CONV_FIX 13015 #undef VFP_CONV_FIX_FLOAT 13016 #undef VFP_CONV_FLOAT_FIX_ROUND 13017 #undef VFP_CONV_FIX_A64 13018 13019 uint32_t HELPER(vfp_sltoh)(uint32_t x, uint32_t shift, void *fpst) 13020 { 13021 return int32_to_float16_scalbn(x, -shift, fpst); 13022 } 13023 13024 uint32_t HELPER(vfp_ultoh)(uint32_t x, uint32_t shift, void *fpst) 13025 { 13026 return uint32_to_float16_scalbn(x, -shift, fpst); 13027 } 13028 13029 uint32_t HELPER(vfp_sqtoh)(uint64_t x, uint32_t shift, void *fpst) 13030 { 13031 return int64_to_float16_scalbn(x, -shift, fpst); 13032 } 13033 13034 uint32_t HELPER(vfp_uqtoh)(uint64_t x, uint32_t shift, void *fpst) 13035 { 13036 return uint64_to_float16_scalbn(x, -shift, fpst); 13037 } 13038 13039 uint32_t HELPER(vfp_toshh)(uint32_t x, uint32_t shift, void *fpst) 13040 { 13041 if (unlikely(float16_is_any_nan(x))) { 13042 float_raise(float_flag_invalid, fpst); 13043 return 0; 13044 } 13045 return float16_to_int16_scalbn(x, get_float_rounding_mode(fpst), 13046 shift, fpst); 13047 } 13048 13049 uint32_t HELPER(vfp_touhh)(uint32_t x, uint32_t shift, void *fpst) 13050 { 13051 if (unlikely(float16_is_any_nan(x))) { 13052 float_raise(float_flag_invalid, fpst); 13053 return 0; 13054 } 13055 return float16_to_uint16_scalbn(x, get_float_rounding_mode(fpst), 13056 shift, fpst); 13057 } 13058 13059 uint32_t HELPER(vfp_toslh)(uint32_t x, uint32_t shift, void *fpst) 13060 { 13061 if (unlikely(float16_is_any_nan(x))) { 13062 float_raise(float_flag_invalid, fpst); 13063 return 0; 13064 } 13065 return float16_to_int32_scalbn(x, get_float_rounding_mode(fpst), 13066 shift, fpst); 13067 } 13068 13069 uint32_t HELPER(vfp_toulh)(uint32_t x, uint32_t shift, void *fpst) 13070 { 13071 if (unlikely(float16_is_any_nan(x))) { 13072 float_raise(float_flag_invalid, fpst); 13073 return 0; 13074 } 13075 return 
float16_to_uint32_scalbn(x, get_float_rounding_mode(fpst), 13076 shift, fpst); 13077 } 13078 13079 uint64_t HELPER(vfp_tosqh)(uint32_t x, uint32_t shift, void *fpst) 13080 { 13081 if (unlikely(float16_is_any_nan(x))) { 13082 float_raise(float_flag_invalid, fpst); 13083 return 0; 13084 } 13085 return float16_to_int64_scalbn(x, get_float_rounding_mode(fpst), 13086 shift, fpst); 13087 } 13088 13089 uint64_t HELPER(vfp_touqh)(uint32_t x, uint32_t shift, void *fpst) 13090 { 13091 if (unlikely(float16_is_any_nan(x))) { 13092 float_raise(float_flag_invalid, fpst); 13093 return 0; 13094 } 13095 return float16_to_uint64_scalbn(x, get_float_rounding_mode(fpst), 13096 shift, fpst); 13097 } 13098 13099 /* Set the current fp rounding mode and return the old one. 13100 * The argument is a softfloat float_round_ value. 13101 */ 13102 uint32_t HELPER(set_rmode)(uint32_t rmode, void *fpstp) 13103 { 13104 float_status *fp_status = fpstp; 13105 13106 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 13107 set_float_rounding_mode(rmode, fp_status); 13108 13109 return prev_rmode; 13110 } 13111 13112 /* Set the current fp rounding mode in the standard fp status and return 13113 * the old one. This is for NEON instructions that need to change the 13114 * rounding mode but wish to use the standard FPSCR values for everything 13115 * else. Always set the rounding mode back to the correct value after 13116 * modifying it. 13117 * The argument is a softfloat float_round_ value. 13118 */ 13119 uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env) 13120 { 13121 float_status *fp_status = &env->vfp.standard_fp_status; 13122 13123 uint32_t prev_rmode = get_float_rounding_mode(fp_status); 13124 set_float_rounding_mode(rmode, fp_status); 13125 13126 return prev_rmode; 13127 } 13128 13129 /* Half precision conversions. */ 13130 float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, void *fpstp, uint32_t ahp_mode) 13131 { 13132 /* Squash FZ16 to 0 for the duration of conversion. In this case, 13133 * it would affect flushing input denormals. 13134 */ 13135 float_status *fpst = fpstp; 13136 flag save = get_flush_inputs_to_zero(fpst); 13137 set_flush_inputs_to_zero(false, fpst); 13138 float32 r = float16_to_float32(a, !ahp_mode, fpst); 13139 set_flush_inputs_to_zero(save, fpst); 13140 return r; 13141 } 13142 13143 uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, void *fpstp, uint32_t ahp_mode) 13144 { 13145 /* Squash FZ16 to 0 for the duration of conversion. In this case, 13146 * it would affect flushing output denormals. 13147 */ 13148 float_status *fpst = fpstp; 13149 flag save = get_flush_to_zero(fpst); 13150 set_flush_to_zero(false, fpst); 13151 float16 r = float32_to_float16(a, !ahp_mode, fpst); 13152 set_flush_to_zero(save, fpst); 13153 return r; 13154 } 13155 13156 float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, void *fpstp, uint32_t ahp_mode) 13157 { 13158 /* Squash FZ16 to 0 for the duration of conversion. In this case, 13159 * it would affect flushing input denormals. 13160 */ 13161 float_status *fpst = fpstp; 13162 flag save = get_flush_inputs_to_zero(fpst); 13163 set_flush_inputs_to_zero(false, fpst); 13164 float64 r = float16_to_float64(a, !ahp_mode, fpst); 13165 set_flush_inputs_to_zero(save, fpst); 13166 return r; 13167 } 13168 13169 uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, void *fpstp, uint32_t ahp_mode) 13170 { 13171 /* Squash FZ16 to 0 for the duration of conversion. In this case, 13172 * it would affect flushing output denormals. 
13173 */ 13174 float_status *fpst = fpstp; 13175 flag save = get_flush_to_zero(fpst); 13176 set_flush_to_zero(false, fpst); 13177 float16 r = float64_to_float16(a, !ahp_mode, fpst); 13178 set_flush_to_zero(save, fpst); 13179 return r; 13180 } 13181 13182 #define float32_two make_float32(0x40000000) 13183 #define float32_three make_float32(0x40400000) 13184 #define float32_one_point_five make_float32(0x3fc00000) 13185 13186 float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env) 13187 { 13188 float_status *s = &env->vfp.standard_fp_status; 13189 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 13190 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 13191 if (!(float32_is_zero(a) || float32_is_zero(b))) { 13192 float_raise(float_flag_input_denormal, s); 13193 } 13194 return float32_two; 13195 } 13196 return float32_sub(float32_two, float32_mul(a, b, s), s); 13197 } 13198 13199 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env) 13200 { 13201 float_status *s = &env->vfp.standard_fp_status; 13202 float32 product; 13203 if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) || 13204 (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) { 13205 if (!(float32_is_zero(a) || float32_is_zero(b))) { 13206 float_raise(float_flag_input_denormal, s); 13207 } 13208 return float32_one_point_five; 13209 } 13210 product = float32_mul(a, b, s); 13211 return float32_div(float32_sub(float32_three, product, s), float32_two, s); 13212 } 13213 13214 /* NEON helpers. */ 13215 13216 /* Constants 256 and 512 are used in some helpers; we avoid relying on 13217 * int->float conversions at run-time. */ 13218 #define float64_256 make_float64(0x4070000000000000LL) 13219 #define float64_512 make_float64(0x4080000000000000LL) 13220 #define float16_maxnorm make_float16(0x7bff) 13221 #define float32_maxnorm make_float32(0x7f7fffff) 13222 #define float64_maxnorm make_float64(0x7fefffffffffffffLL) 13223 13224 /* Reciprocal functions 13225 * 13226 * The algorithm that must be used to calculate the estimate 13227 * is specified by the ARM ARM, see FPRecipEstimate()/RecipEstimate 13228 */ 13229 13230 /* See RecipEstimate() 13231 * 13232 * input is a 9 bit fixed point number 13233 * input range 256 .. 511 for a number from 0.5 <= x < 1.0. 13234 * result range 256 .. 511 for a number from 1.0 to 511/256. 13235 */ 13236 13237 static int recip_estimate(int input) 13238 { 13239 int a, b, r; 13240 assert(256 <= input && input < 512); 13241 a = (input * 2) + 1; 13242 b = (1 << 19) / a; 13243 r = (b + 1) >> 1; 13244 assert(256 <= r && r < 512); 13245 return r; 13246 } 13247 13248 /* 13249 * Common wrapper to call recip_estimate 13250 * 13251 * The parameters are exponent and 64 bit fraction (without implicit 13252 * bit) where the binary point is nominally at bit 52. Returns a 13253 * float64 which can then be rounded to the appropriate size by the 13254 * callee. 
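 *
 * A small worked example of the underlying estimate (input value
 * assumed): recip_estimate(320), i.e. x = 320/512 = 0.625, computes
 * a = 641, b = (1 << 19) / 641 = 817 and returns (817 + 1) >> 1 = 409,
 * which is roughly 409/256 ~= 1.6 ~= 1/0.625.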
13255 */ 13256 13257 static uint64_t call_recip_estimate(int *exp, int exp_off, uint64_t frac) 13258 { 13259 uint32_t scaled, estimate; 13260 uint64_t result_frac; 13261 int result_exp; 13262 13263 /* Handle sub-normals */ 13264 if (*exp == 0) { 13265 if (extract64(frac, 51, 1) == 0) { 13266 *exp = -1; 13267 frac <<= 2; 13268 } else { 13269 frac <<= 1; 13270 } 13271 } 13272 13273 /* scaled = UInt('1':fraction<51:44>) */ 13274 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 13275 estimate = recip_estimate(scaled); 13276 13277 result_exp = exp_off - *exp; 13278 result_frac = deposit64(0, 44, 8, estimate); 13279 if (result_exp == 0) { 13280 result_frac = deposit64(result_frac >> 1, 51, 1, 1); 13281 } else if (result_exp == -1) { 13282 result_frac = deposit64(result_frac >> 2, 50, 2, 1); 13283 result_exp = 0; 13284 } 13285 13286 *exp = result_exp; 13287 13288 return result_frac; 13289 } 13290 13291 static bool round_to_inf(float_status *fpst, bool sign_bit) 13292 { 13293 switch (fpst->float_rounding_mode) { 13294 case float_round_nearest_even: /* Round to Nearest */ 13295 return true; 13296 case float_round_up: /* Round to +Inf */ 13297 return !sign_bit; 13298 case float_round_down: /* Round to -Inf */ 13299 return sign_bit; 13300 case float_round_to_zero: /* Round to Zero */ 13301 return false; 13302 } 13303 13304 g_assert_not_reached(); 13305 } 13306 13307 uint32_t HELPER(recpe_f16)(uint32_t input, void *fpstp) 13308 { 13309 float_status *fpst = fpstp; 13310 float16 f16 = float16_squash_input_denormal(input, fpst); 13311 uint32_t f16_val = float16_val(f16); 13312 uint32_t f16_sign = float16_is_neg(f16); 13313 int f16_exp = extract32(f16_val, 10, 5); 13314 uint32_t f16_frac = extract32(f16_val, 0, 10); 13315 uint64_t f64_frac; 13316 13317 if (float16_is_any_nan(f16)) { 13318 float16 nan = f16; 13319 if (float16_is_signaling_nan(f16, fpst)) { 13320 float_raise(float_flag_invalid, fpst); 13321 nan = float16_silence_nan(f16, fpst); 13322 } 13323 if (fpst->default_nan_mode) { 13324 nan = float16_default_nan(fpst); 13325 } 13326 return nan; 13327 } else if (float16_is_infinity(f16)) { 13328 return float16_set_sign(float16_zero, float16_is_neg(f16)); 13329 } else if (float16_is_zero(f16)) { 13330 float_raise(float_flag_divbyzero, fpst); 13331 return float16_set_sign(float16_infinity, float16_is_neg(f16)); 13332 } else if (float16_abs(f16) < (1 << 8)) { 13333 /* Abs(value) < 2.0^-16 */ 13334 float_raise(float_flag_overflow | float_flag_inexact, fpst); 13335 if (round_to_inf(fpst, f16_sign)) { 13336 return float16_set_sign(float16_infinity, f16_sign); 13337 } else { 13338 return float16_set_sign(float16_maxnorm, f16_sign); 13339 } 13340 } else if (f16_exp >= 29 && fpst->flush_to_zero) { 13341 float_raise(float_flag_underflow, fpst); 13342 return float16_set_sign(float16_zero, float16_is_neg(f16)); 13343 } 13344 13345 f64_frac = call_recip_estimate(&f16_exp, 29, 13346 ((uint64_t) f16_frac) << (52 - 10)); 13347 13348 /* result = sign : result_exp<4:0> : fraction<51:42> */ 13349 f16_val = deposit32(0, 15, 1, f16_sign); 13350 f16_val = deposit32(f16_val, 10, 5, f16_exp); 13351 f16_val = deposit32(f16_val, 0, 10, extract64(f64_frac, 52 - 10, 10)); 13352 return make_float16(f16_val); 13353 } 13354 13355 float32 HELPER(recpe_f32)(float32 input, void *fpstp) 13356 { 13357 float_status *fpst = fpstp; 13358 float32 f32 = float32_squash_input_denormal(input, fpst); 13359 uint32_t f32_val = float32_val(f32); 13360 bool f32_sign = float32_is_neg(f32); 13361 int f32_exp = extract32(f32_val, 23, 8); 13362 
uint32_t f32_frac = extract32(f32_val, 0, 23); 13363 uint64_t f64_frac; 13364 13365 if (float32_is_any_nan(f32)) { 13366 float32 nan = f32; 13367 if (float32_is_signaling_nan(f32, fpst)) { 13368 float_raise(float_flag_invalid, fpst); 13369 nan = float32_silence_nan(f32, fpst); 13370 } 13371 if (fpst->default_nan_mode) { 13372 nan = float32_default_nan(fpst); 13373 } 13374 return nan; 13375 } else if (float32_is_infinity(f32)) { 13376 return float32_set_sign(float32_zero, float32_is_neg(f32)); 13377 } else if (float32_is_zero(f32)) { 13378 float_raise(float_flag_divbyzero, fpst); 13379 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 13380 } else if (float32_abs(f32) < (1ULL << 21)) { 13381 /* Abs(value) < 2.0^-128 */ 13382 float_raise(float_flag_overflow | float_flag_inexact, fpst); 13383 if (round_to_inf(fpst, f32_sign)) { 13384 return float32_set_sign(float32_infinity, f32_sign); 13385 } else { 13386 return float32_set_sign(float32_maxnorm, f32_sign); 13387 } 13388 } else if (f32_exp >= 253 && fpst->flush_to_zero) { 13389 float_raise(float_flag_underflow, fpst); 13390 return float32_set_sign(float32_zero, float32_is_neg(f32)); 13391 } 13392 13393 f64_frac = call_recip_estimate(&f32_exp, 253, 13394 ((uint64_t) f32_frac) << (52 - 23)); 13395 13396 /* result = sign : result_exp<7:0> : fraction<51:29> */ 13397 f32_val = deposit32(0, 31, 1, f32_sign); 13398 f32_val = deposit32(f32_val, 23, 8, f32_exp); 13399 f32_val = deposit32(f32_val, 0, 23, extract64(f64_frac, 52 - 23, 23)); 13400 return make_float32(f32_val); 13401 } 13402 13403 float64 HELPER(recpe_f64)(float64 input, void *fpstp) 13404 { 13405 float_status *fpst = fpstp; 13406 float64 f64 = float64_squash_input_denormal(input, fpst); 13407 uint64_t f64_val = float64_val(f64); 13408 bool f64_sign = float64_is_neg(f64); 13409 int f64_exp = extract64(f64_val, 52, 11); 13410 uint64_t f64_frac = extract64(f64_val, 0, 52); 13411 13412 /* Deal with any special cases */ 13413 if (float64_is_any_nan(f64)) { 13414 float64 nan = f64; 13415 if (float64_is_signaling_nan(f64, fpst)) { 13416 float_raise(float_flag_invalid, fpst); 13417 nan = float64_silence_nan(f64, fpst); 13418 } 13419 if (fpst->default_nan_mode) { 13420 nan = float64_default_nan(fpst); 13421 } 13422 return nan; 13423 } else if (float64_is_infinity(f64)) { 13424 return float64_set_sign(float64_zero, float64_is_neg(f64)); 13425 } else if (float64_is_zero(f64)) { 13426 float_raise(float_flag_divbyzero, fpst); 13427 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 13428 } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) { 13429 /* Abs(value) < 2.0^-1024 */ 13430 float_raise(float_flag_overflow | float_flag_inexact, fpst); 13431 if (round_to_inf(fpst, f64_sign)) { 13432 return float64_set_sign(float64_infinity, f64_sign); 13433 } else { 13434 return float64_set_sign(float64_maxnorm, f64_sign); 13435 } 13436 } else if (f64_exp >= 2045 && fpst->flush_to_zero) { 13437 float_raise(float_flag_underflow, fpst); 13438 return float64_set_sign(float64_zero, float64_is_neg(f64)); 13439 } 13440 13441 f64_frac = call_recip_estimate(&f64_exp, 2045, f64_frac); 13442 13443 /* result = sign : result_exp<10:0> : fraction<51:0>; */ 13444 f64_val = deposit64(0, 63, 1, f64_sign); 13445 f64_val = deposit64(f64_val, 52, 11, f64_exp); 13446 f64_val = deposit64(f64_val, 0, 52, f64_frac); 13447 return make_float64(f64_val); 13448 } 13449 13450 /* The algorithm that must be used to calculate the estimate 13451 * is specified by the ARM ARM. 
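 * Informally: the input a (128..511) is first rescaled (roughly doubled,
 * with rounding), then the loop finds the largest b with a * b * b < 2^28
 * and (b + 1) / 2 is returned as an 8-bit estimate in the range 256..511.
 * E.g. an input of 256, representing 0.5, yields 361, which is roughly
 * 256 / sqrt(0.5).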
13452 */ 13453 13454 static int do_recip_sqrt_estimate(int a) 13455 { 13456 int b, estimate; 13457 13458 assert(128 <= a && a < 512); 13459 if (a < 256) { 13460 a = a * 2 + 1; 13461 } else { 13462 a = (a >> 1) << 1; 13463 a = (a + 1) * 2; 13464 } 13465 b = 512; 13466 while (a * (b + 1) * (b + 1) < (1 << 28)) { 13467 b += 1; 13468 } 13469 estimate = (b + 1) / 2; 13470 assert(256 <= estimate && estimate < 512); 13471 13472 return estimate; 13473 } 13474 13475 13476 static uint64_t recip_sqrt_estimate(int *exp , int exp_off, uint64_t frac) 13477 { 13478 int estimate; 13479 uint32_t scaled; 13480 13481 if (*exp == 0) { 13482 while (extract64(frac, 51, 1) == 0) { 13483 frac = frac << 1; 13484 *exp -= 1; 13485 } 13486 frac = extract64(frac, 0, 51) << 1; 13487 } 13488 13489 if (*exp & 1) { 13490 /* scaled = UInt('01':fraction<51:45>) */ 13491 scaled = deposit32(1 << 7, 0, 7, extract64(frac, 45, 7)); 13492 } else { 13493 /* scaled = UInt('1':fraction<51:44>) */ 13494 scaled = deposit32(1 << 8, 0, 8, extract64(frac, 44, 8)); 13495 } 13496 estimate = do_recip_sqrt_estimate(scaled); 13497 13498 *exp = (exp_off - *exp) / 2; 13499 return extract64(estimate, 0, 8) << 44; 13500 } 13501 13502 uint32_t HELPER(rsqrte_f16)(uint32_t input, void *fpstp) 13503 { 13504 float_status *s = fpstp; 13505 float16 f16 = float16_squash_input_denormal(input, s); 13506 uint16_t val = float16_val(f16); 13507 bool f16_sign = float16_is_neg(f16); 13508 int f16_exp = extract32(val, 10, 5); 13509 uint16_t f16_frac = extract32(val, 0, 10); 13510 uint64_t f64_frac; 13511 13512 if (float16_is_any_nan(f16)) { 13513 float16 nan = f16; 13514 if (float16_is_signaling_nan(f16, s)) { 13515 float_raise(float_flag_invalid, s); 13516 nan = float16_silence_nan(f16, s); 13517 } 13518 if (s->default_nan_mode) { 13519 nan = float16_default_nan(s); 13520 } 13521 return nan; 13522 } else if (float16_is_zero(f16)) { 13523 float_raise(float_flag_divbyzero, s); 13524 return float16_set_sign(float16_infinity, f16_sign); 13525 } else if (f16_sign) { 13526 float_raise(float_flag_invalid, s); 13527 return float16_default_nan(s); 13528 } else if (float16_is_infinity(f16)) { 13529 return float16_zero; 13530 } 13531 13532 /* Scale and normalize to a double-precision value between 0.25 and 1.0, 13533 * preserving the parity of the exponent. 
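 * (The parity matters because taking the square root halves the exponent:
 * recip_sqrt_estimate() picks a different 'scaled' encoding for odd and
 * even exponents before computing (exp_off - exp) / 2.)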
*/ 13534 13535 f64_frac = ((uint64_t) f16_frac) << (52 - 10); 13536 13537 f64_frac = recip_sqrt_estimate(&f16_exp, 44, f64_frac); 13538 13539 /* result = sign : result_exp<4:0> : estimate<7:0> : Zeros(2) */ 13540 val = deposit32(0, 15, 1, f16_sign); 13541 val = deposit32(val, 10, 5, f16_exp); 13542 val = deposit32(val, 2, 8, extract64(f64_frac, 52 - 8, 8)); 13543 return make_float16(val); 13544 } 13545 13546 float32 HELPER(rsqrte_f32)(float32 input, void *fpstp) 13547 { 13548 float_status *s = fpstp; 13549 float32 f32 = float32_squash_input_denormal(input, s); 13550 uint32_t val = float32_val(f32); 13551 uint32_t f32_sign = float32_is_neg(f32); 13552 int f32_exp = extract32(val, 23, 8); 13553 uint32_t f32_frac = extract32(val, 0, 23); 13554 uint64_t f64_frac; 13555 13556 if (float32_is_any_nan(f32)) { 13557 float32 nan = f32; 13558 if (float32_is_signaling_nan(f32, s)) { 13559 float_raise(float_flag_invalid, s); 13560 nan = float32_silence_nan(f32, s); 13561 } 13562 if (s->default_nan_mode) { 13563 nan = float32_default_nan(s); 13564 } 13565 return nan; 13566 } else if (float32_is_zero(f32)) { 13567 float_raise(float_flag_divbyzero, s); 13568 return float32_set_sign(float32_infinity, float32_is_neg(f32)); 13569 } else if (float32_is_neg(f32)) { 13570 float_raise(float_flag_invalid, s); 13571 return float32_default_nan(s); 13572 } else if (float32_is_infinity(f32)) { 13573 return float32_zero; 13574 } 13575 13576 /* Scale and normalize to a double-precision value between 0.25 and 1.0, 13577 * preserving the parity of the exponent. */ 13578 13579 f64_frac = ((uint64_t) f32_frac) << 29; 13580 13581 f64_frac = recip_sqrt_estimate(&f32_exp, 380, f64_frac); 13582 13583 /* result = sign : result_exp<7:0> : estimate<7:0> : Zeros(15) */ 13584 val = deposit32(0, 31, 1, f32_sign); 13585 val = deposit32(val, 23, 8, f32_exp); 13586 val = deposit32(val, 15, 8, extract64(f64_frac, 52 - 8, 8)); 13587 return make_float32(val); 13588 } 13589 13590 float64 HELPER(rsqrte_f64)(float64 input, void *fpstp) 13591 { 13592 float_status *s = fpstp; 13593 float64 f64 = float64_squash_input_denormal(input, s); 13594 uint64_t val = float64_val(f64); 13595 bool f64_sign = float64_is_neg(f64); 13596 int f64_exp = extract64(val, 52, 11); 13597 uint64_t f64_frac = extract64(val, 0, 52); 13598 13599 if (float64_is_any_nan(f64)) { 13600 float64 nan = f64; 13601 if (float64_is_signaling_nan(f64, s)) { 13602 float_raise(float_flag_invalid, s); 13603 nan = float64_silence_nan(f64, s); 13604 } 13605 if (s->default_nan_mode) { 13606 nan = float64_default_nan(s); 13607 } 13608 return nan; 13609 } else if (float64_is_zero(f64)) { 13610 float_raise(float_flag_divbyzero, s); 13611 return float64_set_sign(float64_infinity, float64_is_neg(f64)); 13612 } else if (float64_is_neg(f64)) { 13613 float_raise(float_flag_invalid, s); 13614 return float64_default_nan(s); 13615 } else if (float64_is_infinity(f64)) { 13616 return float64_zero; 13617 } 13618 13619 f64_frac = recip_sqrt_estimate(&f64_exp, 3068, f64_frac); 13620 13621 /* result = sign : result_exp<10:0> : estimate<7:0> : Zeros(44) */ 13622 val = deposit64(0, 63, 1, f64_sign); 13623 val = deposit64(val, 52, 11, f64_exp); 13624 val = deposit64(val, 44, 8, extract64(f64_frac, 52 - 8, 8)); 13625 return make_float64(val); 13626 } 13627 13628 uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp) 13629 { 13630 /* float_status *s = fpstp; */ 13631 int input, estimate; 13632 13633 if ((a & 0x80000000) == 0) { 13634 return 0xffffffff; 13635 } 13636 13637 input = extract32(a, 23, 9); 13638
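/* The operand is effectively a fixed-point fraction scaled by 2^32:
 * anything below 0x80000000 (i.e. less than 0.5, whose reciprocal would
 * be 2.0 or more) has already saturated to 0xffffffff above.  The top
 * nine bits (a value in 256..511) feed the same recip_estimate() used
 * by the floating-point helpers, and the 9-bit result goes back into
 * the top bits of the register.
 */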
estimate = recip_estimate(input); 13639 13640 return deposit32(0, (32 - 9), 9, estimate); 13641 } 13642 13643 uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp) 13644 { 13645 int estimate; 13646 13647 if ((a & 0xc0000000) == 0) { 13648 return 0xffffffff; 13649 } 13650 13651 estimate = do_recip_sqrt_estimate(extract32(a, 23, 9)); 13652 13653 return deposit32(0, 23, 9, estimate); 13654 } 13655 13656 /* VFPv4 fused multiply-accumulate */ 13657 float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp) 13658 { 13659 float_status *fpst = fpstp; 13660 return float32_muladd(a, b, c, 0, fpst); 13661 } 13662 13663 float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp) 13664 { 13665 float_status *fpst = fpstp; 13666 return float64_muladd(a, b, c, 0, fpst); 13667 } 13668 13669 /* ARMv8 round to integral */ 13670 float32 HELPER(rints_exact)(float32 x, void *fp_status) 13671 { 13672 return float32_round_to_int(x, fp_status); 13673 } 13674 13675 float64 HELPER(rintd_exact)(float64 x, void *fp_status) 13676 { 13677 return float64_round_to_int(x, fp_status); 13678 } 13679 13680 float32 HELPER(rints)(float32 x, void *fp_status) 13681 { 13682 int old_flags = get_float_exception_flags(fp_status), new_flags; 13683 float32 ret; 13684 13685 ret = float32_round_to_int(x, fp_status); 13686 13687 /* Suppress any inexact exceptions the conversion produced */ 13688 if (!(old_flags & float_flag_inexact)) { 13689 new_flags = get_float_exception_flags(fp_status); 13690 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); 13691 } 13692 13693 return ret; 13694 } 13695 13696 float64 HELPER(rintd)(float64 x, void *fp_status) 13697 { 13698 int old_flags = get_float_exception_flags(fp_status), new_flags; 13699 float64 ret; 13700 13701 ret = float64_round_to_int(x, fp_status); 13702 13703 new_flags = get_float_exception_flags(fp_status); 13704 13705 /* Suppress any inexact exceptions the conversion produced */ 13706 if (!(old_flags & float_flag_inexact)) { 13707 new_flags = get_float_exception_flags(fp_status); 13708 set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status); 13709 } 13710 13711 return ret; 13712 } 13713 13714 /* Convert ARM rounding mode to softfloat */ 13715 int arm_rmode_to_sf(int rmode) 13716 { 13717 switch (rmode) { 13718 case FPROUNDING_TIEAWAY: 13719 rmode = float_round_ties_away; 13720 break; 13721 case FPROUNDING_ODD: 13722 /* FIXME: add support for TIEAWAY and ODD */ 13723 qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n", 13724 rmode); 13725 /* fall through for now */ 13726 case FPROUNDING_TIEEVEN: 13727 default: 13728 rmode = float_round_nearest_even; 13729 break; 13730 case FPROUNDING_POSINF: 13731 rmode = float_round_up; 13732 break; 13733 case FPROUNDING_NEGINF: 13734 rmode = float_round_down; 13735 break; 13736 case FPROUNDING_ZERO: 13737 rmode = float_round_to_zero; 13738 break; 13739 } 13740 return rmode; 13741 } 13742 13743 /* CRC helpers. 13744 * The upper bytes of val (above the number specified by 'bytes') must have 13745 * been zeroed out by the caller. 13746 */ 13747 uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) 13748 { 13749 uint8_t buf[4]; 13750 13751 stl_le_p(buf, val); 13752 13753 /* zlib crc32 converts the accumulator and output to one's complement. 
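 * The A32/A64 CRC32 instructions are defined to produce the raw,
 * un-complemented CRC, so the accumulator is inverted on the way in and
 * the result inverted back on the way out to cancel zlib's pre- and
 * post-inversion.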
*/ 13754 return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; 13755 } 13756 13757 uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) 13758 { 13759 uint8_t buf[4]; 13760 13761 stl_le_p(buf, val); 13762 13763 /* Linux crc32c converts the output to one's complement. */ 13764 return crc32c(acc, buf, bytes) ^ 0xffffffff; 13765 } 13766 13767 /* Return the exception level to which FP-disabled exceptions should 13768 * be taken, or 0 if FP is enabled. 13769 */ 13770 int fp_exception_el(CPUARMState *env, int cur_el) 13771 { 13772 #ifndef CONFIG_USER_ONLY 13773 int fpen; 13774 13775 /* CPACR and the CPTR registers don't exist before v6, so FP is 13776 * always accessible 13777 */ 13778 if (!arm_feature(env, ARM_FEATURE_V6)) { 13779 return 0; 13780 } 13781 13782 /* The CPACR controls traps to EL1, or PL1 if we're 32 bit: 13783 * 0, 2 : trap EL0 and EL1/PL1 accesses 13784 * 1 : trap only EL0 accesses 13785 * 3 : trap no accesses 13786 */ 13787 fpen = extract32(env->cp15.cpacr_el1, 20, 2); 13788 switch (fpen) { 13789 case 0: 13790 case 2: 13791 if (cur_el == 0 || cur_el == 1) { 13792 /* Trap to PL1, which might be EL1 or EL3 */ 13793 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) { 13794 return 3; 13795 } 13796 return 1; 13797 } 13798 if (cur_el == 3 && !is_a64(env)) { 13799 /* Secure PL1 running at EL3 */ 13800 return 3; 13801 } 13802 break; 13803 case 1: 13804 if (cur_el == 0) { 13805 return 1; 13806 } 13807 break; 13808 case 3: 13809 break; 13810 } 13811 13812 /* For the CPTR registers we don't need to guard with an ARM_FEATURE 13813 * check because zero bits in the registers mean "don't trap". 13814 */ 13815 13816 /* CPTR_EL2 : present in v7VE or v8 */ 13817 if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1) 13818 && !arm_is_secure_below_el3(env)) { 13819 /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */ 13820 return 2; 13821 } 13822 13823 /* CPTR_EL3 : present in v8 */ 13824 if (extract32(env->cp15.cptr_el[3], 10, 1)) { 13825 /* Trap all FP ops to EL3 */ 13826 return 3; 13827 } 13828 #endif 13829 return 0; 13830 } 13831 13832 ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env, 13833 bool secstate, bool priv) 13834 { 13835 ARMMMUIdx mmu_idx = ARM_MMU_IDX_M; 13836 13837 if (priv) { 13838 mmu_idx |= ARM_MMU_IDX_M_PRIV; 13839 } 13840 13841 if (armv7m_nvic_neg_prio_requested(env->nvic, secstate)) { 13842 mmu_idx |= ARM_MMU_IDX_M_NEGPRI; 13843 } 13844 13845 if (secstate) { 13846 mmu_idx |= ARM_MMU_IDX_M_S; 13847 } 13848 13849 return mmu_idx; 13850 } 13851 13852 /* Return the MMU index for a v7M CPU in the specified security state */ 13853 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) 13854 { 13855 bool priv = arm_current_el(env) != 0; 13856 13857 return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv); 13858 } 13859 13860 ARMMMUIdx arm_mmu_idx(CPUARMState *env) 13861 { 13862 int el; 13863 13864 if (arm_feature(env, ARM_FEATURE_M)) { 13865 return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); 13866 } 13867 13868 el = arm_current_el(env); 13869 if (el < 2 && arm_is_secure_below_el3(env)) { 13870 return ARMMMUIdx_S1SE0 + el; 13871 } else { 13872 return ARMMMUIdx_S12NSE0 + el; 13873 } 13874 } 13875 13876 int cpu_mmu_index(CPUARMState *env, bool ifetch) 13877 { 13878 return arm_to_core_mmu_idx(arm_mmu_idx(env)); 13879 } 13880 13881 #ifndef CONFIG_USER_ONLY 13882 ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env) 13883 { 13884 return stage_1_mmu_idx(arm_mmu_idx(env)); 13885 } 13886 #endif 13887 13888 void 
cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, 13889 target_ulong *cs_base, uint32_t *pflags) 13890 { 13891 ARMMMUIdx mmu_idx = arm_mmu_idx(env); 13892 int current_el = arm_current_el(env); 13893 int fp_el = fp_exception_el(env, current_el); 13894 uint32_t flags = 0; 13895 13896 if (is_a64(env)) { 13897 ARMCPU *cpu = arm_env_get_cpu(env); 13898 uint64_t sctlr; 13899 13900 *pc = env->pc; 13901 flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1); 13902 13903 /* Get control bits for tagged addresses. */ 13904 { 13905 ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); 13906 ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1); 13907 int tbii, tbid; 13908 13909 /* FIXME: ARMv8.1-VHE S2 translation regime. */ 13910 if (regime_el(env, stage1) < 2) { 13911 ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1); 13912 tbid = (p1.tbi << 1) | p0.tbi; 13913 tbii = tbid & ~((p1.tbid << 1) | p0.tbid); 13914 } else { 13915 tbid = p0.tbi; 13916 tbii = tbid & !p0.tbid; 13917 } 13918 13919 flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii); 13920 flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid); 13921 } 13922 13923 if (cpu_isar_feature(aa64_sve, cpu)) { 13924 int sve_el = sve_exception_el(env, current_el); 13925 uint32_t zcr_len; 13926 13927 /* If SVE is disabled, but FP is enabled, 13928 * then the effective len is 0. 13929 */ 13930 if (sve_el != 0 && fp_el == 0) { 13931 zcr_len = 0; 13932 } else { 13933 zcr_len = sve_zcr_len_for_el(env, current_el); 13934 } 13935 flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el); 13936 flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len); 13937 } 13938 13939 if (current_el == 0) { 13940 /* FIXME: ARMv8.1-VHE S2 translation regime. */ 13941 sctlr = env->cp15.sctlr_el[1]; 13942 } else { 13943 sctlr = env->cp15.sctlr_el[current_el]; 13944 } 13945 if (cpu_isar_feature(aa64_pauth, cpu)) { 13946 /* 13947 * In order to save space in flags, we record only whether 13948 * pauth is "inactive", meaning all insns are implemented as 13949 * a nop, or "active" when some action must be performed. 13950 * The decision of which action to take is left to a helper. 13951 */ 13952 if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) { 13953 flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1); 13954 } 13955 } 13956 13957 if (cpu_isar_feature(aa64_bti, cpu)) { 13958 /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */ 13959 if (sctlr & (current_el == 0 ? 
SCTLR_BT0 : SCTLR_BT1)) { 13960 flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1); 13961 } 13962 flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype); 13963 } 13964 } else { 13965 *pc = env->regs[15]; 13966 flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb); 13967 flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len); 13968 flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride); 13969 flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits); 13970 flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env)); 13971 flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env)); 13972 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30) 13973 || arm_el_is_aa64(env, 1)) { 13974 flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1); 13975 } 13976 flags = FIELD_DP32(flags, TBFLAG_A32, XSCALE_CPAR, env->cp15.c15_cpar); 13977 } 13978 13979 flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx)); 13980 13981 /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine 13982 * states defined in the ARM ARM for software singlestep: 13983 * SS_ACTIVE PSTATE.SS State 13984 * 0 x Inactive (the TB flag for SS is always 0) 13985 * 1 0 Active-pending 13986 * 1 1 Active-not-pending 13987 */ 13988 if (arm_singlestep_active(env)) { 13989 flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1); 13990 if (is_a64(env)) { 13991 if (env->pstate & PSTATE_SS) { 13992 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1); 13993 } 13994 } else { 13995 if (env->uncached_cpsr & PSTATE_SS) { 13996 flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1); 13997 } 13998 } 13999 } 14000 if (arm_cpu_data_is_big_endian(env)) { 14001 flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1); 14002 } 14003 flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el); 14004 14005 if (arm_v7m_is_handler_mode(env)) { 14006 flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1); 14007 } 14008 14009 /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is 14010 * suppressing them because the requested execution priority is less than 0. 14011 */ 14012 if (arm_feature(env, ARM_FEATURE_V8) && 14013 arm_feature(env, ARM_FEATURE_M) && 14014 !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) && 14015 (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) { 14016 flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1); 14017 } 14018 14019 *pflags = flags; 14020 *cs_base = 0; 14021 } 14022 14023 #ifdef TARGET_AARCH64 14024 /* 14025 * The manual says that when SVE is enabled and VQ is widened the 14026 * implementation is allowed to zero the previously inaccessible 14027 * portion of the registers. The corollary to that is that when 14028 * SVE is enabled and VQ is narrowed we are also allowed to zero 14029 * the now inaccessible portion of the registers. 14030 * 14031 * The intent of this is that no predicate bit beyond VQ is ever set. 14032 * Which means that some operations on predicate registers themselves 14033 * may operate on full uint64_t or even unrolled across the maximum 14034 * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally 14035 * may well be cheaper than conditionals to restrict the operation 14036 * to the relevant portion of a uint16_t[16]. 14037 */ 14038 void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) 14039 { 14040 int i, j; 14041 uint64_t pmask; 14042 14043 assert(vq >= 1 && vq <= ARM_MAX_VQ); 14044 assert(vq <= arm_env_get_cpu(env)->sve_max_vq); 14045 14046 /* Zap the high bits of the zregs. 
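 * Each Z register holds ARM_MAX_VQ quadwords (2 * ARM_MAX_VQ uint64_t
 * elements), so everything from d[2 * vq] upwards is no longer
 * architecturally visible and is cleared below.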
*/ 14047 for (i = 0; i < 32; i++) { 14048 memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); 14049 } 14050 14051 /* Zap the high bits of the pregs and ffr. */ 14052 pmask = 0; 14053 if (vq & 3) { 14054 pmask = ~(-1ULL << (16 * (vq & 3))); 14055 } 14056 for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { 14057 for (i = 0; i < 17; ++i) { 14058 env->vfp.pregs[i].p[j] &= pmask; 14059 } 14060 pmask = 0; 14061 } 14062 } 14063 14064 /* 14065 * Notice a change in SVE vector size when changing EL. 14066 */ 14067 void aarch64_sve_change_el(CPUARMState *env, int old_el, 14068 int new_el, bool el0_a64) 14069 { 14070 ARMCPU *cpu = arm_env_get_cpu(env); 14071 int old_len, new_len; 14072 bool old_a64, new_a64; 14073 14074 /* Nothing to do if no SVE. */ 14075 if (!cpu_isar_feature(aa64_sve, cpu)) { 14076 return; 14077 } 14078 14079 /* Nothing to do if FP is disabled in either EL. */ 14080 if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { 14081 return; 14082 } 14083 14084 /* 14085 * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped 14086 * at ELx, or not available because the EL is in AArch32 state, then 14087 * for all purposes other than a direct read, the ZCR_ELx.LEN field 14088 * has an effective value of 0". 14089 * 14090 * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). 14091 * If we ignore aa32 state, we would fail to see the vq4->vq0 transition 14092 * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that 14093 * we already have the correct register contents when encountering the 14094 * vq0->vq0 transition between EL0->EL1. 14095 */ 14096 old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; 14097 old_len = (old_a64 && !sve_exception_el(env, old_el) 14098 ? sve_zcr_len_for_el(env, old_el) : 0); 14099 new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; 14100 new_len = (new_a64 && !sve_exception_el(env, new_el) 14101 ? sve_zcr_len_for_el(env, new_el) : 0); 14102 14103 /* When changing vector length, clear inaccessible state. */ 14104 if (new_len < old_len) { 14105 aarch64_sve_narrow_vq(env, new_len + 1); 14106 } 14107 } 14108 #endif 14109